#' @name clustering
#' @rdname clustering
#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
#' iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
#' through discrete wavelet coefficients).
#' \code{clusteringTask2()} runs a full stage-2 task, which consists of computing the
#' synchrones and then the WER distances, before applying the clustering algorithm.
#' \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
#' clustering procedures for stages 1 and 2 respectively. The former applies the
#' first clustering algorithm to a contributions matrix, while the latter clusters
#' a set of series inside one task (~nb_items_clust1).
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#' \code{getContribs(indices)} outputs a contributions matrix
#' @inheritParams computeSynchrones
#' @inheritParams claws
#'
#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
#' Indices are irrelevant for stage-2 clustering, thus \code{clusteringTask2()}
#' outputs a big.matrix of medoids (of size L x K2, K2 = final number of clusters)
NULL

#' @rdname clustering
#' @export
clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
	ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust, outfile = "")
		parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
	}
	while (length(indices) > K1)
	{
		indices_workers = .spreadIndices(indices, nb_items_clust1)
		if (verbose)
			cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
		indices <-
			if (parll)
			{
				unlist( parallel::parLapply(cl, indices_workers, function(inds) {
					require("epclust", quietly=TRUE)
					inds[ algoClust1(getContribs(inds), K1) ]
				}) )
			}
			else
			{
				unlist( lapply(indices_workers, function(inds)
					inds[ algoClust1(getContribs(inds), K1) ]
				) )
			}
	}
	if (parll)
		parallel::stopCluster(cl)

	indices #medoids
}
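
# Usage sketch (illustration only, not part of the package API; wrapped in an
# unused function so nothing runs at load time). The 'toy_*' helpers below are
# made-up examples of the getContribs / algoClust1 contracts; cluster::pam is
# just one possible choice of first-stage clustering algorithm.
.sketch_clusteringTask1 = function()
{
	contribs_toy = matrix(runif(50*10), nrow=50) # 50 series x 10 contributions
	toy_getContribs = function(inds) contribs_toy[inds,,drop=FALSE]
	# algoClust1 must return the indices of K1 medoids within its input matrix
	toy_algoClust1 = function(contribs, K) cluster::pam(contribs, K, diss=FALSE)$id.med
	clusteringTask1(1:50, toy_getContribs, K1=4, toy_algoClust1,
		nb_items_clust1=20, parll=FALSE, verbose=TRUE)
}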

#' @rdname clustering
#' @export
clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, sync_mean, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (verbose)
		cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))

	if (ncol(medoids) <= K2)
		return (medoids)
	synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
		nb_series_per_chunk, sync_mean, ncores_clust, verbose, parll)
	distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll)
	if (verbose)
		cat(paste("   algoClust2() on ",nrow(distances)," items\n", sep=""))
	# Select the K2 final medoids among the columns (medoids are stored in columns)
	medoids[, algoClust2(distances,K2) ]
}
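
# Sketch of the algoClust2 contract (illustration only): given a dissimilarity
# matrix of size K1 x K1, return the indices of K2 medoids. cluster::pam on a
# precomputed dissimilarity is one possible (assumed, not mandated) choice:
#
#   toy_algoClust2 = function(dists, K) cluster::pam(dists, K, diss=TRUE)$id.med
#   # toy_algoClust2(distances, K2) --> integer vector of length K2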

#' computeSynchrones
#'
#' Compute the synchrone curves (sums of the series within each cluster) from a
#' matrix of medoids, using L2 distances.
#'
#' @param medoids big.matrix of medoids (curves of same length as the initial series)
#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
#' have been replaced by stage-1 medoids)
#' @param nb_ref_curves Number of reference series (this number is known at this stage)
#' @inheritParams claws
#'
#' @return A big.matrix of size L x K1 where L = length of a series
#'
#' @export
computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, sync_mean, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	computeSynchronesChunk = function(indices)
	{
		if (parll)
		{
			require("bigmemory", quietly=TRUE)
			requireNamespace("synchronicity", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			if (sync_mean)
				counts <- bigmemory::attach.big.matrix(counts_desc)
			medoids <- bigmemory::attach.big.matrix(medoids_desc)
			m <- synchronicity::attach.mutex(m_desc)
		}

		ref_series = getRefSeries(indices)
		nb_series = ncol(ref_series) #series are stored in columns

		# Get medoids indices for this chunk of series
		mi = computeMedoidsIndices(medoids@address, ref_series)

		for (i in seq_len(nb_series))
		{
			if (parll)
				synchronicity::lock(m)
			synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
			if (sync_mean)
				counts[ mi[i] ] = counts[ mi[i] ] + 1
			if (parll)
				synchronicity::unlock(m)
		}
	}

	K = ncol(medoids) ; L = nrow(medoids)
	# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
	# TODO: if size > RAM (not our case), use file-backed big.matrix
	synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
	if (sync_mean)
		counts = bigmemory::big.matrix(nrow=K, ncol=1, type="double", init=0)
	# synchronicity is only for Linux & MacOS; on Windows: run sequentially
	parll = (requireNamespace("synchronicity",quietly=TRUE)
		&& parll && Sys.info()['sysname'] != "Windows")
	if (parll)
	{
		m <- synchronicity::boost.mutex()
		m_desc <- synchronicity::describe(m)
		synchrones_desc = bigmemory::describe(synchrones)
		if (sync_mean)
			counts_desc = bigmemory::describe(counts)
		medoids_desc = bigmemory::describe(medoids)
		cl = parallel::makeCluster(ncores_clust)
		varlist = c("synchrones_desc","sync_mean","m_desc","medoids_desc","getRefSeries")
		if (sync_mean)
			varlist = c(varlist, "counts_desc")
		parallel::clusterExport(cl, varlist, envir=environment())
	}

	if (verbose)
		cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))
	indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
	ignored <-
		if (parll)
			parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
		else
			lapply(indices_workers, computeSynchronesChunk)

	if (parll)
		parallel::stopCluster(cl)

	if (!sync_mean)
		return (synchrones)

	#TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 2, counts, '/') )
	for (i in seq_len(K))
		synchrones[,i] = synchrones[,i] / counts[i]
	#NOTE: could some clusters be empty? (when series already come from stage 2)
	# ...maybe; but let's hope the resulting K1' remains clearly larger than K2
	noNA_cols = sapply(seq_len(K), function(i) all(!is.nan(synchrones[,i])))
	if (all(noNA_cols))
		return (synchrones)
	# Else: some clusters are empty, need to slice synchrones
	bigmemory::as.big.matrix(synchrones[,noNA_cols])
}
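
# Toy illustration (not part of the package) of what a "synchrone" is: each
# reference series is assigned to its closest medoid (L2 distance), and a
# synchrone is the sum of all series assigned to a given medoid. All names
# below are made up for the sketch; it is wrapped in an unused function so
# nothing runs at load time.
.sketch_synchrones = function()
{
	L = 8 ; K = 2 ; nb_series = 5
	medoids_toy = matrix(rnorm(L*K), nrow=L)          # L x K, one medoid per column
	series_toy  = matrix(rnorm(L*nb_series), nrow=L)  # L x nb_series
	closest = apply(series_toy, 2, function(s)
		which.min(colSums((medoids_toy - s)^2)))
	sapply(seq_len(K), function(k)
		rowSums(series_toy[, closest==k, drop=FALSE])) # L x K matrix of synchrones
}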

#' computeWerDists
#'
#' Compute the WER distances between the synchrone curves (in columns), as
#' returned (e.g.) by \code{computeSynchrones()}
#'
#' @param synchrones A big.matrix of synchrones, in columns; the series have the same
#' length as the series in the initial dataset
#' @inheritParams claws
#'
#' @return A matrix of size K1 x K1
#'
#' @export
computeWerDists = function(synchrones, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	n <- ncol(synchrones)     #number of synchrones (K1)
	delta <- nrow(synchrones) #length of a series
	#TODO: automatic tuning of all these parameters? (for other users)
	nvoice <- 4
	# noctave = 2^13 = 8192 half-hours ~ 180 days ; ~log2(delta)
	noctave = 13
	# 4 here represents 2^5 = 32 half-hours ~ 1 day
	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
	scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
	s0 = 2
	w0 = 2*pi
	scaled = FALSE
	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
	totnoct = noctave + as.integer(s0log/nvoice) + 1

	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")

	cwt_file = ".epclust_bin/cwt"
	#TODO: args, nb_per_chunk, nbytes, endian

	# Generate n(n-1)/2 pairs for WER distances computations
	pairs = list()
	V = seq_len(n)
	for (i in 1:n)
	{
		V = V[-1]
		pairs = c(pairs, lapply(V, function(v) c(i,v)))
	}

	computeSaveCWT = function(index)
	{
		ts <- scale(ts(synchrones[,index]), center=TRUE, scale=scaled)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
		#Normalization
		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
		sqres <- sweep(ts.cwt,2,sqs,'*')
		res <- sqres / max(Mod(sqres))
		#TODO: serialize the CWTs, retrieve them via getDataInFile;
		#--> OK, just need to store them as plain series of size delta*ncol (53*17519)
		binarize(c(as.double(Re(res)),as.double(Im(res))), cwt_file, ncol(res), ",", nbytes, endian)
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		synchrones_desc <- bigmemory::describe(synchrones)
		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
		parallel::clusterExport(cl, varlist=c("synchrones_desc","Xwer_dist_desc","totnoct",
			"nvoice","w0","s0log","noctave","s0","verbose","getCWT"), envir=environment())
	}

	if (verbose)
		cat(paste("--- Precompute and serialize all CWT\n", sep=""))
	#precompute and serialize all CWT
	ignored <-
		if (parll)
			parallel::parLapply(cl, 1:n, computeSaveCWT)
		else
			lapply(1:n, computeSaveCWT)

	getCWT = function(index)
	{
		#from cwt_file ...
		res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
		#TODO: rebuild the complex CWT matrix from the serialized Re/Im parts
	}

	# Distance between synchrones i and j
	computeDistancesIJ = function(pair)
	{
		if (parll)
		{
			require("bigmemory", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
		}

		i = pair[1] ; j = pair[2]
		if (verbose && j==i+1)
			cat(paste("   Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
		cwt_i <- getCWT(i)
		cwt_j <- getCWT(j)

		num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
		WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
		WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
		wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
		Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
		Xwer_dist[j,i] <- Xwer_dist[i,j]
		Xwer_dist[i,i] = 0.
	}

	if (verbose)
		cat(paste("--- Compute WER dists\n", sep=""))
	ignored <-
		if (parll)
			parallel::parLapply(cl, pairs, computeDistancesIJ)
		else
			lapply(pairs, computeDistancesIJ)

	if (parll)
		parallel::stopCluster(cl)

	Xwer_dist[n,n] = 0.
	distances <- Xwer_dist[,]
	rm(Xwer_dist) ; gc()
	distances #~small matrix K1 x K1
}
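
# Toy illustration (not part of the package) of the WER dissimilarity between
# two series, calling Rwave::cwt directly and skipping both the serialization
# and the smoothing filter (epclustFilter) applied above. Wrapped in an unused
# function so nothing runs at load time; all names are made up for the sketch.
.sketch_werDist = function()
{
	x = sin(2*pi*(1:256)/32) ; y = sin(2*pi*(1:256)/16)
	noctave_toy = 5 ; nvoice_toy = 4
	cwt_x = Rwave::cwt(x, noctave_toy, nvoice_toy, plot=FALSE)
	cwt_y = Rwave::cwt(y, noctave_toy, nvoice_toy, plot=FALSE)
	num = Mod(cwt_x * Conj(cwt_y))
	WX = Mod(cwt_x * Conj(cwt_x))
	WY = Mod(cwt_y * Conj(cwt_y))
	wer2 = sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
	sqrt(length(x) * ncol(cwt_x) * max(1 - wer2, 0.)) #same formula as above
}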

# Helper function to divide indices into balanced sets
.spreadIndices = function(indices, nb_per_set)
{
	L = length(indices)
	nb_workers = floor( L / nb_per_set )
	rem = L %% nb_per_set
	if (nb_workers == 0 || (nb_workers==1 && rem==0))
	{
		# L <= nb_per_set, simple case
		indices_workers = list(indices)
	}
	else
	{
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
		# Spread the remaining load among the workers
		while (rem > 0)
		{
			index = rem%%nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
			rem = rem - 1
		}
	}
	indices_workers
}
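
# Example of the helper's behaviour (worked by hand, illustration only):
#   .spreadIndices(1:11, 4)
#   # --> list(c(1,2,3,4,10), c(5,6,7,8,9,11)): two base sets of 4 indices,
#   #     with the 3 leftover indices spread alternately over the two sets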