#' @name clustering
#' @rdname clustering
#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
#'   iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
#'   from discrete wavelet coefficients).
#'   \code{clusteringTask2()} runs a full stage-2 task, which consists of computing the
#'   synchrones and then the WER distances, before applying the clustering algorithm.
#'   \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
#'   clustering procedures for stages 1 and 2, respectively. The former applies the
#'   first clustering algorithm to a contributions matrix, while the latter clusters
#'   a set of series inside one task (~nb_items_clust1).
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#'   \code{getContribs(indices)} outputs a contributions matrix
#' @inheritParams computeSynchrones
#' @inheritParams claws
#'
#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
#'   Indices are irrelevant for stage-2 clustering, thus \code{clusteringTask2()}
#'   outputs a big.matrix of medoids (of size L x K2, where K2 = final number of clusters)
NULL
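
# Illustrative sketch (not run) of the plug-in functions expected by clusteringTask1().
# 'contribs' is a hypothetical matrix of contributions, assumed to hold one series per
# column; the PAM-based algoClust1 below is only one example of a suitable algorithm.
#   getContribs <- function(inds) contribs[, inds, drop=FALSE]
#   algoClust1 <- function(ctrb, K) cluster::pam(t(ctrb), K, diss=FALSE)$id.med
#   medoid_ids <- clusteringTask1(seq_len(ncol(contribs)), getContribs, K1=50,
#     algoClust1, nb_items_clust1=150, parll=FALSE)
# algoClust1() must return K1 positions within its input matrix, since clusteringTask1()
# maps them back with inds[ algoClust1(getContribs(inds), K1) ].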

#' @rdname clustering
#' @export
clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
	ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust, outfile = "")
		parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
	}
	# Iterate clustering algorithm 1 until K1 medoids are found
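	# For illustration (figures are only an example): with 10000 series, K1 = 50 and
	# nb_items_clust1 = 150, successive passes keep 10000 -> 3300 -> 1100 -> 350 -> 100
	# -> 50 indices, at which point length(indices) == K1 and the loop stops.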
	while (length(indices) > K1)
	{
		# Balance tasks by splitting the indices set - as evenly as possible
		indices_workers = .spreadIndices(indices, nb_items_clust1)
		if (verbose)
			cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
		indices <-
			if (parll)
			{
				unlist( parallel::parLapply(cl, indices_workers, function(inds) {
					require("epclust", quietly=TRUE)
					inds[ algoClust1(getContribs(inds), K1) ]
				}) )
			}
			else
			{
				unlist( lapply(indices_workers, function(inds)
					inds[ algoClust1(getContribs(inds), K1) ]
				) )
			}
	}
	if (parll)
		parallel::stopCluster(cl)

	indices #medoids
}

#' @rdname clustering
#' @export
clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, nvoice, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (verbose)
		cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))

	if (ncol(medoids) <= K2)
		return (medoids)

	# A) Obtain synchrones, that is to say the cumulative power consumptions
	#    for each of the K1 initial groups
	synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
		nb_series_per_chunk, ncores_clust, verbose, parll)

	# B) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
	distances = computeWerDists(
		synchrones, nvoice, nbytes, endian, ncores_clust, verbose, parll)

	# C) Apply clustering algorithm 2 on the WER distances matrix
	if (verbose)
		cat(paste("*** algoClust2() on ",nrow(distances)," items\n", sep=""))
	medoids[ , algoClust2(distances, K2) ]
}
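
# Illustrative sketch (not run) of a suitable algoClust2: PAM on the K1 x K1 WER
# distance matrix, returning the K2 medoid indices used to subset 'medoids' above.
#   algoClust2 <- function(dists, K) cluster::pam(as.dist(dists), K, diss=TRUE)$id.med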

#' computeSynchrones
#'
#' Compute the synchrones curves (sums of each cluster's elements) from a matrix of
#' medoids, using Euclidean distance.
#'
#' @param medoids big.matrix of medoids (curves of same length as initial series)
#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
#'   have been replaced by stage-1 medoids)
#' @param nb_ref_curves How many reference series? (This number is known at this stage)
#' @inheritParams claws
#'
#' @return A big.matrix of size L x K1 where L = length of a series
#'
#' @export
computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	# Synchrones computation is embarrassingly parallel: compute it by chunks of series
	computeSynchronesChunk = function(indices)
	{
		if (parll)
		{
			require("bigmemory", quietly=TRUE)
			requireNamespace("synchronicity", quietly=TRUE)
			require("epclust", quietly=TRUE)
			# The big.matrix objects need to be attached to be usable on the workers
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			medoids <- bigmemory::attach.big.matrix(medoids_desc)
			m <- synchronicity::attach.mutex(m_desc)
		}

		# Obtain a chunk of reference series
		ref_series = getRefSeries(indices)
		nb_series = ncol(ref_series)

		# Get medoids indices for this chunk of series
		mi = computeMedoidsIndices(medoids@address, ref_series)

		# Update synchrones using mi above
		for (i in seq_len(nb_series))
		{
			if (parll)
				synchronicity::lock(m) #locking required because several writes at the same time
			synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
			if (parll)
				synchronicity::unlock(m)
		}
		NULL
	}

	K = ncol(medoids) ; L = nrow(medoids)
	# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in parallel
	synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
	# NOTE: synchronicity is only available on Linux & macOS; on Windows, run sequentially
	parll = (parll && requireNamespace("synchronicity",quietly=TRUE)
		&& Sys.info()['sysname'] != "Windows")
	if (parll)
	{
		m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk
		# mutex and big.matrix objects cannot be passed directly:
		# they will be accessed from their descriptions
		m_desc <- synchronicity::describe(m)
		synchrones_desc = bigmemory::describe(synchrones)
		medoids_desc = bigmemory::describe(medoids)
		cl = parallel::makeCluster(ncores_clust)
		parallel::clusterExport(cl, envir=environment(),
			varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries"))
	}
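	# Minimal standalone sketch (not run) of the descriptor/attach pattern used above;
	# illustration only, not part of the epclust API:
	#   X <- bigmemory::big.matrix(nrow=4, ncol=3, type="double", init=0)
	#   X_desc <- bigmemory::describe(X)
	#   cl2 <- parallel::makeCluster(2)
	#   parallel::clusterExport(cl2, "X_desc", envir=environment())
	#   invisible(parallel::parLapply(cl2, 1:3, function(j) {
	#     Xw <- bigmemory::attach.big.matrix(X_desc)  #re-attach shared memory on the worker
	#     Xw[,j] <- j
	#     NULL
	#   }))
	#   parallel::stopCluster(cl2) ; X[,]  #columns were filled by the workers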

	if (verbose)
		cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))

	# Balance tasks by splitting the indices set - maybe not so evenly, but
	# max==TRUE in next call ensures that no set has more than nb_series_per_chunk items.
	indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE)
	ignored <-
		if (parll)
			parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
		else
			lapply(indices_workers, computeSynchronesChunk)

	if (parll)
		parallel::stopCluster(cl)

	return (synchrones)
}
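
# In-memory equivalent (not run) of what computeSynchrones() produces, for a small
# dataset; 'ref' and 'med' are hypothetical plain matrices with one series per column:
#   assign_idx <- apply(ref, 2, function(x) which.min(colSums((med - x)^2)))
#   synch <- sapply(seq_len(ncol(med)), function(k)
#     rowSums(ref[, assign_idx == k, drop=FALSE]))  #L x K matrix of per-cluster sums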

#' computeWerDists
#'
#' Compute the WER distances between the synchrones curves (in columns), which are
#' returned (e.g.) by \code{computeSynchrones()}
#'
#' @param synchrones A big.matrix of synchrones, in columns. The series have the same
#'   length as the series in the initial dataset
#' @inheritParams claws
#'
#' @return A distance matrix of size K1 x K1
#'
#' @export
computeWerDists = function(synchrones, nvoice, nbytes, endian, ncores_clust=1,
	verbose=FALSE, parll=TRUE)
{
	n <- ncol(synchrones)
	L <- nrow(synchrones)
	noctave = ceiling(log2(L)) #smallest number of octaves covering the series length

	# Initialize result as a square big.matrix of size 'number of synchrones'
	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")

	# Generate n(n-1)/2 pairs for WER distances computations
	pairs = list()
	V = seq_len(n)
	for (i in 1:n)
	{
		V = V[-1]
		pairs = c(pairs, lapply(V, function(v) c(i,v)))
	}
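	# e.g. n = 4 yields the pairs (1,2), (1,3), (1,4), (2,3), (2,4), (3,4)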

	cwt_file = ".cwt.bin"
	# Compute the synchrones[,index] CWT, and store it in the binary file above
	computeSaveCWT = function(index)
	{
		if (parll)
		{
			# As in computeDistanceIJ() below: (re-)attach the shared big.matrix on the worker
			require("bigmemory", quietly=TRUE)
			require("Rwave", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
		}
		ts <- scale(ts(synchrones[,index]), center=TRUE, scale=FALSE)
		ts_cwt = Rwave::cwt(ts, noctave, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)

		# Serialization
		binarize(as.matrix(c(as.double(Re(ts_cwt)),as.double(Im(ts_cwt)))), cwt_file, 1,
			",", nbytes, endian)
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		synchrones_desc <- bigmemory::describe(synchrones)
		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
		parallel::clusterExport(cl, varlist=c("parll","synchrones_desc","Xwer_dist_desc",
			"noctave","nvoice","verbose","getCWT"), envir=environment())
	}

	if (verbose)
		cat(paste("--- Precompute and serialize synchrones CWT\n", sep=""))

	ignored <-
		if (parll)
			parallel::parLapply(cl, 1:n, computeSaveCWT)
		else
			lapply(1:n, computeSaveCWT)

	# Function to retrieve a synchrone CWT from (binary) file
	getSynchroneCWT = function(index, L)
	{
		flat_cwt <- getDataInFile(index, cwt_file, nbytes, endian)
		cwt_length = length(flat_cwt) / 2
		re_part = matrix(flat_cwt[1:cwt_length], nrow=L)
		im_part = matrix(flat_cwt[(cwt_length+1):(2*cwt_length)], nrow=L)
		re_part + 1i * im_part
	}

	# Compute distance between columns i and j in synchrones
	computeDistanceIJ = function(pair)
	{
		if (parll)
		{
			# parallel workers start with an empty environment
			require("bigmemory", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
		}

		i = pair[1] ; j = pair[2]
		if (verbose && j==i+1 && !parll)
			cat(paste("   Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))

		# Compute CWT of columns i and j in synchrones
		L = nrow(synchrones)
		cwt_i <- getSynchroneCWT(i, L)
		cwt_j <- getSynchroneCWT(j, L)

		# Compute the ratio-of-integrals formula (5.6) for WER^2,
		# see §5.3 of https://arxiv.org/abs/1101.4744v2
		num <- filterMA(Mod(cwt_i * Conj(cwt_j)))
		WX <- filterMA(Mod(cwt_i * Conj(cwt_i)))
		WY <- filterMA(Mod(cwt_j * Conj(cwt_j)))
		wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))

		Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * (1 - wer2))
		Xwer_dist[j,i] <- Xwer_dist[i,j]
		Xwer_dist[i,i] <- 0.
	}

	if (verbose)
		cat(paste("--- Compute WER distances\n", sep=""))

	ignored <-
		if (parll)
			parallel::parLapply(cl, pairs, computeDistanceIJ)
		else
			lapply(pairs, computeDistanceIJ)

	if (parll)
		parallel::stopCluster(cl)

	unlink(cwt_file)

	Xwer_dist[n,n] = 0.
	Xwer_dist[,] #~small matrix K1 x K1
}

# Helper function to divide indices into balanced sets.
# If max == TRUE, set sizes cannot exceed nb_per_set
.spreadIndices = function(indices, nb_per_set, max=FALSE)
{
	L = length(indices)
	nb_workers = floor( L / nb_per_set )
	rem = L %% nb_per_set
	if (nb_workers == 0 || (nb_workers==1 && rem==0))
	{
		# L <= nb_per_set, simple case
		indices_workers = list(indices)
	}
	else
	{
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )

		if (max)
		{
			# Sets are not so well balanced, but the size cap is what matters here
			return ( c( indices_workers, if (rem>0) list(indices[(L-rem+1):L]) else NULL ) )
		}

		# Spread the remaining load among the workers
		while (rem > 0)
		{
			index = rem%%nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
			rem = rem - 1
		}
	}
	indices_workers
}
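
# Illustrative behaviour of .spreadIndices() (not run):
#   .spreadIndices(1:10, 3)           # list(1:3, c(4:6, 10), 7:9) - remainder spread over sets
#   .spreadIndices(1:10, 3, max=TRUE) # list(1:3, 4:6, 7:9, 10)    - extra set, none exceeds 3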