#' @name clustering
#' @rdname clustering
#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
#'   iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
#'   through discrete wavelet coefficients).
#'   \code{clusteringTask2()} runs one full stage-2 task, which consists of computing the
#'   synchrones and then the WER distances, before applying the clustering algorithm.
#'   \code{computeClusters1()} and \code{computeClusters2()} are the atomic clustering
#'   procedures for stages 1 and 2 respectively. The former applies the first clustering
#'   algorithm to a contributions matrix, while the latter clusters a set of series
#'   inside one task (~nb_items_clust1).
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#'   \code{getContribs(indices)} outputs a contributions matrix
#' @inheritParams computeSynchrones
#' @inheritParams claws
#'
#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
#'   Indices are irrelevant for stage-2 clustering, so \code{clusteringTask2()}
#'   outputs a big.matrix of medoids (of size L x K2, K2 = final number of clusters).
NULL
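
# Illustrative sketch (hypothetical, not executed): one way the arguments of
# clusteringTask1() could be provided. 'contribs_matrix', 'myGetContribs' and
# 'myAlgoClust1' are assumptions made for this example, not part of the package API;
# PAM from the 'cluster' package stands in for any stage-1 clustering algorithm
# returning the (column) indices of K1 medoids.
if (FALSE)
{
	set.seed(1)
	contribs_matrix <- matrix(rnorm(100*50), nrow=100, ncol=50) #50 series, one per column
	myGetContribs <- function(indices) contribs_matrix[, indices, drop=FALSE]
	myAlgoClust1 <- function(contribs, K) cluster::pam(t(contribs), K, diss=FALSE)$id.med
	medoids_indices <- clusteringTask1(1:50, myGetContribs, K1=5, myAlgoClust1,
		nb_items_clust1=20, ncores_clust=1, verbose=TRUE, parll=FALSE)
}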

#' @rdname clustering
#' @export
clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
	ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust, outfile = "")
		parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
	}
	# Iterate clustering algorithm 1 until K1 medoids are found
	while (length(indices) > K1)
	{
		# Balance tasks by splitting the indices set - as evenly as possible
		indices_workers = .spreadIndices(indices, nb_items_clust1)
		if (verbose)
			cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
		indices <-
			if (parll)
			{
				unlist( parallel::parLapply(cl, indices_workers, function(inds) {
					require("epclust", quietly=TRUE)
					inds[ algoClust1(getContribs(inds), K1) ]
				}) )
			}
			else
			{
				unlist( lapply(indices_workers, function(inds)
					inds[ algoClust1(getContribs(inds), K1) ]
				) )
			}
	}
	if (parll)
		parallel::stopCluster(cl)

	indices #medoids
}

#' @rdname clustering
#' @export
clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (verbose)
		cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))

	if (ncol(medoids) <= K2)
		return (medoids)

	# A) Obtain the synchrones, that is to say the cumulative power consumptions
	#    of each of the K1 initial groups
	synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
		nb_series_per_chunk, ncores_clust, verbose, parll)

	# B) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
	distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll)

	# C) Apply clustering algorithm 2 on the WER distances matrix
	if (verbose)
		cat(paste(" algoClust2() on ",nrow(distances)," items\n", sep=""))
	medoids[ ,algoClust2(distances,K2) ]
}
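
# Hypothetical example of an 'algoClust2' argument (an assumption for illustration,
# not the package's prescribed choice): it must take the K1 x K1 WER distance matrix
# and return the indices of K2 medoid columns. PAM on the dissimilarities does that.
if (FALSE)
{
	myAlgoClust2 <- function(dists, K) cluster::pam(as.dist(dists), K, diss=TRUE)$id.med
}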

#' computeSynchrones
#'
#' Compute the synchrones curves (sums of cluster elements) from a matrix of medoids,
#' using Euclidean distance.
#'
#' @param medoids big.matrix of medoids (curves of the same length as the initial series)
#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
#'   have been replaced by stage-1 medoids)
#' @param nb_ref_curves Number of reference series (this number is known at this stage)
#' @inheritParams claws
#'
#' @return A big.matrix of size L x K1, where L = series length
#'
#' @export
computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
	nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	# Synchrones computation is embarrassingly parallel: compute it by chunks of series
	computeSynchronesChunk = function(indices)
	{
		if (parll)
		{
			require("bigmemory", quietly=TRUE)
			requireNamespace("synchronicity", quietly=TRUE)
			require("epclust", quietly=TRUE)
			# The big.matrix objects need to be attached to be usable on the workers
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			medoids <- bigmemory::attach.big.matrix(medoids_desc)
			m <- synchronicity::attach.mutex(m_desc)
		}

		# Obtain a chunk of reference series
		ref_series = getRefSeries(indices)
		nb_series = ncol(ref_series)

		# Get medoids indices for this chunk of series
		mi = computeMedoidsIndices(medoids@address, ref_series)

		# Update synchrones using mi above
		for (i in seq_len(nb_series))
		{
			if (parll)
				synchronicity::lock(m) #lock because several workers may write at the same time
			synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
			if (parll)
				synchronicity::unlock(m)
		}
	}

	K = ncol(medoids) ; L = nrow(medoids)
	# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in parallel
	synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
	# NOTE: synchronicity is only available for Linux & MacOS; on Windows, run sequentially
	parll = (requireNamespace("synchronicity",quietly=TRUE)
		&& parll && Sys.info()['sysname'] != "Windows")
	if (parll)
	{
		m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk
		# mutex and big.matrix objects cannot be passed directly to the workers:
		# they will be accessed via their descriptors
		m_desc <- synchronicity::describe(m)
		synchrones_desc = bigmemory::describe(synchrones)
		medoids_desc = bigmemory::describe(medoids)
		cl = parallel::makeCluster(ncores_clust)
		parallel::clusterExport(cl, envir=environment(),
			varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries"))
	}

	if (verbose)
		cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))

	# Balance tasks by splitting the indices set - maybe not so evenly, but
	# max==TRUE in the next call ensures that no set has more than nb_series_per_chunk items.
	indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE)
	ignored <-
		if (parll)
			parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
		else
			lapply(indices_workers, computeSynchronesChunk)

	if (parll)
		parallel::stopCluster(cl)

	return (synchrones)
}
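
# Conceptual sketch (hypothetical, plain matrices instead of big.matrix objects, no
# parallelism): a synchrone is the sum of all series assigned to a given medoid, the
# assignment being done by smallest Euclidean distance. All names below are local to
# this example.
if (FALSE)
{
	L <- 100 ; K1 <- 5 ; n <- 200
	series  <- matrix(rnorm(L*n),  nrow=L, ncol=n)
	medoids <- matrix(rnorm(L*K1), nrow=L, ncol=K1)
	# index of the closest medoid for each series (one series per column)
	mi <- apply(series, 2, function(s) which.min(colSums((medoids - s)^2)))
	synchrones <- sapply(seq_len(K1), function(k) rowSums(series[, mi==k, drop=FALSE]))
	dim(synchrones) #L x K1
}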

#' computeWerDists
#'
#' Compute the WER distances between the synchrone curves (one per column), as
#' returned (e.g.) by \code{computeSynchrones()}
#'
#' @param synchrones A big.matrix of synchrones (one per column); these series have
#'   the same length as the series in the initial dataset
#' @inheritParams claws
#'
#' @return A matrix of size K1 x K1
#'
#' @export
computeWerDists = function(synchrones, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	n <- ncol(synchrones)
	L <- nrow(synchrones)
	#TODO: automatically tune all these parameters? (for other users)
	# 4 here represents 2^5 = 32 half-hours ~ 1 day
	nvoice <- 4
	# noctave = 2^13 = 8192 half-hours ~ 180 days ; ~log2(ncol(synchrones))
	noctave = 13

	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")

	cwt_file = ".epclust_bin/cwt"
	#TODO: args, nb_per_chunk, nbytes, endian

	# Generate n(n-1)/2 pairs for WER distances computations
	pairs = list()
	V = seq_len(n)
	for (i in 1:n)
	{
		V = V[-1]
		pairs = c(pairs, lapply(V, function(v) c(i,v)))
	}

	computeSaveCWT = function(index)
	{
		ts <- scale(ts(synchrones[,index]), center=TRUE, scale=FALSE)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
		#Normalization
		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
		sqres <- sweep(ts.cwt,2,sqs,'*')
		res <- sqres / max(Mod(sqres))
		#TODO: serialize the CWTs and retrieve them via getDataInFile;
		#--> OK, just store them as plain series of size L*n' (53*17519)
		binarize(c(as.double(Re(res)),as.double(Im(res))), cwt_file, ncol(res), ",", nbytes, endian)
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		synchrones_desc <- bigmemory::describe(synchrones)
		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
		parallel::clusterExport(cl, envir=environment(),
			varlist=c("synchrones_desc","Xwer_dist_desc","totnoct","nvoice","w0","s0log",
				"noctave","s0","verbose","getCWT"))
	}

	if (verbose)
		cat("--- Precompute and serialize all CWTs\n")

	# Precompute and serialize all CWTs
	ignored <-
		if (parll)
			parallel::parLapply(cl, 1:n, computeSaveCWT)
		else
			lapply(1:n, computeSaveCWT)

	getCWT = function(index)
	{
		# Retrieve the CWT of synchrone 'index' from cwt_file ...
		res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
		#TODO: reconstruct the complex CWT matrix from the serialized Re/Im parts above
	}

	# Distance between synchrones i and j
	computeDistancesIJ = function(pair)
	{
		if (parll)
		{
			require("bigmemory", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
		}

		i = pair[1] ; j = pair[2]
		if (verbose && j==i+1)
			cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
		cwt_i <- getCWT(i)
		cwt_j <- getCWT(j)

		num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
		WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
		WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
		wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
		Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * max(1 - wer2, 0.))
		Xwer_dist[j,i] <- Xwer_dist[i,j]
		Xwer_dist[i,i] = 0.
	}

	if (verbose)
	{
		cat(paste("--- Compute WER dists\n", sep=""))
	}
	ignored <-
		if (parll)
			parallel::parLapply(cl, pairs, computeDistancesIJ)
		else
			lapply(pairs, computeDistancesIJ)

	if (parll)
		parallel::stopCluster(cl)

	Xwer_dist[n,n] = 0.
	distances <- Xwer_dist[,]
	rm(Xwer_dist) ; gc()
	distances #~small matrix K1 x K1
}
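
# Numerical sketch (hypothetical): the WER-based dissimilarity between two series,
# given their CWT matrices cwt_i and cwt_j as plain complex matrices of size L x n.
# This mirrors the formula used in computeDistancesIJ(), with the smoothing step
# (epclustFilter) omitted for brevity; all names below are local to this example.
if (FALSE)
{
	L <- 32 ; n <- 20
	cwt_i <- matrix(complex(real=rnorm(L*n), imaginary=rnorm(L*n)), nrow=L)
	cwt_j <- matrix(complex(real=rnorm(L*n), imaginary=rnorm(L*n)), nrow=L)
	num <- Mod(cwt_i * Conj(cwt_j))
	WX  <- Mod(cwt_i * Conj(cwt_i))
	WY  <- Mod(cwt_j * Conj(cwt_j))
	wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
	dist_ij <- sqrt(L * n * max(1 - wer2, 0.))
}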

# Helper function to divide indices into balanced sets.
# If max == TRUE, set sizes cannot exceed nb_per_set
.spreadIndices = function(indices, nb_per_set, max=FALSE)
{
	L = length(indices)
	nb_workers = floor( L / nb_per_set )
	rem = L %% nb_per_set
	if (nb_workers == 0 || (nb_workers==1 && rem==0))
	{
		# L <= nb_per_set, simple case
		indices_workers = list(indices)
	}
	else
	{
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )

		if (max)
		{
			# Sets are not so well balanced, but their size cannot exceed nb_per_set:
			# the remaining indices (if any) form one extra, smaller set
			if (rem > 0)
				indices_workers = c( indices_workers, list(indices[(L-rem+1):L]) )
			return (indices_workers)
		}

		# Spread the remaining load among the workers
		while (rem > 0)
		{
			index = rem%%nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
			rem = rem - 1
		}
	}
	indices_workers
}
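
# Illustrative sketch (not executed): how .spreadIndices() splits a set of indices.
if (FALSE)
{
	# 10 indices, nb_per_set=4: two base sets of 4, then the 2 remaining indices are
	# spread among them, giving two sets of 5
	.spreadIndices(1:10, 4)
	# With max=TRUE no set may exceed 4 items: the remainder becomes its own set
	.spreadIndices(1:10, 4, max=TRUE)
}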