| 1 | #' @name clustering |
| 2 | #' @rdname clustering |
| 3 | #' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2 |
| 4 | #' |
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
#' iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
#' from discrete wavelet coefficients).
#' \code{clusteringTask2()} runs one full stage-2 task, which consists of computing the
#' synchrones and then the WER distances, before applying the clustering algorithm.
#' \code{computeClusters1()} and \code{computeClusters2()} are the atomic clustering
#' procedures for stages 1 and 2 respectively. The former applies the first clustering
#' algorithm to a contributions matrix, while the latter clusters a set of series
#' inside one task (~nb_items_clust1).
| 16 | #' |
| 17 | #' @param indices Range of series indices to cluster in parallel (initial data) |
| 18 | #' @param getContribs Function to retrieve contributions from initial series indices: |
#' \code{getContribs(indices)} outputs a contributions matrix
| 20 | #' @inheritParams computeSynchrones |
| 21 | #' @inheritParams claws |
| 22 | #' |
#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
#' Indices are irrelevant for stage-2 clustering, so \code{clusteringTask2()} outputs
#' a big.matrix of medoids (of size L x K2, where K2 is the final number of clusters)
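#'
#' @examples
#' \dontrun{
#' # Illustrative sketch (not the full claws() pipeline): reduce 60 random series
#' # to K1=4 medoid indices. getContribs is a stand-in returning precomputed
#' # contributions; algoClust1 mimics the PAM-based default of claws().
#' contribs <- matrix(runif(20*60), nrow=20) # 20 coefficients x 60 series
#' getContribs <- function(inds) contribs[,inds]
#' algoClust1 <- function(ctrb, K) cluster::pam(t(ctrb), K, diss=FALSE)$id.med
#' clusteringTask1(1:60, getContribs, K1=4, algoClust1, nb_items_clust1=30,
#'   parll=FALSE)
#' # For the stage-2 building blocks, see computeSynchrones() and computeWerDists()
#' }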
| 26 | NULL |
| 27 | |
| 28 | #' @rdname clustering |
| 29 | #' @export |
| 30 | clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1, |
| 31 | ncores_clust=1, verbose=FALSE, parll=TRUE) |
| 32 | { |
| 33 | if (parll) |
| 34 | { |
| 35 | cl = parallel::makeCluster(ncores_clust, outfile = "") |
| 36 | parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment()) |
| 37 | } |
| 38 | # Iterate clustering algorithm 1 until K1 medoids are found |
| 39 | while (length(indices) > K1) |
| 40 | { |
| 41 | # Balance tasks by splitting the indices set - as evenly as possible |
| 42 | indices_workers = .spreadIndices(indices, nb_items_clust1) |
| 43 | if (verbose) |
| 44 | cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep="")) |
| 45 | indices <- |
| 46 | if (parll) |
| 47 | { |
| 48 | unlist( parallel::parLapply(cl, indices_workers, function(inds) { |
| 49 | require("epclust", quietly=TRUE) |
| 50 | inds[ algoClust1(getContribs(inds), K1) ] |
| 51 | }) ) |
| 52 | } |
| 53 | else |
| 54 | { |
| 55 | unlist( lapply(indices_workers, function(inds) |
| 56 | inds[ algoClust1(getContribs(inds), K1) ] |
| 57 | ) ) |
| 58 | } |
| 59 | } |
| 60 | if (parll) |
| 61 | parallel::stopCluster(cl) |
| 62 | |
| 63 | indices #medoids |
| 64 | } |
| 65 | |
| 66 | #' @rdname clustering |
| 67 | #' @export |
| 68 | clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves, |
	nb_series_per_chunk, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
| 70 | { |
| 71 | if (verbose) |
| 72 | cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep="")) |
| 73 | |
| 74 | if (ncol(medoids) <= K2) |
| 75 | return (medoids) |
| 76 | |
	# A) Obtain synchrones, i.e. the summed power consumptions
	# of each of the K1 initial groups
| 79 | synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves, |
| 80 | nb_series_per_chunk, ncores_clust, verbose, parll) |
| 81 | |
	# B) Compute the WER distances (Wavelet Extended coefficient of deteRmination)
| 83 | distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll) |
| 84 | |
| 85 | # C) Apply clustering algorithm 2 on the WER distances matrix |
| 86 | if (verbose) |
| 87 | cat(paste(" algoClust2() on ",nrow(distances)," items\n", sep="")) |
| 88 | medoids[ ,algoClust2(distances,K2) ] |
| 89 | } |
| 90 | |
| 91 | #' computeSynchrones |
| 92 | #' |
#' Compute the synchrones (one curve per cluster: the sum of its members) from a
#' matrix of medoids, assigning each series to its closest medoid in Euclidean distance.
| 95 | #' |
| 96 | #' @param medoids big.matrix of medoids (curves of same length as initial series) |
| 97 | #' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series |
| 98 | #' have been replaced by stage-1 medoids) |
#' @param nb_ref_curves Number of reference series (this number is known at this stage)
| 100 | #' @inheritParams claws |
| 101 | #' |
#' @return A big.matrix of size L x K1, where L is the length of a series
| 103 | #' |
| 104 | #' @export |
| 105 | computeSynchrones = function(medoids, getRefSeries, nb_ref_curves, |
	nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
| 107 | { |
	# Synchrones computation is embarrassingly parallel: compute it by chunks of series
| 109 | computeSynchronesChunk = function(indices) |
| 110 | { |
| 111 | if (parll) |
| 112 | { |
| 113 | require("bigmemory", quietly=TRUE) |
| 114 | requireNamespace("synchronicity", quietly=TRUE) |
| 115 | require("epclust", quietly=TRUE) |
| 116 | # The big.matrix objects need to be attached to be usable on the workers |
| 117 | synchrones <- bigmemory::attach.big.matrix(synchrones_desc) |
| 118 | medoids <- bigmemory::attach.big.matrix(medoids_desc) |
| 119 | m <- synchronicity::attach.mutex(m_desc) |
| 120 | } |
| 121 | |
| 122 | # Obtain a chunk of reference series |
| 123 | ref_series = getRefSeries(indices) |
| 124 | nb_series = ncol(ref_series) |
| 125 | |
| 126 | # Get medoids indices for this chunk of series |
| 127 | mi = computeMedoidsIndices(medoids@address, ref_series) |
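		# (computeMedoidsIndices() is the package's native helper: for each series of
		# the chunk it returns the index of the closest medoid, in Euclidean distance)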
| 128 | |
| 129 | # Update synchrones using mi above |
| 130 | for (i in seq_len(nb_series)) |
| 131 | { |
| 132 | if (parll) |
				synchronicity::lock(m) # locking required: several workers may write at once
| 134 | synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i] |
| 135 | if (parll) |
| 136 | synchronicity::unlock(m) |
| 137 | } |
| 138 | } |
| 139 | |
| 140 | K = ncol(medoids) ; L = nrow(medoids) |
| 141 | # Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in // |
| 142 | synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.) |
| 143 | # NOTE: synchronicity is only for Linux & MacOS; on Windows: run sequentially |
| 144 | parll = (requireNamespace("synchronicity",quietly=TRUE) |
| 145 | && parll && Sys.info()['sysname'] != "Windows") |
| 146 | if (parll) |
| 147 | { |
| 148 | m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk |
| 149 | # mutex and big.matrix objects cannot be passed directly: |
| 150 | # they will be accessed from their description |
| 151 | m_desc <- synchronicity::describe(m) |
| 152 | synchrones_desc = bigmemory::describe(synchrones) |
| 153 | medoids_desc = bigmemory::describe(medoids) |
| 154 | cl = parallel::makeCluster(ncores_clust) |
| 155 | parallel::clusterExport(cl, envir=environment(), |
| 156 | varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries")) |
| 157 | } |
| 158 | |
| 159 | if (verbose) |
| 160 | cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep="")) |
| 161 | |
| 162 | # Balance tasks by splitting the indices set - maybe not so evenly, but |
| 163 | # max==TRUE in next call ensures that no set has more than nb_series_per_chunk items. |
| 164 | indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE) |
| 165 | ignored <- |
| 166 | if (parll) |
| 167 | parallel::parLapply(cl, indices_workers, computeSynchronesChunk) |
| 168 | else |
| 169 | lapply(indices_workers, computeSynchronesChunk) |
| 170 | |
| 171 | if (parll) |
| 172 | parallel::stopCluster(cl) |
| 173 | |
| 174 | return (synchrones) |
| 175 | } |
| 176 | |
| 177 | #' computeWerDists |
| 178 | #' |
#' Compute the WER distances between the synchrone curves (one per column), as
#' returned (e.g.) by \code{computeSynchrones()}
#'
#' @param synchrones A big.matrix of synchrones, in columns. Each synchrone has the
#' same length as the initial series
| 184 | #' @inheritParams claws |
| 185 | #' |
| 186 | #' @return A matrix of size K1 x K1 |
| 187 | #' |
| 188 | #' @export |
computeWerDists = function(synchrones, nbytes, endian, ncores_clust=1,
	verbose=FALSE, parll=TRUE)
| 190 | { |
| 191 | n <- ncol(synchrones) |
| 192 | L <- nrow(synchrones) |
	#TODO: automatic tuning of all these parameters? (for other users)
	# Number of voices per octave for the CWT
	nvoice <- 4
	# Coarsest octave: 2^13 = 8192 half-hours ~ 170 days (roughly log2 of the series length)
	noctave = 13
	# NOTE: the parameters below were referenced but never defined in this file;
	# these are ASSUMED values following the usual CWT conventions: smallest scale
	# s0, central frequency w0, derived offset s0log and total octave count totnoct
	s0 <- 2
	w0 <- 2*pi
	s0log <- as.integer( (log2(s0) - 1) * nvoice + 1.5 )
	totnoct <- noctave + as.integer(s0log / nvoice) + 1
| 198 | |
| 199 | Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double") |
| 200 | |
| 201 | cwt_file = ".epclust_bin/cwt" |
| 202 | #TODO: args, nb_per_chunk, nbytes, endian |
| 203 | |
| 204 | # Generate n(n-1)/2 pairs for WER distances computations |
| 205 | pairs = list() |
| 206 | V = seq_len(n) |
| 207 | for (i in 1:n) |
| 208 | { |
| 209 | V = V[-1] |
| 210 | pairs = c(pairs, lapply(V, function(v) c(i,v))) |
| 211 | } |
| 212 | |
	computeSaveCWT = function(index)
	{
		if (parll)
		{
			# Attach the shared big.matrix on the workers (see computeDistancesIJ below)
			require("bigmemory", quietly=TRUE)
			require("epclust", quietly=TRUE)
			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
		}
		ts <- scale(ts(synchrones[,index]), center=TRUE, scale=FALSE)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
		# Normalization
		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
		sqres <- sweep(ts.cwt,2,sqs,'*')
		res <- sqres / max(Mod(sqres))
		# Serialize the CWT: store Re and Im as two consecutive series of size L*n'
		# (e.g. 53*17519), to be retrieved via getDataInFile() in getCWT() below
		# (sketch; assumes binarize() treats each column as one series)
		binarize(matrix(c(as.double(Re(res)), as.double(Im(res))), ncol=2),
			cwt_file, ncol(res), ",", nbytes, endian)
	}
| 226 | |
	# Deserialize one CWT from cwt_file, rebuilding the complex L x n' matrix
	# (completion sketch: assumes the two-series Re/Im layout written by
	# computeSaveCWT() above)
	getCWT = function(index)
	{
		res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
		matrix(complex(real=res[,1], imaginary=res[,2]), nrow=L)
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		synchrones_desc <- bigmemory::describe(synchrones)
		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
		# NOTE: getCWT must be defined before this point, or clusterExport would fail
		parallel::clusterExport(cl, envir=environment(),
			varlist=c("synchrones_desc","Xwer_dist_desc","totnoct","nvoice","w0","s0log",
				"noctave","s0","verbose","getCWT"))
	}

	# Precompute and serialize all CWT
	if (verbose)
		cat("--- Precompute and serialize all CWT\n")
	ignored <-
		if (parll)
			parallel::parLapply(cl, 1:n, computeSaveCWT)
		else
			lapply(1:n, computeSaveCWT)
| 255 | |
	# Distance between synchrones i and j (stored in columns i and j)
| 257 | computeDistancesIJ = function(pair) |
| 258 | { |
| 259 | if (parll) |
| 260 | { |
| 261 | require("bigmemory", quietly=TRUE) |
| 262 | require("epclust", quietly=TRUE) |
| 263 | synchrones <- bigmemory::attach.big.matrix(synchrones_desc) |
| 264 | Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc) |
| 265 | } |
| 266 | |
| 267 | i = pair[1] ; j = pair[2] |
| 268 | if (verbose && j==i+1) |
| 269 | cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep="")) |
| 270 | cwt_i <- getCWT(i) |
| 271 | cwt_j <- getCWT(j) |
| 272 | |
| 273 | num <- epclustFilter(Mod(cwt_i * Conj(cwt_j))) |
| 274 | WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i))) |
| 275 | WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j))) |
| 276 | wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY)) |
| 277 | Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * max(1 - wer2, 0.)) |
| 278 | Xwer_dist[j,i] <- Xwer_dist[i,j] |
| 279 | Xwer_dist[i,i] = 0. |
| 280 | } |
| 281 | |
	if (verbose)
		cat("--- Compute WER dists\n")
| 286 | ignored <- |
| 287 | if (parll) |
| 288 | parallel::parLapply(cl, pairs, computeDistancesIJ) |
| 289 | else |
| 290 | lapply(pairs, computeDistancesIJ) |
| 291 | |
| 292 | if (parll) |
| 293 | parallel::stopCluster(cl) |
| 294 | |
	Xwer_dist[n,n] = 0. # pairs all have i < j, so [n,n] was never set above
| 296 | distances <- Xwer_dist[,] |
| 297 | rm(Xwer_dist) ; gc() |
| 298 | distances #~small matrix K1 x K1 |
| 299 | } |
| 300 | |
| 301 | # Helper function to divide indices into balanced sets |
| 302 | # If max == TRUE, sets sizes cannot exceed nb_per_set |
| 303 | .spreadIndices = function(indices, nb_per_set, max=FALSE) |
| 304 | { |
| 305 | L = length(indices) |
| 306 | nb_workers = floor( L / nb_per_set ) |
| 307 | rem = L %% nb_per_set |
| 308 | if (nb_workers == 0 || (nb_workers==1 && rem==0)) |
| 309 | { |
| 310 | # L <= nb_per_set, simple case |
| 311 | indices_workers = list(indices) |
| 312 | } |
| 313 | else |
| 314 | { |
| 315 | indices_workers = lapply( seq_len(nb_workers), function(i) |
| 316 | indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] ) |
| 317 | |
		if (max)
		{
			# Sets are not so well balanced, but the size cap is the critical constraint
			if (rem > 0)
				indices_workers = c( indices_workers, list(indices[(L-rem+1):L]) )
			return (indices_workers)
		}

		# Spread the remaining load among the workers
| 326 | while (rem > 0) |
| 327 | { |
| 328 | index = rem%%nb_workers + 1 |
| 329 | indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1]) |
| 330 | rem = rem - 1 |
| 331 | } |
| 332 | } |
| 333 | indices_workers |
| 334 | } |