#' @name clustering
#' @rdname clustering
#' @aliases clusteringTask1 computeClusters1 computeClusters2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
#' iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
#' from discrete wavelet coefficients). \code{computeClusters1()} and
#' \code{computeClusters2()} correspond to the atomic clustering procedures for
#' stages 1 and 2 respectively. The former applies the clustering algorithm (PAM) to a
#' contributions matrix, while the latter clusters a chunk of series inside one task
#' (of size at most ~nb_series_per_chunk)
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#' \code{getContribs(indices)} outputs a contributions matrix
#' @param contribs Matrix of contributions (e.g. output of \code{curvesToContribs()})
#' @inheritParams computeSynchrones
#' @inheritParams claws
#'
#' @return For \code{clusteringTask1()} and \code{computeClusters1()}, the indices of the
#' computed (K1) medoids. Indices are irrelevant for stage-2 clustering, hence
#' \code{computeClusters2()} outputs a big.matrix of medoids
#' (of size limited by nb_series_per_chunk)
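#'
#' @examples
#' \dontrun{
#' # A minimal sketch on synthetic data (hypothetical toy values; in the real
#' # pipeline, contributions come from curvesToContribs() inside claws()).
#' contribs = matrix(runif(50*10), nrow=50)
#' computeClusters1(contribs, K1=5) #row indices of the 5 medoids
#' # clusteringTask1() also needs an accessor from series indices to contributions:
#' getContribs = function(indices) contribs[indices, , drop=FALSE]
#' clusteringTask1(1:50, getContribs, K1=5, nb_series_per_chunk=20, parll=FALSE)
#' }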
NULL

#' @rdname clustering
#' @export
clusteringTask1 = function(
	indices, getContribs, K1, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	if (verbose)
		cat(paste("*** Clustering task on ",length(indices)," lines\n", sep=""))

	wrapComputeClusters1 = function(inds) {
		if (parll)
			require("epclust", quietly=TRUE)
		if (verbose)
			cat(paste("   computeClusters1() on ",length(inds)," lines\n", sep=""))
		inds[ computeClusters1(getContribs(inds), K1) ]
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
	}
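	# Iteratively reduce the set of indices: cluster each chunk of at most
	# nb_series_per_chunk contributions into K1 medoids, keep the corresponding
	# indices, and repeat until at most K1 indices remain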
	while (length(indices) > K1)
	{
		indices_workers = .spreadIndices(indices, nb_series_per_chunk)
		if (parll)
			indices = unlist( parallel::parLapply(cl, indices_workers, wrapComputeClusters1) )
		else
			indices = unlist( lapply(indices_workers, wrapComputeClusters1) )
	}
	if (parll)
		parallel::stopCluster(cl)

	indices #medoids
}

#' @rdname clustering
#' @export
computeClusters1 = function(contribs, K1)
	cluster::pam(contribs, K1, diss=FALSE)$id.med

#' @rdname clustering
#' @export
computeClusters2 = function(medoids, K2,
	getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
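	# Stage-2 pipeline: build the synchrones from the stage-1 medoids, compute
	# their WER distance matrix, then run PAM on these distances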
	synchrones = computeSynchrones(medoids,
		getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust, verbose, parll)
	distances = computeWerDists(synchrones, ncores_clust, verbose, parll)
	#TODO: if PAM cannot take a big.matrix as input, cast it beforehand (more than OK in RAM)
	medoids[ cluster::pam(distances, K2, diss=TRUE)$medoids , ]
}

#' computeSynchrones
#'
#' Compute the synchrone curves (one per cluster: the sum of the series assigned to
#' that cluster's medoid) from a matrix of medoids, using L2 distances.
#'
#' @param medoids big.matrix of medoids (curves of the same length as the initial series)
#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
#' have been replaced by stage-1 medoids)
#' @param nb_ref_curves Number of reference series (known at this stage)
#' @inheritParams claws
#'
#' @return A big.matrix of size K1 x L, where L = data_length
#'
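#' @examples
#' \dontrun{
#' # Hedged sketch on synthetic data; 'series' and this getRefSeries are hypothetical
#' # stand-ins for the data accessors built by claws().
#' series = matrix(rnorm(20*100), nrow=20)
#' medoids = bigmemory::as.big.matrix(series[1:4, ])
#' getRefSeries = function(indices) series[indices, , drop=FALSE]
#' synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves=20,
#'   nb_series_per_chunk=10, parll=FALSE)
#' }
#'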
#' @export
computeSynchrones = function(medoids, getRefSeries,
	nb_ref_curves, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	#TODO: if parll, getMedoids + serialization, and pass only getMedoids to the nodes
	# --> not ideal... each node would load all the medoids (efficiency) :/ ==> they must fit in RAM
	# worst case: C-ify and load the medoids one by one...
	# BETTER: medoids MUST be a shared big.matrix!

	computeSynchronesChunk = function(indices)
	{
		if (verbose)
			cat(paste("--- Compute synchrones for ",length(indices)," lines\n", sep=""))
		ref_series = getRefSeries(indices)
		mat_meds = medoids[,] #materialize the big.matrix once (sweep() needs a plain matrix)
		#assign each series of the chunk to its closest medoid (L2 distance)
		for (i in seq_len(nrow(ref_series)))
		{
			j = which.min( rowSums( sweep(mat_meds, 2, ref_series[i,], '-')^2 ) )
			if (parll)
				synchronicity::lock(m)
			synchrones[j,] = synchrones[j,] + ref_series[i,]
			counts[j,1] = counts[j,1] + 1
			if (parll)
				synchronicity::unlock(m)
		}
	}

	K = nrow(medoids)
	# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
	# TODO: if size > RAM (not our case), use a file-backed big.matrix
	synchrones = bigmemory::big.matrix(nrow=K, ncol=ncol(medoids), type="double", init=0.)
	counts = bigmemory::big.matrix(nrow=K, ncol=1, type="double", init=0)
	# synchronicity is only for Linux & MacOS; on Windows: run sequentially
	parll = (requireNamespace("synchronicity",quietly=TRUE)
		&& parll && Sys.info()['sysname'] != "Windows")
	if (parll)
		m <- synchronicity::boost.mutex()

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		parallel::clusterExport(cl,
			varlist=c("synchrones","counts","verbose","medoids","getRefSeries","m","parll"),
			envir=environment())
	}

	indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
	ignored <-
		if (parll)
			parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
		else
			lapply(indices_workers, computeSynchronesChunk)

	if (parll)
		parallel::stopCluster(cl)

	#TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 1, counts, '/') )
	for (i in seq_len(K))
		synchrones[i,] = synchrones[i,] / counts[i,1]
	#NOTE: could some clusters be empty? (e.g. when the series already come from stage 2)
	# ...maybe; but let's hope the resulting K1' stays noticeably larger than K2
	# (empty clusters yield 0/0 = NaN rows, filtered out below)
	noNA_rows = sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,])))
	if (all(noNA_rows))
		return (synchrones)
	# Else: some clusters are empty, need to slice synchrones
	synchrones[noNA_rows,]
}

#' computeWerDists
#'
#' Compute the WER distances between the synchrone curves (in rows), as returned
#' (for example) by \code{computeSynchrones()}
#'
#' @param synchrones A big.matrix of synchrones, in rows. The series have the same
#' length as the series of the initial dataset
#' @inheritParams claws
#'
#' @return A big.matrix of size K1 x K1
#'
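#' @examples
#' \dontrun{
#' # Hedged sketch on synthetic synchrones (requires the Rwave package);
#' # a series length of 2^14 comfortably covers the hard-coded noctave = 13 below.
#' synchrones = bigmemory::as.big.matrix(matrix(rnorm(5*2^14), nrow=5))
#' dists = computeWerDists(synchrones, parll=FALSE)
#' }
#'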
#' @export
computeWerDists = function(synchrones, ncores_clust=1, verbose=FALSE, parll=TRUE)
{
	#TODO: re-organize to call computeWerDist(x,y) [in C] (in parallel?) from two indices + a big.matrix

	n <- nrow(synchrones)
	delta <- ncol(synchrones)
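	# n synchrones of length delta; delta enters the normalization of the
	# WER distances computed below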
	#TODO: automatic tuning of all these parameters? (for other users)
	nvoice <- 4
	# noctave = 13: 2^13 = 8192 half-hours ~ 180 days; ~log2(ncol(synchrones))
	noctave = 13
	# the 4 here represents 2^5 = 32 half-hours ~ 1 day
	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
	scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
	s0 = 2
	w0 = 2*pi
	scaled = FALSE
	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
	totnoct = noctave + as.integer(s0log/nvoice) + 1

	computeCWT = function(i)
	{
		if (verbose)
			cat(paste("+++ Compute Rwave::cwt() on series ",i,"\n", sep=""))
		ts <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
		#Normalization
		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
		sqres <- sweep(ts.cwt,2,sqs,'*')
		sqres / max(Mod(sqres))
	}

	if (parll)
	{
		cl = parallel::makeCluster(ncores_clust)
		parallel::clusterExport(cl,
			varlist=c("synchrones","totnoct","nvoice","w0","s0log","noctave","s0","scaled","verbose"),
			envir=environment())
	}

	# list of CWTs from the synchrones
	# TODO: does this fit in RAM? If not, 2 options: serialize, or compute individual distances
	Xcwt4 <-
		if (parll)
			parallel::parLapply(cl, seq_len(n), computeCWT)
		else
			lapply(seq_len(n), computeCWT)

	if (parll)
		parallel::stopCluster(cl)

	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
	fcoefs = rep(1/3, 3) #moving average over 3 values (TODO: very slow! correct?!)
	if (verbose)
		cat("*** Compute WER distances from CWT\n")

	#TODO: use computeDistances(i,j) and spread the n(n-1)/2 index pairs among workers;
	# the current split by lines is too unbalanced
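	# For each pair (i,j), wer2 below is a smoothed, normalized cross-wavelet
	# coherence; the dissimilarity sqrt(delta * nscales * (1 - wer2)) is then 0
	# for fully coherent series (nscales = ncol(Xcwt4[[1]]))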

	computeDistancesLineI = function(i)
	{
		if (verbose)
			cat(paste("   Line ",i,"\n", sep=""))
		for (j in (i+1):n)
		{
			#TODO: 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
			num <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			WX <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
			WY <- stats::filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
			if (parll)
				synchronicity::lock(m)
			Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
			Xwer_dist[j,i] <- Xwer_dist[i,j]
			if (parll)
				synchronicity::unlock(m)
		}
		Xwer_dist[i,i] = 0.
	}

	parll = (requireNamespace("synchronicity",quietly=TRUE)
		&& parll && Sys.info()['sysname'] != "Windows")
	if (parll)
		m <- synchronicity::boost.mutex()

	ignored <-
		if (parll)
		{
			parallel::mclapply(seq_len(n-1), computeDistancesLineI,
				mc.cores=ncores_clust, mc.allow.recursive=FALSE)
		}
		else
			lapply(seq_len(n-1), computeDistancesLineI)
	Xwer_dist[n,n] = 0.
	Xwer_dist
}

# Helper function to divide indices into balanced sets
.spreadIndices = function(indices, nb_per_chunk)
{
	L = length(indices)
	nb_workers = floor( L / nb_per_chunk )
	if (nb_workers == 0)
	{
		# L < nb_per_chunk, simple case
		indices_workers = list(indices)
	}
	else
	{
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
		# Spread the remaining load among the workers
		rem = L %% nb_per_chunk
		while (rem > 0)
		{
			index = rem %% nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
			rem = rem - 1
		}
	}
	indices_workers
}
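
# A quick illustration (not run): .spreadIndices(1:10, 3) first builds the chunks
# 1:3, 4:6 and 7:9, then appends the leftover index 10 to one of them, yielding
# list(c(1,2,3), c(4,5,6,10), c(7,8,9))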