X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fclustering.R;h=2ce4267ef35e717a7bea0a8667f648367d38fbcf;hb=d9bb53c5e1392018bf67f92140edb10137f3423c;hp=493f90f31c1c20d0cfb591d4acc043ff32505a8a;hpb=8702eb86906bd6d59e07bb887e690a20f29be63f;p=epclust.git

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index 493f90f..2ce4267 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -1,119 +1,334 @@
-# Cluster one full task (nb_curves / ntasks series); only step 1
-clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
+#' @name clustering
+#' @rdname clustering
+#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
+#'
+#' @title Two-stage clustering, within one task (see \code{claws()})
+#'
+#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists of
+#' iterated stage-1 clustering (on nb_curves / ntasks energy contributions, computed
+#' through discrete wavelet coefficients).
+#' \code{clusteringTask2()} runs a full stage-2 task, which consists of computing the
+#' synchrones and then the WER distances, before applying the clustering algorithm.
+#' \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
+#' clustering procedures for stages 1 and 2 respectively. The former applies the
+#' first clustering algorithm to a contributions matrix, while the latter clusters
+#' a set of series inside one task (~nb_items_clust1)
+#'
+#' @param indices Range of series indices to cluster in parallel (initial data)
+#' @param getContribs Function to retrieve contributions from initial series indices:
+#' \code{getContribs(indices)} outputs a contributions matrix
+#' @inheritParams computeSynchrones
+#' @inheritParams claws
+#'
+#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
+#' Indices are irrelevant for stage 2 clustering, thus \code{clusteringTask2()}
+#' outputs a big.matrix of medoids (of size L x K2, where K2 = final number of clusters)
+NULL
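+
+# Usage sketch (hypothetical, not part of the package API): a PAM-based algoClust1
+# matching the previous hard-coded call, with a contributions matrix 'contribs'
+# (one series per row) small enough to fit in memory:
+#   algoClust1 <- function(ctrbs, K) cluster::pam(ctrbs, K, diss=FALSE)$id.med
+#   getContribs <- function(indices) contribs[indices, , drop=FALSE]
+#   medoids_ids <- clusteringTask1(seq_len(nrow(contribs)), getContribs, K1=50,
+#      algoClust1, nb_items_clust1=500, parll=FALSE)
+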
+#' @rdname clustering
+#' @export
+clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
+   ncores_clust=1, verbose=FALSE, parll=TRUE)
 {
-   cl = parallel::makeCluster(ncores)
-   parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
-   repeat
+   if (parll)
    {
-      nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
-      indices_workers = lapply( seq_len(nb_workers), function(i)
-         indices[(nb_series_per_chunk*(i-1)+1):(nb_series_per_chunk*i)] )
-      # Spread the remaining load among the workers
-      rem = length(indices) %% nb_series_per_chunk
-      while (rem > 0)
-      {
-         index = rem%%nb_workers + 1
-         indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem))
-         rem = rem - 1
-      }
-      indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
-         require("epclust", quietly=TRUE)
-         inds[ computeClusters1(getCoefs(inds), K1) ]
-      } ) )
-      if (length(indices) == K1)
-         break
+      cl = parallel::makeCluster(ncores_clust, outfile = "")
+      parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
    }
-   parallel::stopCluster(cl)
+   # Iterate clustering algorithm 1 until K1 medoids are found
+   while (length(indices) > K1)
+   {
+      # Balance tasks by splitting the indices set - as evenly as possible
+      indices_workers = .spreadIndices(indices, nb_items_clust1)
+      if (verbose)
+         cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
+      indices <-
+      if (parll)
+      {
+         unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+            require("epclust", quietly=TRUE)
+            inds[ algoClust1(getContribs(inds), K1) ]
+         }) )
+      }
+      else
+      {
+         unlist( lapply(indices_workers, function(inds)
+            inds[ algoClust1(getContribs(inds), K1) ]
+         ) )
+      }
+   }
+   if (parll)
+      parallel::stopCluster(cl)
+
+   indices #medoids
 }
 
-# Apply the clustering algorithm (PAM) on a coeffs or distances matrix
-computeClusters1 = function(coefs, K1)
-   cluster::pam(coefs, K1, diss=FALSE)$id.med
-
-# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
-computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
+#' @rdname clustering
+#' @export
+clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
+   nb_series_per_chunk, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
 {
-   synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
-   medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
+   if (verbose)
+      cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))
+
+   if (ncol(medoids) <= K2)
+      return (medoids)
+
+   # A) Obtain synchrones, that is to say the cumulated power consumptions
+   #    for each of the K1 initial groups
+   synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
+      nb_series_per_chunk, ncores_clust, verbose, parll)
+
+   # B) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
+   distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll)
+
+   # C) Apply clustering algorithm 2 on the WER distances matrix
+   if (verbose)
+      cat(paste("   algoClust2() on ",nrow(distances)," items\n", sep=""))
+   medoids[ ,algoClust2(distances,K2) ]
 }
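+
+# Similarly (sketch, assuming the 'cluster' package), a PAM-based algoClust2
+# matching the hard-coded call removed above, where 'dists' is the WER
+# distances matrix:
+#   algoClust2 <- function(dists, K) cluster::pam(dists, K, diss=TRUE)$id.med
 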
-# Compute the synchrones curves (sum of clusters elements) from a clustering result
-computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
+#' computeSynchrones
+#'
+#' Compute the synchrones curves (sum of clusters elements) from a matrix of medoids,
+#' using Euclidean distance.
+#'
+#' @param medoids big.matrix of medoids (curves of same length as initial series)
+#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
+#' have been replaced by stage-1 medoids)
+#' @param nb_ref_curves Number of reference series (this number is known at this stage)
+#' @inheritParams claws
+#'
+#' @return A big.matrix of size L x K1, where L = length of a series
+#'
+#' @export
+computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
+   nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
 {
-   K = nrow(medoids)
-   synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
-   counts = rep(0,K)
-   index = 1
-   repeat
+   # Synchrones computation is embarrassingly parallel: compute it by chunks of series
+   computeSynchronesChunk = function(indices)
    {
-      range = (index-1) + seq_len(nb_series_per_chunk)
-      ref_series = getRefSeries(range)
-      if (is.null(ref_series))
-         break
-      #get medoids indices for this chunk of series
-      for (i in seq_len(nrow(ref_series)))
+      if (parll)
       {
-         j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
-         synchrones[j,] = synchrones[j,] + ref_series[i,]
-         counts[j] = counts[j] + 1
+         require("bigmemory", quietly=TRUE)
+         requireNamespace("synchronicity", quietly=TRUE)
+         require("epclust", quietly=TRUE)
+         # The big.matrix objects need to be attached to be usable on the workers
+         synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+         medoids <- bigmemory::attach.big.matrix(medoids_desc)
+         m <- synchronicity::attach.mutex(m_desc)
+      }
+
+      # Obtain a chunk of reference series
+      ref_series = getRefSeries(indices)
+      nb_series = ncol(ref_series)
+
+      # Get medoids indices for this chunk of series
+      mi = computeMedoidsIndices(medoids@address, ref_series)
+
+      # Update synchrones using mi above
+      for (i in seq_len(nb_series))
+      {
+         if (parll)
+            synchronicity::lock(m) # lock required: several workers may write at the same time
+         synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
+         if (parll)
+            synchronicity::unlock(m)
       }
-      index = index + nb_series_per_chunk
    }
-   #NOTE: odds for some clusters to be empty? (when series already come from stage 2)
-   #    ...maybe; but let's hope resulting K1' be still quite bigger than K2
-   synchrones = sweep(synchrones, 1, counts, '/')
-   synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
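+
+   # Note: computeMedoidsIndices(), used in computeSynchronesChunk above, is a
+   # compiled epclust helper (hence the medoids@address argument). A pure-R sketch
+   # of the same assignment step, mirroring the removed code above, could be:
+   #    mi <- sapply( seq_len(ncol(ref_series)), function(i)
+   #       which.min( colSums( (medoids[,] - ref_series[,i])^2 ) ) )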
+
+   K = ncol(medoids) ; L = nrow(medoids)
+   # Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in parallel
+   synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
+   # NOTE: synchronicity is only for Linux & MacOS; on Windows: run sequentially
+   parll = (requireNamespace("synchronicity",quietly=TRUE)
+      && parll && Sys.info()['sysname'] != "Windows")
+   if (parll)
+   {
+      m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk
+      # mutex and big.matrix objects cannot be passed directly:
+      # they will be accessed from their description
+      m_desc <- synchronicity::describe(m)
+      synchrones_desc = bigmemory::describe(synchrones)
+      medoids_desc = bigmemory::describe(medoids)
+      cl = parallel::makeCluster(ncores_clust)
+      parallel::clusterExport(cl, envir=environment(),
+         varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries"))
+   }
+
+   if (verbose)
+      cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))
+
+   # Balance tasks by splitting the indices set - maybe not so evenly, but
+   # max==TRUE in the next call ensures that no set has more than nb_series_per_chunk items.
+   indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE)
+   ignored <-
+   if (parll)
+      parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
+   else
+      lapply(indices_workers, computeSynchronesChunk)
+
+   if (parll)
+      parallel::stopCluster(cl)
+
+   return (synchrones)
 }
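 
+# Toy sketch (hypothetical values, assuming epclust is loaded): with two medoids
+# of length 3 and four series, each series is added to the synchrone of its
+# closest medoid (Euclidean distance):
+#   medoids <- bigmemory::as.big.matrix( matrix(c(0,0,0, 1,1,1), ncol=2) )
+#   series <- matrix(c(0,0,.1, .9,1,1, 0,.1,0, 1,1,.9), ncol=4)
+#   getRefSeries <- function(indices) series[ ,indices, drop=FALSE]
+#   computeSynchrones(medoids, getRefSeries, 4, 2, parll=FALSE)
+#   # -> synchrone 1 = series 1 + series 3 ; synchrone 2 = series 2 + series 4
 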
-# Compute the WER distance between the synchrones curves (in rows)
-computeWerDists = function(curves)
+#' computeWerDists
+#'
+#' Compute the WER distances between the synchrones curves (in columns), which are
+#' returned (e.g.) by \code{computeSynchrones()}
+#'
+#' @param synchrones A big.matrix of synchrones, in columns. The series have the same
+#' length as the series in the initial dataset
+#' @inheritParams claws
+#'
+#' @return A matrix of size K1 x K1
+#'
+#' @export
+computeWerDists = function(synchrones, nbytes, endian, ncores_clust=1, verbose=FALSE, parll=TRUE)
 {
-   if (!require("Rwave", quietly=TRUE))
-      stop("Unable to load Rwave library")
-   n <- nrow(curves)
-   delta <- ncol(curves)
+   n <- ncol(synchrones)
+   L <- nrow(synchrones)
    #TODO: automatic tuning of all these parameters? (for other users)
+   # 4 here represents 2^5 = 32 half-hours ~ 1 day
    nvoice <- 4
-   # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(curves))
+   # noctave = 2^13 = 8192 half-hours ~ 180 days ; ~log2(ncol(synchrones))
    noctave = 13
-   # 4 here represent 2^5 = 32 half-hours ~ 1 day
-   #NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
-   scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
-   #condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
-   s0=2
-   w0=2*pi
-   scaled=FALSE
-   s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
-   totnoct = noctave + as.integer(s0log/nvoice) + 1
-
-   # (normalized) observations node with CWT
-   Xcwt4 <- lapply(seq_len(n), function(i) {
-      ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
-      totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
+   # CWT parameters, kept from the previous implementation
+   # condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
+   s0 = 2
+   w0 = 2*pi
+   s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
+   totnoct = noctave + as.integer(s0log/nvoice) + 1
+
+   Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
+
+   cwt_file = ".epclust_bin/cwt"
+   #TODO: args, nb_per_chunk, nbytes, endian
+
+   # Generate n(n-1)/2 pairs for WER distances computations
+   pairs = list()
+   V = seq_len(n)
+   for (i in 1:n)
+   {
+      V = V[-1]
+      pairs = c(pairs, lapply(V, function(v) c(i,v)))
+   }
+
+   # Compute, normalize and serialize the CWT of synchrone number 'index'
+   computeSaveCWT = function(index)
+   {
+      if (parll)
+      {
+         require("bigmemory", quietly=TRUE)
+         require("epclust", quietly=TRUE)
+         # The synchrones big.matrix must be attached to be usable on the workers
+         synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+      }
+      ts <- scale(ts(synchrones[,index]), center=TRUE, scale=FALSE)
+      totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
       ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
       #Normalization
       sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
-      sqres <- sweep(ts.cwt,MARGIN=2,sqs,'*')
-      sqres / max(Mod(sqres))
-   })
+      sqres <- sweep(ts.cwt,2,sqs,'*')
+      res <- sqres / max(Mod(sqres))
+      #TODO: serialize the CWTs, retrieve them via getDataInFile;
+      #--> OK, just store them as plain series of size L*n' (53*17519)
+      binarize(c(as.double(Re(res)),as.double(Im(res))), cwt_file, ncol(res), ",", nbytes, endian)
+   }
+
+   # Retrieve the CWT of synchrone number 'index' from cwt_file
+   getCWT = function(index)
+   {
+      res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
+      ###############TODO:
+   }
+
+   if (parll)
+   {
+      cl = parallel::makeCluster(ncores_clust)
+      synchrones_desc <- bigmemory::describe(synchrones)
+      Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
+      parallel::clusterExport(cl, envir=environment(),
+         varlist=c("synchrones_desc","Xwer_dist_desc","totnoct","nvoice","w0","s0log",
+            "noctave","s0","verbose","getCWT"))
+   }
+
+   if (verbose)
+      cat(paste("--- Precompute and serialize all CWT\n", sep=""))
+   ignored <-
+   if (parll)
+      parallel::parLapply(cl, 1:n, computeSaveCWT)
+   else
+      lapply(1:n, computeSaveCWT)
 
-   Xwer_dist <- matrix(0., n, n)
-   fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
-   for (i in 1:(n-1))
-   {
-      for (j in (i+1):n)
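+   # WER dissimilarity (sketch of the formula implemented in computeDistancesIJ
+   # below): with num = Mod(cwt_i * Conj(cwt_j)) and WX, WY the corresponding
+   # auto-terms, all smoothed column-wise by epclustFilter(),
+   #    wer2 = sum(colSums(num)^2) / sum( colSums(WX) * colSums(WY) )
+   #    dist(i,j) = sqrt( L * ncol(cwt_i) * max(1 - wer2, 0) )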
+   # Distance between synchrones i and j
+   computeDistancesIJ = function(pair)
+   {
+      if (parll)
       {
-         #TODO: later, compute CWT here (because not enough storage space for 200k series)
-         #   'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
-         num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-         WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
-         WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-         wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
-         Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
-         Xwer_dist[j,i] <- Xwer_dist[i,j]
+         require("bigmemory", quietly=TRUE)
+         require("epclust", quietly=TRUE)
+         synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+         Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
+      }
+
+      i = pair[1] ; j = pair[2]
+      if (verbose && j==i+1)
+         cat(paste("   Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
+      cwt_i <- getCWT(i)
+      cwt_j <- getCWT(j)
+
+      num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+      WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+      WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
+      wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
+      Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * max(1 - wer2, 0.))
+      Xwer_dist[j,i] <- Xwer_dist[i,j]
+      Xwer_dist[i,i] = 0.
+   }
+
+   if (verbose)
+      cat(paste("--- Compute WER dists\n", sep=""))
+   ignored <-
+   if (parll)
+      parallel::parLapply(cl, pairs, computeDistancesIJ)
+   else
+      lapply(pairs, computeDistancesIJ)
+
+   if (parll)
+      parallel::stopCluster(cl)
+
+   Xwer_dist[n,n] = 0.
+   distances <- Xwer_dist[,]
+   rm(Xwer_dist) ; gc()
+   distances #~small matrix K1 x K1
+}
+
+# Helper function to divide indices into balanced sets
+# If max == TRUE, set sizes cannot exceed nb_per_set
+.spreadIndices = function(indices, nb_per_set, max=FALSE)
+{
+   L = length(indices)
+   nb_workers = floor( L / nb_per_set )
+   rem = L %% nb_per_set
+   if (nb_workers == 0 || (nb_workers==1 && rem==0))
+   {
+      # L <= nb_per_set: simple case, a single set
+      indices_workers = list(indices)
+   }
+   else
+   {
+      indices_workers = lapply( seq_len(nb_workers), function(i)
+         indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
+
+      if (max)
+      {
+         # Sets are not so well balanced, but none exceeds nb_per_set items
+         if (rem > 0)
+            indices_workers = c( indices_workers, list(indices[(L-rem+1):L]) )
+         return (indices_workers)
+      }
+
+      # Spread the remaining load among the workers
+      while (rem > 0)
+      {
+         index = rem%%nb_workers + 1
+         indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
+         rem = rem - 1
+      }
+   }
-   diag(Xwer_dist) <- numeric(n)
-   Xwer_dist
+   indices_workers
 }
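+
+# Examples (sketch): .spreadIndices(1:10, 3) yields list(1:3, c(4:6, 10), 7:9);
+# with max=TRUE the remainder gets its own set: list(1:3, 4:6, 7:9, 10)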