X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fclustering.R;h=886bfbcca2fbd1b52239c2403e3c521d0c2c7f18;hb=e0154a59e55143dac0fbd2a4739a3509bc958e76;hp=4519f44bf36dade463302ac84011c3e004b2d54e;hpb=4204e8774fdafe2db7ed44cd8cae018bc0c4e9d7;p=epclust.git

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index 4519f44..886bfbc 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -1,63 +1,69 @@
-#' @name clustering
-#' @rdname clustering
-#' @aliases clusteringTask1 computeClusters1 computeClusters2
+#' Two-stage clustering, within one task (see \code{claws()})
 #'
-#' @title Two-stage clustering, withing one task (see \code{claws()})
+#' \code{clusteringTask1()} runs one full stage-1 task, which consists of iterated
+#' clustering on nb_curves / ntasks energy contributions, computed through
+#' discrete wavelet coefficients.
+#' \code{clusteringTask2()} runs a full stage-2 task, which consists of WER distance
+#' computations between medoids (indices) output from stage 1, before applying
+#' the second clustering algorithm to the distance matrix.
 #'
-#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists in
-#' iterated stage 1 clustering (on nb_curves / ntasks energy contributions, computed
-#' through discrete wavelets coefficients).
-#' \code{clusteringTask2()} runs a full stage-2 task, which consists in synchrones
-#' and then WER distances computations, before applying the clustering algorithm.
-#' \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
-#' clustering procedures respectively for stage 1 and 2. The former applies the
-#' clustering algorithm (PAM) on a contributions matrix, while the latter clusters
-#' a chunk of series inside one task (~max nb_series_per_chunk)
-#'
-#' @param indices Range of series indices to cluster in parallel (initial data)
 #' @param getContribs Function to retrieve contributions from initial series indices:
-#' \code{getContribs(indices)} outpus a contributions matrix
-#' @param contribs matrix of contributions (e.g. output of \code{curvesToContribs()})
-#' @param distances matrix of K1 x K1 (WER) distances between synchrones
-#' @inheritParams computeSynchrones
+#' \code{getContribs(indices)} outputs a contributions matrix, in columns
 #' @inheritParams claws
+#' @inheritParams computeSynchrones
+#' @inheritParams computeWerDists
+#'
+#' @return The indices of the computed (resp. K1 and K2) medoids.
 #'
-#' @return For \code{clusteringTask1()} and \code{computeClusters1()}, the indices of the
-#' computed (K1) medoids. Indices are irrelevant for stage 2 clustering, thus
-#' \code{computeClusters2()} outputs a big.matrix of medoids
-#' (of size limited by nb_series_per_chunk)
+#' @name clustering
+#' @rdname clustering
+#' @aliases clusteringTask1 clusteringTask2
 NULL
 
 #' @rdname clustering
 #' @export
-clusteringTask1 = function(
-	indices, getContribs, K1, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
+clusteringTask1 <- function(indices, getContribs, K1, algoClust1, nb_items_clust,
+	ncores_clust=3, verbose=FALSE, parll=TRUE)
 {
 	if (verbose)
-		cat(paste("*** Clustering task 1 on ",length(indices)," lines\n", sep=""))
+		cat(paste("*** Clustering task 1 on ",length(indices)," series [start]\n", sep=""))
+
+	if (length(indices) <= K1)
+		return (indices)
 
 	if (parll)
 	{
-		cl = parallel::makeCluster(ncores_clust)
-		parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
+		# outfile=="" to see stderr/stdout on terminal
+		cl <-
+			if (verbose)
+				parallel::makeCluster(ncores_clust, outfile = "")
+			else
+				parallel::makeCluster(ncores_clust)
+		parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
 	}
+	# Iterate clustering algorithm 1 until K1 medoids are found
 	while (length(indices) > K1)
 	{
-		indices_workers = .spreadIndices(indices, nb_series_per_chunk)
+		# Balance tasks by splitting the indices set as evenly as possible
+		indices_workers <- .splitIndices(indices, nb_items_clust, min_size=K1+1)
 		indices <-
 			if (parll)
 			{
 				unlist( parallel::parLapply(cl, indices_workers, function(inds)
 				{
 					require("epclust", quietly=TRUE)
-					inds[ computeClusters1(getContribs(inds), K1, verbose) ]
+					inds[ algoClust1(getContribs(inds), K1) ]
 				}) )
 			}
 			else
 			{
 				unlist( lapply(indices_workers, function(inds)
-					inds[ computeClusters1(getContribs(inds), K1, verbose) ]
+					inds[ algoClust1(getContribs(inds), K1) ]
 				) )
 			}
+		if (verbose)
+		{
+			cat(paste("*** Clustering task 1 on ",length(indices)," medoids [iter]\n", sep=""))
+		}
 	}
 	if (parll)
 		parallel::stopCluster(cl)
@@ -67,275 +73,21 @@ clusteringTask1 = function(
 
 #' @rdname clustering
 #' @export
-clusteringTask2 = function(medoids, K2,
-	getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust=1,verbose=FALSE,parll=TRUE)
+clusteringTask2 <- function(indices, getSeries, K2, algoClust2, nb_series_per_chunk,
+	smooth_lvl, nvoice, nbytes, endian, ncores_clust=3, verbose=FALSE, parll=TRUE)
 {
 	if (verbose)
-		cat(paste("*** Clustering task 2 on ",nrow(medoids)," lines\n", sep=""))
+		cat(paste("*** Clustering task 2 on ",length(indices)," medoids\n", sep=""))
 
-	if (nrow(medoids) <= K2)
-		return (medoids)
-	synchrones = computeSynchrones(medoids,
-		getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust, verbose, parll)
-	distances = computeWerDists(synchrones, ncores_clust, verbose, parll)
-	medoids[ computeClusters2(distances,K2,verbose), ]
-}
+	if (length(indices) <= K2)
+		return (indices)
 
-#' @rdname clustering
-#' @export
-computeClusters1 = function(contribs, K1, verbose=FALSE)
-{
-	if (verbose)
-		cat(paste("   computeClusters1() on ",nrow(contribs)," lines\n", sep=""))
-	cluster::pam(contribs, K1, diss=FALSE)$id.med
-}
-
-#' @rdname clustering
-#' @export
-computeClusters2 = function(distances, K2, verbose=FALSE)
-{
-	if (verbose)
-		cat(paste("   computeClusters2() on ",nrow(distances)," lines\n", sep=""))
-	cluster::pam(distances, K2, diss=TRUE)$id.med
-}
+	# A) Compute the WER distances (Wavelet Extended coefficient of deteRmination)
+	distances <- computeWerDists(indices, getSeries, nb_series_per_chunk,
+		smooth_lvl, nvoice, nbytes, endian, ncores_clust, verbose, parll)
 
-#' computeSynchrones
-#'
-#' Compute the synchrones curves (sum of clusters elements) from a matrix of medoids,
-#' using L2 distances.
-#'
-#' @param medoids big.matrix of medoids (curves of same length as initial series)
-#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
-#' have been replaced by stage-1 medoids)
-#' @param nb_ref_curves How many reference series? (This number is known at this stage)
-#' @inheritParams claws
-#'
-#' @return A big.matrix of size K1 x L where L = data_length
-#'
-#' @export
-computeSynchrones = function(medoids, getRefSeries,
-	nb_ref_curves, nb_series_per_chunk, ncores_clust=1,verbose=FALSE,parll=TRUE)
-{
+	# B) Apply clustering algorithm 2 on the WER distance matrix
 	if (verbose)
-		cat(paste("--- Compute synchrones\n", sep=""))
-
-	computeSynchronesChunk = function(indices)
-	{
-		if (parll)
-		{
-			require("bigmemory", quietly=TRUE)
-			requireNamespace("synchronicity", quietly=TRUE)
-			require("epclust", quietly=TRUE)
-			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
-			counts <- bigmemory::attach.big.matrix(counts_desc)
-			medoids <- bigmemory::attach.big.matrix(medoids_desc)
-			m <- synchronicity::attach.mutex(m_desc)
-		}
-
-		ref_series = getRefSeries(indices)
-		nb_series = nrow(ref_series)
-
-		#get medoids indices for this chunk of series
-		mi = computeMedoidsIndices(medoids@address, ref_series)
-
-		for (i in seq_len(nb_series))
-		{
-			if (parll)
-				synchronicity::lock(m)
-			synchrones[ mi[i], ] = synchrones[ mi[i], ] + ref_series[i,]
-			counts[ mi[i] ] = counts[ mi[i] ] + 1 #TODO: remove counts?
-			if (parll)
-				synchronicity::unlock(m)
-		}
-	}
-
-	K = nrow(medoids) ; L = ncol(medoids)
-	# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
-	# TODO: if size > RAM (not our case), use file-backed big.matrix
-	synchrones = bigmemory::big.matrix(nrow=K, ncol=L, type="double", init=0.)
-	counts = bigmemory::big.matrix(nrow=K, ncol=1, type="double", init=0)
-	# synchronicity is only for Linux & MacOS; on Windows: run sequentially
-	parll = (requireNamespace("synchronicity",quietly=TRUE)
-		&& parll && Sys.info()['sysname'] != "Windows")
-	if (parll)
-	{
-		m <- synchronicity::boost.mutex()
-		m_desc <- synchronicity::describe(m)
-		synchrones_desc = bigmemory::describe(synchrones)
-		counts_desc = bigmemory::describe(counts)
-		medoids_desc = bigmemory::describe(medoids)
-		cl = parallel::makeCluster(ncores_clust)
-		parallel::clusterExport(cl, varlist=c("synchrones_desc","counts_desc","counts",
-			"verbose","m_desc","medoids_desc","getRefSeries"), envir=environment())
-	}
-
-	indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
-	ignored <-
-		if (parll)
-			parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
-		else
-			lapply(indices_workers, computeSynchronesChunk)
-
-	if (parll)
-		parallel::stopCluster(cl)
-
-	#TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 1, counts, '/') )
-	for (i in seq_len(K))
-		synchrones[i,] = synchrones[i,] / counts[i,1]
-	#NOTE: odds for some clusters to be empty? (when series already come from stage 2)
-	# ...maybe; but let's hope resulting K1' be still quite bigger than K2
-	noNA_rows = sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,])))
-	if (all(noNA_rows))
-		return (synchrones)
-	# Else: some clusters are empty, need to slice synchrones
-	synchrones[noNA_rows,]
-}
-
-#' computeWerDists
-#'
-#' Compute the WER distances between the synchrones curves (in rows), which are
-#' returned (e.g.) by \code{computeSynchrones()}
-#'
-#' @param synchrones A big.matrix of synchrones, in rows. The series have same length
-#' as the series in the initial dataset
-#' @inheritParams claws
-#'
-#' @return A matrix of size K1 x K1
-#'
-#' @export
-computeWerDists = function(synchrones, ncores_clust=1,verbose=FALSE,parll=TRUE)
-{
-	if (verbose)
-		cat(paste("--- Compute WER dists\n", sep=""))
-
-	n <- nrow(synchrones)
-	delta <- ncol(synchrones)
-	#TODO: automatic tune of all these parameters ? (for other users)
-	nvoice <- 4
-	# noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(synchrones))
-	noctave = 13
-	# 4 here represent 2^5 = 32 half-hours ~ 1 day
-	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
-	scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
-	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
-	s0=2
-	w0=2*pi
-	scaled=FALSE
-	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
-	totnoct = noctave + as.integer(s0log/nvoice) + 1
-
-	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
-
-	cwt_file = ".epclust_bin/cwt"
-	#TODO: args, nb_per_chunk, nbytes, endian
-
-	# Generate n(n-1)/2 pairs for WER distances computations
-	pairs = list()
-	V = seq_len(n)
-	for (i in 1:n)
-	{
-		V = V[-1]
-		pairs = c(pairs, lapply(V, function(v) c(i,v)))
-	}
-
-	computeSaveCWT = function(index)
-	{
-		ts <- scale(ts(synchrones[index,]), center=TRUE, scale=scaled)
-		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
-		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
-		#Normalization
-		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
-		sqres <- sweep(ts.cwt,2,sqs,'*')
-		res <- sqres / max(Mod(sqres))
-		#TODO: serialize the CWTs, retrieve them via getDataInFile;
-		#--> OK, just store them as simple series of size delta*ncol (53*17519)
-		binarize(res, cwt_file, 100, ",", nbytes, endian)
-	}
-
-	if (parll)
-	{
-		cl = parallel::makeCluster(ncores_clust)
-		synchrones_desc <- bigmemory::describe(synchrones)
-		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
-		parallel::clusterExport(cl, varlist=c("synchrones_desc","Xwer_dist_desc","totnoct",
-			"nvoice","w0","s0log","noctave","s0","verbose","getCWT"), envir=environment())
-	}
-
-	#precompute and serialize all CWT
-	ignored <-
-		if (parll)
-			parallel::parLapply(cl, 1:n, computeSaveCWT)
-		else
-			lapply(1:n, computeSaveCWT)
-
-	getCWT = function(index)
-	{
-		#from cwt_file ...
-	}
-
-	# Distance between rows i and j
-	computeDistancesIJ = function(pair)
-	{
-		if (parll)
-		{
-			require("bigmemory", quietly=TRUE)
-			require("epclust", quietly=TRUE)
-			synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
-			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
-		}
-
-		i = pair[1] ; j = pair[2]
-		if (verbose && j==i+1)
-			cat(paste("   Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
-		cwt_i <- getCWT(i)
-		cwt_j <- getCWT(j)
-
-		num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
-		WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
-		WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
-		wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
-		Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
-		Xwer_dist[j,i] <- Xwer_dist[i,j]
-		Xwer_dist[i,i] = 0.
-	}
-
-	ignored <-
-		if (parll)
-			parallel::parLapply(cl, pairs, computeDistancesIJ)
-		else
-			lapply(pairs, computeDistancesIJ)
-
-	if (parll)
-		parallel::stopCluster(cl)
-
-	Xwer_dist[n,n] = 0.
-	distances <- Xwer_dist[,]
-	rm(Xwer_dist) ; gc()
-	distances #~small matrix K1 x K1
-}
-
-# Helper function to divide indices into balanced sets
-.spreadIndices = function(indices, nb_per_chunk)
-{
-	L = length(indices)
-	nb_workers = floor( L / nb_per_chunk )
-	if (nb_workers == 0)
-	{
-		# L < nb_series_per_chunk, simple case
-		indices_workers = list(indices)
-	}
-	else
-	{
-		indices_workers = lapply( seq_len(nb_workers), function(i)
-			indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
-		# Spread the remaining load among the workers
-		rem = L %% nb_per_chunk
-		while (rem > 0)
-		{
-			index = rem%%nb_workers + 1
-			indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
-			rem = rem - 1
-		}
-	}
-	indices_workers
+		cat(paste("*** algoClust2() on ",nrow(distances)," items\n", sep=""))
+	indices[ algoClust2(distances,K2) ]
 }
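
With the new signatures, the clustering algorithms are supplied by the caller: clusteringTask1() expects algoClust1(contribs, K) returning K medoid indices, and clusteringTask2() expects algoClust2(distances, K). A minimal sketch of compatible arguments, reusing the PAM calls from the removed computeClusters1()/computeClusters2() wrappers; the t() transpose is an assumption based on the updated documentation, which says getContribs() returns contributions in columns while cluster::pam() clusters rows:

	library(cluster)  # provides pam()

	# Stage 1: PAM on the contributions; series sit in columns, pam() clusters
	# rows, hence the (assumed) transpose. $id.med gives the medoid indices,
	# the same output convention as the removed computeClusters1()
	algoClust1 <- function(contribs, K)
		cluster::pam(t(contribs), K, diss=FALSE)$id.med

	# Stage 2: PAM in dissimilarity mode on the K1 x K1 WER distance matrix,
	# exactly the call found in the removed computeClusters2()
	algoClust2 <- function(distances, K)
		cluster::pam(distances, K, diss=TRUE)$id.med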
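
The WER dissimilarity itself survives the refactor inside computeWerDists(); its per-pair kernel, as it stood in the removed computeDistancesIJ(), reads well in isolation. A self-contained restatement, where epclustFilter() is the package's smoothing filter and delta the series length, both taken from the removed code:

	# WER dissimilarity between the CWT matrices of two series: smooth the
	# cross- and auto-spectra, form wer2 (a wavelet analog of a squared
	# coefficient of determination), then map it to a distance
	werDist <- function(cwt_i, cwt_j, delta)
	{
		num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
		WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
		WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
		wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
		# wer2 should stay below 1; the clamp guards against rounding overshoot
		sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.))
	}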
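
Finally, clusteringTask1() now chunks its indices through .splitIndices(indices, nb_items_clust, min_size=K1+1), a helper not shown in this diff (it supersedes the removed .spreadIndices()). A hypothetical sketch of such a splitter, assuming only what the call site implies: chunks of about nb_items elements, never fewer than min_size each, so every chunk keeps more than K1 series and each iteration of the while loop shrinks the index set:

	# Hypothetical sketch, not the package's actual .splitIndices()
	splitIndicesSketch <- function(indices, nb_items, min_size=1)
	{
		L <- length(indices)
		# Aim at nb_items per chunk, but cap the chunk count so that
		# floor(L / nb_chunks) >= min_size
		nb_chunks <- max(1L, min(L %/% nb_items, L %/% min_size))
		# Round-robin assignment: chunk sizes differ by at most one
		unname(split(indices, rep(seq_len(nb_chunks), length.out=L)))
	}

	# Example: splitIndicesSketch(1:10, 4, min_size=3) yields two chunks of 5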