X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fclustering.R;h=886bfbcca2fbd1b52239c2403e3c521d0c2c7f18;hb=e0154a59e55143dac0fbd2a4739a3509bc958e76;hp=640837064273f0947ce82a2c9d2130ee37268221;hpb=4bcfdbee4e2157f232427a5bfdf240f34760110d;p=epclust.git

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index 6408370..886bfbc 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -1,175 +1,93 @@
-#' @name clustering
-#' @rdname clustering
-#' @aliases clusteringTask computeClusters1 computeClusters2
-#'
-#' @title Two-stages clustering, withing one task (see \code{claws()})
+#' Two-stage clustering, within one task (see \code{claws()})
 #'
-#' @description \code{clusteringTask()} runs one full task, which consists in iterated stage 1
-#' clustering (on nb_curves / ntasks energy contributions, computed through discrete
-#' wavelets coefficients). \code{computeClusters1()} and \code{computeClusters2()}
-#' correspond to the atomic clustering procedures respectively for stage 1 and 2.
-#' The former applies the clustering algorithm (PAM) on a contributions matrix, while
-#' the latter clusters a chunk of series inside one task (~max nb_series_per_chunk)
+#' \code{clusteringTask1()} runs one full stage-1 task, which consists of iterated
+#' clustering on nb_curves / ntasks energy contributions, computed through
+#' discrete wavelet coefficients.
+#' \code{clusteringTask2()} runs a full stage-2 task, which consists of WER distance
+#' computations between the medoids (indices) output by stage 1, before applying
+#' the second clustering algorithm to the distance matrix.
 #'
-#' @param indices Range of series indices to cluster in parallel (initial data)
 #' @param getContribs Function to retrieve contributions from initial series indices:
-#' \code{getContribs(indices)} outpus a contributions matrix
-#' @param contribs matrix of contributions (e.g. output of \code{curvesToContribs()})
-#' @inheritParams computeSynchrones
+#' \code{getContribs(indices)} outputs a contributions matrix, in columns
 #' @inheritParams claws
+#' @inheritParams computeSynchrones
+#' @inheritParams computeWerDists
 #'
-#' @return For \code{clusteringTask()} and \code{computeClusters1()}, the indices of the
-#' computed (K1) medoids. Indices are irrelevant for stage 2 clustering, thus
-#' \code{computeClusters2()} outputs a matrix of medoids
-#' (of size limited by nb_series_per_chunk)
+#' @return The indices of the computed (resp. K1 and K2) medoids.
+#'
+#' @name clustering
+#' @rdname clustering
+#' @aliases clusteringTask1 clusteringTask2
 NULL

 #' @rdname clustering
 #' @export
-clusteringTask = function(indices, getContribs, K1, nb_series_per_chunk, ncores_clust)
+clusteringTask1 <- function(indices, getContribs, K1, algoClust1, nb_items_clust,
+  ncores_clust=3, verbose=FALSE, parll=TRUE)
 {
+  if (verbose)
+    cat(paste("*** Clustering task 1 on ",length(indices)," series [start]\n", sep=""))

-#NOTE: comment out parallel sections for debugging
-#propagate verbose arg ?!
+  if (length(indices) <= K1)
+    return (indices)

-#  cl = parallel::makeCluster(ncores_clust)
-#  parallel::clusterExport(cl, varlist=c("getContribs","K1"), envir=environment())
-  repeat
+  if (parll)
   {
-
-print(length(indices))
-
-    nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
-    indices_workers = lapply( seq_len(nb_workers), function(i)
-      indices[(nb_series_per_chunk*(i-1)+1):(nb_series_per_chunk*i)] )
-    # Spread the remaining load among the workers
-    rem = length(indices) %% nb_series_per_chunk
-    while (rem > 0)
+    # outfile=="" to see stderr/stdout on terminal
+    cl <-
+      if (verbose)
+        parallel::makeCluster(ncores_clust, outfile = "")
+      else
+        parallel::makeCluster(ncores_clust)
+    parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
+  }
+  # Iterate clustering algorithm 1 until at most K1 medoids remain
+  while (length(indices) > K1)
+  {
+    # Balance tasks by splitting the indices set as evenly as possible
+    indices_workers <- .splitIndices(indices, nb_items_clust, min_size=K1+1)
+    indices <-
+      if (parll)
+      {
+        unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+          require("epclust", quietly=TRUE)
+          inds[ algoClust1(getContribs(inds), K1) ]
+        }) )
+      }
+      else
+      {
+        unlist( lapply(indices_workers, function(inds)
+          inds[ algoClust1(getContribs(inds), K1) ]
+        ) )
+      }
+    if (verbose)
     {
-      index = rem%%nb_workers + 1
-      indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem))
-      rem = rem - 1
+      cat(paste("*** Clustering task 1 on ",length(indices)," medoids [iter]\n", sep=""))
     }
-#    indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
-    indices = unlist( lapply( indices_workers, function(inds) {
-#      require("epclust", quietly=TRUE)
-
-print(paste("  ",length(inds))) ## PROBLEM HERE: 21104 ??!
-
-      inds[ computeClusters1(getContribs(inds), K1) ]
-    } ) )
-    if (length(indices) == K1)
-      break
   }
-#  parallel::stopCluster(cl)
+  if (parll)
+    parallel::stopCluster(cl)
+  indices #medoids
 }
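Note on the new plug-in API: algoClust1 replaces the PAM call that the removed clusteringTask() hard-coded via computeClusters1(). The sketch below, which is an illustration and not part of the diff, shows one compatible plug-in under the assumption that the cluster package is available; since getContribs() now returns contributions in columns while PAM clusters rows, the matrix is transposed first.

library(cluster)
# Stage-1 plug-in: return, within 'contribs', the column indices of K medoids.
# PAM clusters rows, and contributions come one series per column, hence t().
algoClust1 <- function(contribs, K)
  pam(t(contribs), K, diss=FALSE)$id.med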
 #' @rdname clustering
 #' @export
-computeClusters1 = function(contribs, K1)
-  cluster::pam(contribs, K1, diss=FALSE)$id.med
-
-#' @rdname clustering
-#' @export
-computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
-{
-  synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
-  medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
-}
-
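The removed computeClusters2() above hard-wired PAM on the WER distance matrix; under the new API that step is supplied through the algoClust2 argument. A possible plug-in follows, again only a sketch assuming the cluster package, mirroring the deleted call but returning indices rather than a medoids matrix.

library(cluster)
# Stage-2 plug-in: given a symmetric distance matrix,
# return the indices of the K medoids.
algoClust2 <- function(dists, K)
  pam(dists, K, diss=TRUE)$id.med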
-#' computeSynchrones
-#'
-#' Compute the synchrones curves (sum of clusters elements) from a matrix of medoids,
-#' using L2 distances.
-#'
-#' @param medoids Matrix of medoids (curves of same legnth as initial series)
-#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
-#' have been replaced by stage-1 medoids)
-#' @inheritParams claws
-#'
-#' @export
-computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
+clusteringTask2 <- function(indices, getSeries, K2, algoClust2, nb_series_per_chunk,
+  smooth_lvl, nvoice, nbytes, endian, ncores_clust=3, verbose=FALSE, parll=TRUE)
 {
-  K = nrow(medoids)
-  synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
-  counts = rep(0,K)
-  index = 1
-  repeat
-  {
-    range = (index-1) + seq_len(nb_series_per_chunk)
-    ref_series = getRefSeries(range)
-    if (is.null(ref_series))
-      break
-    #get medoids indices for this chunk of series
-    for (i in seq_len(nrow(ref_series)))
-    {
-      j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
-      synchrones[j,] = synchrones[j,] + ref_series[i,]
-      counts[j] = counts[j] + 1
-    }
-    index = index + nb_series_per_chunk
-  }
-  #NOTE: odds for some clusters to be empty? (when series already come from stage 2)
-  # ...maybe; but let's hope resulting K1' be still quite bigger than K2
-  synchrones = sweep(synchrones, 1, counts, '/')
-  synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
-}
+  if (verbose)
+    cat(paste("*** Clustering task 2 on ",length(indices)," medoids\n", sep=""))

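For context, the synchrone computation removed above assigns each series to its closest medoid in L2 distance, sums the series per cluster, then averages by cluster size. A standalone sketch of that logic, with hypothetical toy data:

# Sketch of the removed synchrone computation (toy data for illustration only)
medoids <- matrix(rnorm(3 * 100), nrow=3)    # K medoids, one curve per row
series  <- matrix(rnorm(10 * 100), nrow=10)  # a chunk of 10 reference series
K <- nrow(medoids)
synchrones <- matrix(0, nrow=K, ncol=ncol(medoids))
counts <- rep(0, K)
for (i in seq_len(nrow(series)))
{
  # index of the medoid minimizing the squared L2 distance to series i
  j <- which.min( rowSums( sweep(medoids, 2, series[i,], '-')^2 ) )
  synchrones[j,] <- synchrones[j,] + series[i,]
  counts[j] <- counts[j] + 1
}
# Average each synchrone; empty clusters yield NaN rows, filtered by the
# original code with the sapply(..., all(!is.nan(...))) guard
synchrones <- sweep(synchrones, 1, counts, '/')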
-#' computeWerDists
-#'
-#' Compute the WER distances between the synchrones curves (in rows), which are
-#' returned (e.g.) by \code{computeSynchrones()}
-#'
-#' @param synchrones A matrix of synchrones, in rows. The series have same length as the
-#' series in the initial dataset
-#'
-#' @export
-computeWerDists = function(synchrones)
-{
-  n <- nrow(synchrones)
-  delta <- ncol(synchrones)
-  #TODO: automatic tune of all these parameters ? (for other users)
-  nvoice <- 4
-  # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(synchrones))
-  noctave = 13
-  # 4 here represent 2^5 = 32 half-hours ~ 1 day
-  #NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
-  scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
-  #condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
-  s0 = 2
-  w0 = 2*pi
-  scaled = FALSE
-  s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
-  totnoct = noctave + as.integer(s0log/nvoice) + 1
+  if (length(indices) <= K2)
+    return (indices)

-  # (normalized) observations node with CWT
-  Xcwt4 <- lapply(seq_len(n), function(i) {
-    ts <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
-    totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=0)
-    ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
-    #Normalization
-    sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
-    sqres <- sweep(ts.cwt, MARGIN=2, sqs, '*')
-    sqres / max(Mod(sqres))
-  })
+  # A) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
+  distances <- computeWerDists(indices, getSeries, nb_series_per_chunk,
+    smooth_lvl, nvoice, nbytes, endian, ncores_clust, verbose, parll)

-  Xwer_dist <- matrix(0., n, n)
-  fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
-  for (i in 1:(n-1))
-  {
-    for (j in (i+1):n)
-    {
-      #TODO: later, compute CWT here (because not enough storage space for 200k series)
-      # 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
-      num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-      WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
-      WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-      wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
-      Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
-      Xwer_dist[j,i] <- Xwer_dist[i,j]
-    }
-  }
-  diag(Xwer_dist) <- numeric(n)
-  Xwer_dist
+  # B) Apply clustering algorithm 2 on the WER distances matrix
+  if (verbose)
+    cat(paste("*** algoClust2() on ",nrow(distances)," items\n", sep=""))
+  indices[ algoClust2(distances,K2) ]
 }
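For reference, the deleted double loop implements what can be written, with W_x the normalized CWT of series x and all three spectra smoothed over time by the length-3 moving average fcoefs, as the following reconstruction from the code (notation introduced here for illustration):

\[
\mathrm{WER}^2(x,y) =
  \frac{\sum_{s}\Bigl(\sum_{t}\bigl|W_x(s,t)\,\overline{W_y(s,t)}\bigr|\Bigr)^{2}}
       {\sum_{s}\Bigl(\sum_{t}\bigl|W_x(s,t)\bigr|^{2}\Bigr)\Bigl(\sum_{t}\bigl|W_y(s,t)\bigr|^{2}\Bigr)},
\qquad
d(x,y) = \sqrt{\delta \, J \, \bigl(1 - \mathrm{WER}^2(x,y)\bigr)},
\]

where s ranges over the J scales, t over the time points, and delta is the series length. The new code delegates this computation to computeWerDists(), which works from series indices instead of a precomputed synchrones matrix.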