X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fclustering.R;h=5b5f6684763321b1b854d0cdbc2de6bb2b8ded16;hb=074a48c472fcbdf99a36fae333dd8dbb568c06a0;hp=fce1b1c695f4b6094580375e28a6cd5cd0f1e805;hpb=56857861dc15088cf58e7438968fe5714b22168e;p=epclust.git

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index fce1b1c..5b5f668 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -1,111 +1,94 @@
-# Cluster one full task (nb_curves / ntasks series); only step 1
-clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
-{
-	cl = parallel::makeCluster(ncores)
-	repeat
-	{
-		nb_workers = max( 1, round( length(indices) / nb_series_per_chunk ) )
-		indices_workers = lapply(seq_len(nb_workers), function(i) {
-			upper_bound = ifelse( i<
[...]
-	# [...] initialize a zero vector for each one, then increment it with the
-	#closest curves (getSeriesForSynchrones)... --> in the sense of the L2 norm?
-	K = nrow(medoids)
-	synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
-	counts = rep(0,K)
-	index = 1
-	repeat
+	if (length(indices) <= K1)
+		return (indices)
+
+	parll <- (ncores_clust > 1)
+	if (parll)
 	{
-		range = (index-1) + seq_len(nb_series_per_chunk)
-		ref_series = getRefSeries(range)
-		if (is.null(ref_series))
-			break
-		#get medoids indices for this chunk of series
-		for (i in seq_len(nrow(ref_series)))
+		# outfile=="" to see stderr/stdout on terminal
+		cl <-
+			if (verbose)
+				parallel::makeCluster(ncores_clust, outfile = "")
+			else
+				parallel::makeCluster(ncores_clust)
+		parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
+	}
+	# Iterate clustering algorithm 1 until K1 medoids are found
+	while (length(indices) > K1)
+	{
+		# Balance tasks by splitting the indices set - as evenly as possible
+		indices_workers <- .splitIndices(indices, nb_items_clust, min_size=K1+1)
+		indices <-
+			if (parll)
+			{
+				unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+					require("epclust", quietly=TRUE)
+					inds[ algoClust1(getContribs(inds), K1) ]
+				}) )
+			}
+			else
+			{
+				unlist( lapply(indices_workers, function(inds)
+					inds[ algoClust1(getContribs(inds), K1) ]
+				) )
+			}
+		if (verbose)
 		{
-			j = which.min( rowSums( sweep(medoids, 2, series[i,], '-')^2 ) )
-			synchrones[j,] = synchrones[j,] + series[i,]
-			counts[j] = counts[j] + 1
+			cat(paste("*** Clustering task 1 on ",length(indices)," medoids [iter]\n", sep=""))
 		}
-		index = index + nb_series_per_chunk
 	}
-	#NOTE: odds for some clusters to be empty? (when series already come from stage 2)
-	sweep(synchrones, 1, counts, '/')
+	if (parll)
+		parallel::stopCluster(cl)
+
+	indices #medoids
 }
 
-# Compute the WER distance between the synchrones curves (in rows)
-computeWerDist = function(curves)
+#' @rdname clustering
+#' @export
+clusteringTask2 <- function(indices, getSeries, K2, algoClust2, nb_series_per_chunk,
+	smooth_lvl, nvoice, nbytes, endian, ncores_clust=3, verbose=FALSE)
 {
-	if (!require("Rwave", quietly=TRUE))
-		stop("Unable to load Rwave library")
-	n <- nrow(curves)
-	delta <- ncol(curves)
-	#TODO: automatic tune of all these parameters ? (for other users)
-	nvoice <- 4
-	# noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(curves))
-	noctave = 13
-	# 4 here represent 2^5 = 32 half-hours ~ 1 day
-	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
-	scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
-	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
-	s0=2
-	w0=2*pi
-	scaled=FALSE
-	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
-	totnoct = noctave + as.integer(s0log/nvoice) + 1
+	if (verbose)
+		cat(paste("*** Clustering task 2 on ",length(indices)," medoids\n", sep=""))
 
-	# (normalized) observations node with CWT
-	Xcwt4 <- lapply(seq_len(n), function(i) {
-		ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
-		totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
-		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
-		#Normalization
-		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
-		sqres <- sweep(ts.cwt,MARGIN=2,sqs,'*')
-		sqres / max(Mod(sqres))
-	})
+	if (length(indices) <= K2)
+		return (indices)
 
-	Xwer_dist <- matrix(0., n, n)
-	fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
-	for (i in 1:(n-1))
-	{
-		for (j in (i+1):n)
-		{
-			#TODO: later, compute CWT here (because not enough storage space for 200k series)
-			# 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
-			num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-			WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
-			WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
-			wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
-			Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
-			Xwer_dist[j,i] <- Xwer_dist[i,j]
-		}
-	}
-	diag(Xwer_dist) <- numeric(n)
-	Xwer_dist
+	# A) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
+	distances <- computeWerDists(indices, getSeries, nb_series_per_chunk,
+		smooth_lvl, nvoice, nbytes, endian, ncores_clust, verbose)
+
+	# B) Apply clustering algorithm 2 on the WER distances matrix
+	if (verbose)
+		cat(paste("*** algoClust2() on ",nrow(distances)," items\n", sep=""))
+	indices[ algoClust2(distances,K2) ]
 }
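For orientation, below is a minimal sketch of how the first entry point touched by this patch might be driven. It assumes the epclust package (with its internal `.splitIndices` helper) is installed and attached; the declaration of `clusteringTask1` is not visible in this capture, so its argument list is inferred from the function body above. The toy data, the `getContribs` accessor, and the PAM-based `algoClust1` are illustrative assumptions, not the package defaults.

```R
library(epclust)  # assumed installed; provides clusteringTask1() and .splitIndices()
library(cluster)  # pam(), used here only as a stand-in clustering algorithm

# Toy data: 50 series of length 200, one series per row (hypothetical layout)
series <- matrix(rnorm(50 * 200), nrow=50)

# Hypothetical accessor: return the "contributions" (here, the raw series) for some indices
getContribs <- function(indices) series[indices, , drop=FALSE]

# Clustering algorithm 1: must return, within its input chunk, the positions of K1 medoids
algoClust1 <- function(contribs, K1) cluster::pam(contribs, K1, diss=FALSE)$id.med

# Assumed signature, inferred from the body in the diff above
medoid_ids <- clusteringTask1(indices=1:50, getContribs=getContribs, K1=5,
	algoClust1=algoClust1, nb_items_clust=20, ncores_clust=1, verbose=TRUE)
```

`clusteringTask2()` follows the same calling pattern but, as its signature in the patch shows, additionally takes a `getSeries` accessor plus the wavelet and serialization parameters (`smooth_lvl`, `nvoice`, `nbytes`, `endian`) that it forwards to `computeWerDists()` before applying `algoClust2` on the resulting distance matrix.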