#' @name clustering
#' @rdname clustering
-#' @aliases clusteringTask1 computeClusters1 computeClusters2
+#' @aliases clusteringTask1 clusteringTask2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' and then WER distances computations, before applying the clustering algorithm.
-#' \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
-#' clustering procedures respectively for stage 1 and 2. The former applies the
-#' clustering algorithm (PAM) on a contributions matrix, while the latter clusters
-#' a chunk of series inside one task (~max nb_series_per_chunk)
+#' The atomic clustering procedures are now passed as parameters: \code{algoClust1()}
+#' is applied to a contributions matrix (stage 1, on chunks of ~nb_items_clust1 series),
+#' while \code{algoClust2()} is applied to the matrix of WER distances between
+#' synchrones (stage 2).
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#' \code{getContribs(indices)} outputs a contributions matrix
-#' @param contribs matrix of contributions (e.g. output of \code{curvesToContribs()})
#' @inheritParams computeSynchrones
#' @inheritParams claws
#'
-#' @return For \code{clusteringTask1()} and \code{computeClusters1()}, the indices of the
-#' computed (K1) medoids. Indices are irrelevant for stage 2 clustering, thus
-#' \code{computeClusters2()} outputs a big.matrix of medoids
-#' (of size limited by nb_series_per_chunk)
+#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
+#' Indices are irrelevant for stage 2 clustering, thus \code{clusteringTask2()}
+#' outputs a big.matrix of medoids (of size LxK2, K2 = final number of clusters)
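+#'
+#' @examples
+#' \dontrun{
+#' # Illustrative sketch (not the package defaults): PAM-based choices for the two
+#' # clustering algorithms. algoClust1() must return the indices of K medoids among
+#' # the series whose contributions it receives; algoClust2() does the same from a
+#' # distances matrix. (Assumes contributions are stored one series per row.)
+#' algoClust1 <- function(contribs, K) cluster::pam(contribs, K, diss=FALSE)$id.med
+#' algoClust2 <- function(dists, K) cluster::pam(dists, K, diss=TRUE)$id.med
+#' }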
NULL
#' @rdname clustering
#' @export
-clusteringTask1 = function(
- indices, getContribs, K1, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
+clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
+ ncores_clust=1, verbose=FALSE, parll=TRUE)
{
- if (verbose)
- cat(paste("*** Clustering task on ",length(indices)," lines\n", sep=""))
-
- wrapComputeClusters1 = function(inds) {
- if (parll)
- require("epclust", quietly=TRUE)
- if (verbose)
- cat(paste(" computeClusters1() on ",length(inds)," lines\n", sep=""))
- inds[ computeClusters1(getContribs(inds), K1) ]
- }
-
if (parll)
{
- cl = parallel::makeCluster(ncores_clust)
- parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
+ cl = parallel::makeCluster(ncores_clust, outfile = "")
+ parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
}
+ # Iterate clustering algorithm 1 until K1 medoids are found
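+ # e.g. (illustration) with 30000 series, nb_items_clust1=7000 and K1=200: a first pass
+ # runs algoClust1 on 4 chunks of ~7500 series, keeping 4*200=800 indices, and a second
+ # pass clusters those 800 down to the final K1=200 medoid indices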
while (length(indices) > K1)
{
- indices_workers = .spreadIndices(indices, nb_series_per_chunk)
- if (parll)
- indices = unlist( parallel::parLapply(cl, indices_workers, wrapComputeClusters1) )
- else
- indices = unlist( lapply(indices_workers, wrapComputeClusters1) )
+ # Balance tasks by splitting the indices set - as evenly as possible
+ indices_workers = .spreadIndices(indices, nb_items_clust1)
+ if (verbose)
+ cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
+ indices <-
+ if (parll)
+ {
+ unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ }) )
+ }
+ else
+ {
+ unlist( lapply(indices_workers, function(inds)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ ) )
+ }
}
if (parll)
parallel::stopCluster(cl)
#' @rdname clustering
#' @export
-clusteringTask2 = function(medoids, K2,
- getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust=1,verbose=FALSE,parll=TRUE)
+clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, nbytes,endian,ncores_clust=1,verbose=FALSE,parll=TRUE)
{
- if (nrow(medoids) <= K2)
+ if (verbose)
+ cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))
+
+ if (ncol(medoids) <= K2)
return (medoids)
- synchrones = computeSynchrones(medoids,
- getRefSeries, nb_ref_curves, nb_series_per_chunk, ncores_clust, verbose, parll)
- distances = computeWerDists(synchrones, ncores_clust, verbose, parll)
- # PAM in package 'cluster' cannot take big.matrix in input: need to cast it
- mat_dists = matrix(nrow=K1, ncol=K1)
- for (i in seq_len(K1))
- mat_dists[i,] = distances[i,]
- medoids[ computeClusters2(mat_dists,K2), ]
-}
-#' @rdname clustering
-#' @export
-computeClusters1 = function(contribs, K1)
- cluster::pam(contribs, K1, diss=FALSE)$id.med
+ # A) Obtain synchrones, that is to say the cumulated power consumptions
+ # for each of the K1 initial groups
+ synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, ncores_clust, verbose, parll)
-#' @rdname clustering
-#' @export
-computeClusters2 = function(distances, K2)
- cluster::pam(distances, K2, diss=TRUE)$id.med
+ # B) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
+ distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll)
+
+ # C) Apply clustering algorithm 2 on the WER distances matrix
+ if (verbose)
+ cat(paste(" algoClust2() on ",nrow(distances)," items\n", sep=""))
+ medoids[ ,algoClust2(distances,K2) ]
+}
#' computeSynchrones
#'
#' Compute the synchrones curves (sum of clusters elements) from a matrix of medoids,
-#' using L2 distances.
+#' using euclidean distance.
#'
#' @param medoids big.matrix of medoids (curves of same length as initial series)
#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
#' @param nb_ref_curves How many reference series? (This number is known at this stage)
#' @inheritParams claws
#'
-#' @return A big.matrix of size K1 x L where L = data_length
+#' @return A big.matrix of size L x K1 where L = length of a series
#'
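+#' @examples
+#' \dontrun{
+#' # Informal sketch on toy data (not the package workflow): 200 series of length 50,
+#' # one per column, with 3 of them taken as medoids
+#' series <- matrix(rnorm(50*200), nrow=50)
+#' medoids <- bigmemory::as.big.matrix(series[, c(1,101,151)])
+#' getRefSeries <- function(indices) series[, indices, drop=FALSE]
+#' synchrones <- computeSynchrones(medoids, getRefSeries, nb_ref_curves=200,
+#'   nb_series_per_chunk=50, parll=FALSE)
+#' dim(synchrones) # 50 x 3
+#' }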
#' @export
-computeSynchrones = function(medoids, getRefSeries,
- nb_ref_curves, nb_series_per_chunk, ncores_clust=1,verbose=FALSE,parll=TRUE)
+computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, ncores_clust=1,verbose=FALSE,parll=TRUE)
{
-
-
-
-#TODO: if parll, getMedoids + serialization, pass only getMedoids to nodes
-# --> Meh... each node would then load all the medoids (efficiency) :/ ==> must fit in RAM
-#worst case: write it in C and load medoids one by one...
-
- #BETTER: medoids MUST be a shared big.matrix!
-
+ # Synchrones computation is embarrassingly parallel: compute it by chunks of series
computeSynchronesChunk = function(indices)
{
- if (verbose)
- cat(paste("--- Compute synchrones for ",length(indices)," lines\n", sep=""))
+ if (parll)
+ {
+ require("bigmemory", quietly=TRUE)
+ requireNamespace("synchronicity", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ # The big.matrix objects need to be attached to be usable on the workers
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ medoids <- bigmemory::attach.big.matrix(medoids_desc)
+ m <- synchronicity::attach.mutex(m_desc)
+ }
+
+ # Obtain a chunk of reference series
ref_series = getRefSeries(indices)
- #get medoids indices for this chunk of series
- for (i in seq_len(nrow(ref_series)))
+ nb_series = ncol(ref_series)
+
+ # Get medoids indices for this chunk of series
+ mi = computeMedoidsIndices(medoids@address, ref_series)
+
+ # Update synchrones using mi above
+ for (i in seq_len(nb_series))
{
- j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
if (parll)
- synchronicity::lock(m)
- synchrones[j,] = synchrones[j,] + ref_series[i,]
- counts[j,1] = counts[j,1] + 1
+ synchronicity::lock(m) # lock because several processes may write at the same time
+ synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
if (parll)
synchronicity::unlock(m)
}
}
- K = nrow(medoids)
+ K = ncol(medoids) ; L = nrow(medoids)
# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
- # TODO: if size > RAM (not our case), use file-backed big.matrix
- synchrones = bigmemory::big.matrix(nrow=K,ncol=ncol(medoids),type="double",init=0.)
- counts = bigmemory::big.matrix(nrow=K,ncol=1,type="double",init=0)
- # synchronicity is only for Linux & MacOS; on Windows: run sequentially
+ synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
+ # NOTE: synchronicity is only for Linux & MacOS; on Windows: run sequentially
parll = (requireNamespace("synchronicity",quietly=TRUE)
&& parll && Sys.info()['sysname'] != "Windows")
- if (parll)
- m <- synchronicity::boost.mutex()
-
if (parll)
{
+ m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk
+ # mutex and big.matrix objects cannot be passed directly:
+ # they will be accessed from their description
+ m_desc <- synchronicity::describe(m)
+ synchrones_desc = bigmemory::describe(synchrones)
+ medoids_desc = bigmemory::describe(medoids)
cl = parallel::makeCluster(ncores_clust)
- parallel::clusterExport(cl,
- varlist=c("synchrones","counts","verbose","medoids","getRefSeries"),
- envir=environment())
+ parallel::clusterExport(cl, envir=environment(),
+ varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries"))
}
- indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
+ if (verbose)
+ cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))
+
+ # Balance tasks by splitting the indices set - maybe not so evenly, but
+ # max==TRUE in next call ensures that no set has more than nb_series_per_chunk items.
+ indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE)
ignored <-
if (parll)
- parallel::parLapply(indices_workers, computeSynchronesChunk)
+ parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
else
lapply(indices_workers, computeSynchronesChunk)
if (parll)
parallel::stopCluster(cl)
- #TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 1, counts, '/') )
- for (i in seq_len(K))
- synchrones[i,] = synchrones[i,] / counts[i,1]
- #NOTE: odds for some clusters to be empty? (when series already come from stage 2)
- # ...maybe; but let's hope resulting K1' be still quite bigger than K2
- noNA_rows = sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,])))
- if (all(noNA_rows))
- return (synchrones)
- # Else: some clusters are empty, need to slice synchrones
- synchrones[noNA_rows,]
+ return (synchrones)
}
#' computeWerDists
#' as the series in the initial dataset
#' @inheritParams claws
#'
-#' @return A big.matrix of size K1 x K1
+#' @return A matrix of size K1 x K1
#'
#' @export
-computeWerDists = function(synchrones, ncores_clust=1,verbose=FALSE,parll=TRUE)
+computeWerDists = function(synchrones, nbytes,endian,ncores_clust=1,verbose=FALSE,parll=TRUE)
{
-
-
-
-#TODO: re-organize to call computeWerDist(x,y) [C] (in //?) from two indices + big.matrix
-
-
- n <- nrow(synchrones)
- delta <- ncol(synchrones)
+ n <- ncol(synchrones)
+ L <- nrow(synchrones)
#TODO: automatic tune of all these parameters ? (for other users)
+ # nvoice: number of intermediate scales ("voices") per octave in the CWT
nvoice <- 4
- # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(synchrones))
+ # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(nrow(synchrones))
noctave = 13
- # 4 here represent 2^5 = 32 half-hours ~ 1 day
- #NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
- scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
s0=2
w0=2*pi
- scaled=FALSE
s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
totnoct = noctave + as.integer(s0log/nvoice) + 1
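+ # With nvoice=4, s0=2 and w0=2*pi, s0log==1 and totnoct==14; the selection
+ # s0log:(s0log+noctave*nvoice) in computeSaveCWT() below keeps 53 of the 56 scales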
-
- computeCWT = function(i)
+
+ Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
+
+ cwt_file = ".epclust_bin/cwt"
+ #TODO: args, nb_per_chunk, nbytes, endian
+
+ # Generate n(n-1)/2 pairs for WER distances computations
+ pairs = list()
+ V = seq_len(n)
+ for (i in 1:n)
{
- if (verbose)
- cat(paste("+++ Compute Rwave::cwt() on serie ",i,"\n", sep=""))
- ts <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
- totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
+ V = V[-1]
+ pairs = c(pairs, lapply(V, function(v) c(i,v)))
+ }
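+ # e.g. for n=4 the pairs generated above are (1,2),(1,3),(1,4),(2,3),(2,4),(3,4)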
+
+ computeSaveCWT = function(index)
+ {
+ if (parll)
+ {
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ # As for computeDistancesIJ() below, attach the shared big.matrix on the worker
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ }
+ ts <- scale(ts(synchrones[,index]), center=TRUE, scale=FALSE)
+ totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
+ ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
+ #Normalization
+ sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
+ sqres <- sweep(ts.cwt,2,sqs,'*')
+ res <- sqres / max(Mod(sqres))
+ #Serialize the CWT as two flat series of length L*(noctave*nvoice+1):
+ #the Re part then the Im part, to be retrieved by getCWT() via getDataInFile()
+ binarize(cbind(as.double(Re(res)), as.double(Im(res))), cwt_file, 2, ",", nbytes, endian)
+ }
if (parll)
{
cl = parallel::makeCluster(ncores_clust)
- parallel::clusterExport(cl,
- varlist=c("synchrones","totnoct","nvoice","w0","s0log","noctave","s0","verbose"),
- envir=environment())
+ synchrones_desc <- bigmemory::describe(synchrones)
+ Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
+ parallel::clusterExport(cl, envir=environment(),
+ varlist=c("synchrones_desc","Xwer_dist_desc","totnoct","nvoice","w0","s0log",
+ "noctave","s0","verbose"))
+ # NOTE: getCWT() is not exported: it is defined below, after the CWT have been
+ # serialized, and reaches the workers with computeDistancesIJ()'s environment
}
-
- # list of CWT from synchrones
- # TODO: fit in RAM, OK? If not, 2 options: serialize, compute individual distances
- Xcwt4 <-
+
+ if (verbose)
+ cat("--- Precompute and serialize all CWT\n")
+
+ # Precompute and serialize all CWT
+ ignored <-
if (parll)
- parallel::parLapply(cl, seq_len(n), computeCWT)
+ parallel::parLapply(cl, 1:n, computeSaveCWT)
else
- lapply(seq_len(n), computeCWT)
-
- if (parll)
- parallel::stopCluster(cl)
+ lapply(1:n, computeSaveCWT)
- Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
- fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
- if (verbose)
- cat("*** Compute WER distances from CWT\n")
-
- #TODO: computeDistances(i,j), and distribute the n(n-1)/2 index pairs
- #here it's too unbalanced
+ # Retrieve a synchrone CWT from the binary file; assumes the layout written by
+ # computeSaveCWT() above: series 2*index-1 = flattened Re part, 2*index = Im part
+ getCWT = function(index)
+ {
+ parts <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
+ matrix(parts[,1], nrow=L) + 1i * matrix(parts[,2], nrow=L)
+ }
- computeDistancesLineI = function(i)
+ # Distance between rows i and j
+ computeDistancesIJ = function(pair)
{
- if (verbose)
- cat(paste(" Line ",i,"\n", sep=""))
- for (j in (i+1):n)
+ if (parll)
{
- #TODO: 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
- num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
- WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
- if (parll)
- synchronicity::lock(m)
- Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
- Xwer_dist[j,i] <- Xwer_dist[i,j]
- if (parll)
- synchronicity::unlock(m)
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
}
+
+ i = pair[1] ; j = pair[2]
+ if (verbose && j==i+1)
+ cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
+ cwt_i <- getCWT(i)
+ cwt_j <- getCWT(j)
+
+ num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+ WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+ WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
+ wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
+ Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * max(1 - wer2, 0.))
+ Xwer_dist[j,i] <- Xwer_dist[i,j]
Xwer_dist[i,i] = 0.
}
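+
+ # For reference, computeDistancesIJ() above computes (up to the smoothing filter)
+ # WER2(i,j) = sum_s ( sum_t |W_i(s,t)| |W_j(s,t)| )^2 /
+ # ( sum_s [ (sum_t |W_i(s,t)|^2) * (sum_t |W_j(s,t)|^2) ] )
+ # where W_i is the CWT of synchrone i, and stores sqrt( L * nb_scales * (1 - WER2) )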
- parll = (requireNamespace("synchronicity",quietly=TRUE)
- && parll && Sys.info()['sysname'] != "Windows")
- if (parll)
- m <- synchronicity::boost.mutex()
-
+ if (verbose)
+ {
+ cat(paste("--- Compute WER dists\n", sep=""))
+ }
ignored <-
if (parll)
- {
- parallel::mclapply(seq_len(n-1), computeDistancesLineI,
- mc.cores=ncores_clust, mc.allow.recursive=FALSE)
- }
+ parallel::parLapply(cl, pairs, computeDistancesIJ)
else
- lapply(seq_len(n-1), computeDistancesLineI)
+ lapply(pairs, computeDistancesIJ)
+
+ if (parll)
+ parallel::stopCluster(cl)
+
Xwer_dist[n,n] = 0.
- Xwer_dist
+ distances <- Xwer_dist[,]
+ rm(Xwer_dist) ; gc()
+ distances #~small matrix K1 x K1
}
# Helper function to divide indices into balanced sets
-.spreadIndices = function(indices, nb_per_chunk)
+# If max == TRUE, sets sizes cannot exceed nb_per_set
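+# e.g. (illustration): .spreadIndices(1:11, 4) gives list(c(1:4,10), c(5:8,9,11)),
+# while .spreadIndices(1:11, 4, max=TRUE) gives list(1:4, 5:8, 9:11)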
+.spreadIndices = function(indices, nb_per_set, max=FALSE)
{
L = length(indices)
- nb_workers = floor( L / nb_per_chunk )
- if (nb_workers == 0)
+ nb_workers = floor( L / nb_per_set )
+ rem = L %% nb_per_set
+ if (nb_workers == 0 || (nb_workers==1 && rem==0))
{
- # L < nb_series_per_chunk, simple case
+ # L <= nb_per_set, simple case
indices_workers = list(indices)
}
else
{
indices_workers = lapply( seq_len(nb_workers), function(i)
- indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
+ indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
+
+ if (max)
+ {
+ # Sets are not so well balanced, but size is supposed to be critical
+ if (rem > 0)
+ indices_workers = c( indices_workers, list(indices[(L-rem+1):L]) )
+ return (indices_workers)
+ }
+
# Spread the remaining load among the workers
- rem = L %% nb_per_chunk
while (rem > 0)
{
index = rem%%nb_workers + 1