-# Cluster one full task (nb_curves / ntasks series); only step 1
-clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
+#' @name clustering
+#' @rdname clustering
+#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
+#'
+#' @title Two-stage clustering, within one task (see \code{claws()})
+#'
+#' @description \code{clusteringTask1()} runs one full stage-1 task, which consists
+#' of iterated stage-1 clustering on nb_curves / ntasks energy contributions,
+#' computed from discrete wavelet coefficients.
+#' \code{clusteringTask2()} runs one full stage-2 task, which consists of computing
+#' synchrones and then WER distances, before applying the clustering algorithm.
+#' \code{computeClusters1()} and \code{computeClusters2()} are the atomic clustering
+#' procedures for stages 1 and 2 respectively. The former applies the first
+#' clustering algorithm to a contributions matrix, while the latter clusters a set
+#' of series within one task (of size ~nb_items_clust1)
+#'
+#' @param indices Range of series indices to cluster in parallel (initial data)
+#' @param getContribs Function to retrieve contributions from initial series indices:
+#' \code{getContribs(indices)} outputs a contributions matrix
+#' @inheritParams computeSynchrones
+#' @inheritParams claws
+#'
+#' @return For \code{clusteringTask1()}, the indices of the computed (K1) medoids.
+#' Indices are irrelevant for stage-2 clustering, so \code{clusteringTask2()}
+#' outputs a big.matrix of medoids (of size L x K2, where K2 is the final number
+#' of clusters)
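+#'
+#' @examples
+#' \dontrun{
+#' # Hedged sketch of a stage-1 call: assumes a hypothetical in-memory matrix
+#' # 'contribs' of wavelet contributions (one series per row), and plugs in
+#' # PAM as the stage-1 algorithm
+#' getContribs = function(indices) contribs[indices,]
+#' algo1 = function(ctrb, K) cluster::pam(ctrb, K, diss=FALSE)$id.med
+#' medoid_ids = clusteringTask1(seq_len(nrow(contribs)), getContribs, K1=200,
+#'   algo1, nb_items_clust1=1000, parll=FALSE)
+#' }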
+NULL
+
+#' @rdname clustering
+#' @export
+clusteringTask1 = function(indices, getContribs, K1, algoClust1, nb_items_clust1,
+ ncores_clust=1, verbose=FALSE, parll=TRUE)
{
- cl = parallel::makeCluster(ncores)
- parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
- repeat
+ if (parll)
{
- nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
- indices_workers = lapply( seq_len(nb_workers), function(i)
- indices[(nb_series_per_chunk*(i-1)+1):(nb_series_per_chunk*i)] )
- # Spread the remaining load among the workers
- rem = length(indices) %% nb_series_per_chunk
- while (rem > 0)
- {
- index = rem%%nb_workers + 1
- indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem))
- rem = rem - 1
- }
- indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
- require("epclust", quietly=TRUE)
- inds[ computeClusters1(getCoefs(inds), K1) ]
- } ) )
- if (length(indices) == K1)
- break
+ cl = parallel::makeCluster(ncores_clust, outfile = "")
+ parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
}
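+ # Iterate clustering on chunks of ~nb_items_clust1 series, keeping K1 medoid
+ # indices per chunk, until at most K1 indices remain
+ # (e.g. K1=200, nb_items_clust1=1000: 30000 indices -> 6000 -> 1200 -> 200)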
- parallel::stopCluster(cl)
+ while (length(indices) > K1)
+ {
+ indices_workers = .spreadIndices(indices, nb_items_clust1)
+ if (verbose)
+ cat(paste("*** [iterated] Clustering task 1 on ",length(indices)," series\n", sep=""))
+ indices <-
+ if (parll)
+ {
+ unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ }) )
+ }
+ else
+ {
+ unlist( lapply(indices_workers, function(inds)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ ) )
+ }
+ }
+ if (parll)
+ parallel::stopCluster(cl)
+
indices #medoids
}
-# Apply the clustering algorithm (PAM) on a coeffs or distances matrix
-computeClusters1 = function(coefs, K1)
- cluster::pam(coefs, K1, diss=FALSE)$id.med
-
-# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
-computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
+#' @rdname clustering
+#' @export
+clusteringTask2 = function(medoids, K2, algoClust2, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, sync_mean, nbytes,endian,ncores_clust=1,verbose=FALSE,parll=TRUE)
{
- synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
- medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
+ if (verbose)
+ cat(paste("*** Clustering task 2 on ",ncol(medoids)," synchrones\n", sep=""))
+
+ if (ncol(medoids) <= K2)
+ return (medoids)
+ synchrones = computeSynchrones(medoids, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, sync_mean, ncores_clust, verbose, parll)
+ distances = computeWerDists(synchrones, nbytes, endian, ncores_clust, verbose, parll)
+ if (verbose)
+ cat(paste(" algoClust2() on ",nrow(distances)," items\n", sep=""))
+ # Medoids are stored in columns; wrap the subset to keep a big.matrix output
+ bigmemory::as.big.matrix( medoids[, algoClust2(distances,K2) ] )
}
-# Compute the synchrones curves (sum of clusters elements) from a clustering result
-computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
+#' computeSynchrones
+#'
+#' Compute the synchrones curves (sums of cluster elements) from a matrix of medoids,
+#' using L2 distances.
+#'
+#' @param medoids big.matrix of medoids (curves of same length as initial series)
+#' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series
+#' have been replaced by stage-1 medoids)
+#' @param nb_ref_curves Number of reference series (this number is known at this stage)
+#' @inheritParams claws
+#'
+#' @return A big.matrix of size L x K1, where L is the length of a series
+#'
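+#' @examples
+#' \dontrun{
+#' # Hedged sketch: assumes a hypothetical matrix 'series' (one curve per column)
+#' # and 'medoids' a big.matrix of stage-1 medoids, also in columns
+#' getRefSeries = function(indices) series[,indices]
+#' synchrones = computeSynchrones(medoids, getRefSeries, ncol(series),
+#'   nb_series_per_chunk=100, sync_mean=TRUE, parll=FALSE)
+#' }
+#'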
+#' @export
+computeSynchrones = function(medoids, getRefSeries, nb_ref_curves,
+ nb_series_per_chunk, sync_mean, ncores_clust=1,verbose=FALSE,parll=TRUE)
{
- K = nrow(medoids)
- synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
- counts = rep(0,K)
- index = 1
- repeat
+ computeSynchronesChunk = function(indices)
{
- range = (index-1) + seq_len(nb_series_per_chunk)
- ref_series = getRefSeries(range)
- if (is.null(ref_series))
- break
- #get medoids indices for this chunk of series
- for (i in seq_len(nrow(ref_series)))
+ if (parll)
{
- j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
- synchrones[j,] = synchrones[j,] + ref_series[i,]
- counts[j] = counts[j] + 1
+ require("bigmemory", quietly=TRUE)
+ requireNamespace("synchronicity", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ if (sync_mean)
+ counts <- bigmemory::attach.big.matrix(counts_desc)
+ medoids <- bigmemory::attach.big.matrix(medoids_desc)
+ m <- synchronicity::attach.mutex(m_desc)
}
- index = index + nb_series_per_chunk
+
+ ref_series = getRefSeries(indices)
+ nb_series = ncol(ref_series) # series are stored in columns
+
+ # Get medoids indices for this chunk of series
+ mi = computeMedoidsIndices(medoids@address, ref_series)
+
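+ # Each series is added to the synchrone of its nearest medoid; the mutex
+ # serializes these '+=' updates, since workers share the big.matrix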
+ for (i in seq_len(nb_series))
+ {
+ if (parll)
+ synchronicity::lock(m)
+ synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
+ if (sync_mean)
+ counts[ mi[i] ] = counts[ mi[i] ] + 1
+ if (parll)
+ synchronicity::unlock(m)
+ }
+ }
+
+ K = ncol(medoids) ; L = nrow(medoids)
+ # Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
+ # TODO: if size > RAM (not our case), use file-backed big.matrix
+ synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
+ if (sync_mean)
+ counts = bigmemory::big.matrix(nrow=K, ncol=1, type="double", init=0)
+ # synchronicity is only for Linux & MacOS; on Windows: run sequentially
+ parll = (requireNamespace("synchronicity",quietly=TRUE)
+ && parll && Sys.info()['sysname'] != "Windows")
+ if (parll)
+ {
+ m <- synchronicity::boost.mutex()
+ m_desc <- synchronicity::describe(m)
+ synchrones_desc = bigmemory::describe(synchrones)
+ if (sync_mean)
+ counts_desc = bigmemory::describe(counts)
+ medoids_desc = bigmemory::describe(medoids)
+ cl = parallel::makeCluster(ncores_clust)
+ varlist=c("synchrones_desc","sync_mean","m_desc","medoids_desc","getRefSeries")
+ if (sync_mean)
+ varlist = c(varlist, "counts_desc")
+ parallel::clusterExport(cl, varlist, envir=environment())
+ }
+
+ if (verbose)
+ {
+ cat(paste("--- Compute ",K," synchrones with ",nb_ref_curves," series\n", sep=""))
}
+ indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
+ ignored <-
+ if (parll)
+ parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
+ else
+ lapply(indices_workers, computeSynchronesChunk)
+
+ if (parll)
+ parallel::stopCluster(cl)
+
+ if (!sync_mean)
+ return (synchrones)
+
+ #TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 2, counts, '/') )
+ for (i in seq_len(K))
+ synchrones[,i] = synchrones[,i] / counts[i]
#NOTE: odds for some clusters to be empty? (when series already come from stage 2)
# ...maybe; but let's hope the resulting K1' stays well above K2
- synchrones = sweep(synchrones, 1, counts, '/')
- synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
+ noNA_cols = sapply(seq_len(K), function(i) all(!is.nan(synchrones[,i])))
+ if (all(noNA_cols))
+ return (synchrones)
+ # Else: some clusters are empty; drop the corresponding columns
+ bigmemory::as.big.matrix(synchrones[,noNA_cols])
}
-# Compute the WER distance between the synchrones curves (in rows)
-computeWerDists = function(curves)
+#' computeWerDists
+#'
+#' Compute the WER distances between the synchrones curves (in columns), as
+#' returned (e.g.) by \code{computeSynchrones()}
+#'
+#' @param synchrones A big.matrix of synchrones, in columns: the series have the
+#' same length as the series in the initial dataset
+#' @inheritParams claws
+#'
+#' @return A matrix of size K1 x K1
+#'
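+#' @examples
+#' \dontrun{
+#' # Hedged sketch: 'synchrones' as returned by computeSynchrones(); nbytes and
+#' # endian must match the serialization settings used elsewhere, and the
+#' # .epclust_bin/ directory used for intermediate CWT storage must exist
+#' dists = computeWerDists(synchrones, nbytes=8, endian="little", parll=FALSE)
+#' }
+#'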
+#' @export
+computeWerDists = function(synchrones, nbytes,endian,ncores_clust=1,verbose=FALSE,parll=TRUE)
{
- if (!require("Rwave", quietly=TRUE))
- stop("Unable to load Rwave library")
- n <- nrow(curves)
- delta <- ncol(curves)
+ n <- ncol(synchrones) # one synchrone per column
+ delta <- nrow(synchrones)
#TODO: automatic tuning of all these parameters? (for other users)
nvoice <- 4
- # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(curves))
+ # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(nrow(synchrones))
noctave = 13
# the 4 here represents 2^5 = 32 half-hours ~ 1 day
#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
- scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
+ scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
- s0=2
- w0=2*pi
+ s0 = 2
+ w0 = 2*pi
scaled=FALSE
s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
totnoct = noctave + as.integer(s0log/nvoice) + 1
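+ # With s0=2 and w0=2*pi: s0log = as.integer(1.5) = 1, totnoct = 13+0+1 = 14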
- # (normalized) observations node with CWT
- Xcwt4 <- lapply(seq_len(n), function(i) {
- ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
- totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
+ Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
+
+ cwt_file = ".epclust_bin/cwt"
+ #TODO: args, nb_per_chunk, nbytes, endian
+
+ # Generate the n(n-1)/2 pairs for WER distance computations
+ pairs = list()
+ V = seq_len(n)
+ for (i in 1:n)
+ {
+ V = V[-1]
+ pairs = c(pairs, lapply(V, function(v) c(i,v)))
+ }
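+ # (equivalently: pairs = combn(n, 2, simplify=FALSE))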
+
+ computeSaveCWT = function(index)
+ {
+ if (parll)
+ {
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ }
+ ts <- scale(ts(synchrones[,index]), center=TRUE, scale=scaled)
+ totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
+ ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
+ # Normalization
+ sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
+ sqres <- sweep(ts.cwt,2,sqs,'*')
+ res <- sqres / max(Mod(sqres))
+ #TODO: serialize the CWTs, retrieve them via getDataInFile();
+ #--> OK, just store them as plain series of size delta*ncol (53*17519)
+ binarize(c(as.double(Re(res)),as.double(Im(res))), cwt_file, ncol(res), ",", nbytes, endian)
+ }
- Xwer_dist <- matrix(0., n, n)
- fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
- for (i in 1:(n-1))
+ # Retrieve a CWT from cwt_file. The storage layout is still a TODO above, so
+ # this reconstruction is a hedged sketch: it assumes getDataInFile() returns
+ # the real and imaginary parts of the CWT as its two requested columns
+ getCWT = function(index)
+ {
+ res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
+ matrix(complex(real=res[,1], imaginary=res[,2]), nrow=delta)
+ }
+
+ if (parll)
+ {
+ cl = parallel::makeCluster(ncores_clust)
+ synchrones_desc <- bigmemory::describe(synchrones)
+ Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
+ parallel::clusterExport(cl, varlist=c("synchrones_desc","Xwer_dist_desc","totnoct",
+ "nvoice","w0","s0log","noctave","s0","verbose","getCWT"), envir=environment())
+ }
+
+ if (verbose)
{
- for (j in (i+1):n)
+ cat("--- Precompute and serialize all CWT\n")
+ }
+ ignored <-
+ if (parll)
+ parallel::parLapply(cl, 1:n, computeSaveCWT)
+ else
+ lapply(1:n, computeSaveCWT)
+
+ # Distance between synchrones i and j (stored in columns)
+ computeDistancesIJ = function(pair)
+ {
+ if (parll)
{
- #TODO: later, compute CWT here (because not enough storage space for 200k series)
- # 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
- num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
- WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
- Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
- Xwer_dist[j,i] <- Xwer_dist[i,j]
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
+ }
+
+ i = pair[1] ; j = pair[2]
+ if (verbose && j==i+1)
+ cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
+ cwt_i <- getCWT(i)
+ cwt_j <- getCWT(j)
+
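+ # epclustFilter() smooths the cross- and auto-scalograms over time (moving
+ # average); wer2 below is then a wavelet analogue of a squared correlation
+ # between series i and j: close to 1 when the scalograms have similar shape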
+ num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+ WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+ WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
+ wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
+ Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
+ Xwer_dist[j,i] <- Xwer_dist[i,j]
+ Xwer_dist[i,i] = 0.
+ }
+
+ if (verbose)
+ cat("--- Compute WER distances\n")
+ ignored <-
+ if (parll)
+ parallel::parLapply(cl, pairs, computeDistancesIJ)
+ else
+ lapply(pairs, computeDistancesIJ)
+
+ if (parll)
+ parallel::stopCluster(cl)
+
+ Xwer_dist[n,n] = 0.
+ distances <- Xwer_dist[,]
+ rm(Xwer_dist) ; gc()
+ distances #~small matrix K1 x K1
+}
+
+# Helper function to divide indices into balanced sets
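+# e.g. .spreadIndices(1:7, 3) == list(c(1,2,3), c(4,5,6,7))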
+.spreadIndices = function(indices, nb_per_set)
+{
+ L = length(indices)
+ nb_workers = floor( L / nb_per_set )
+ rem = L %% nb_per_set
+ if (nb_workers == 0 || (nb_workers==1 && rem==0))
+ {
+ # L <= nb_per_set, simple case
+ indices_workers = list(indices)
+ }
+ else
+ {
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
+ # Spread the remaining load among the workers
+ while (rem > 0)
+ {
+ index = rem%%nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
+ rem = rem - 1
}
}
- diag(Xwer_dist) <- numeric(n)
- Xwer_dist
+ indices_workers
}