-# Cluster one full task (nb_curves / ntasks series)
-clusteringTask = function(indices,getSeries,getSeriesForSynchrones,synchrones_file,
- getCoefs,K1,K2,nb_series_per_chunk,ncores,to_file,ftype)
-{
- cl = parallel::makeCluster(ncores)
- repeat
- {
- nb_workers = max( 1, round( length(indices) / nb_series_per_chunk ) )
- indices_workers = lapply(seq_len(nb_workers), function(i) {
- upper_bound = ifelse( i<nb_workers,
- min(nb_series_per_chunk*i,length(indices)), length(indices) )
- indices[(nb_series_per_chunk*(i-1)+1):upper_bound]
- })
- indices = unlist( parallel::parLapply(cl, indices_workers, function(inds)
- computeClusters1(inds, getCoefs, K1)) )
- if (length(indices_clust) == K1)
- break
- }
- parallel::stopCluster(cl)
- if (K2 == 0)
- return (indices)
- computeClusters2(indices, K2, getSeries, getSeriesForSynchrones, to_file,
- nb_series_per_chunk,ftype)
- vector("integer",0)
-}
+#' Two-stage clustering, within one task (see \code{claws()})
+#'
+#' \code{clusteringTask1()} runs one full stage-1 task, which consists of iterated
+#' clustering on \code{nb_curves / ntasks} energy contributions, computed from
+#' discrete wavelet coefficients.
+#' \code{clusteringTask2()} runs one full stage-2 task, which consists of computing
+#' WER distances between the medoids (indices) output by stage 1, and then applying
+#' the second clustering algorithm to the distances matrix.
+#'
+#' @param getContribs Function to retrieve contributions from initial series indices:
+#' \code{getContribs(indices)} outputs a matrix of contributions (one column per series)
+#' @inheritParams claws
+#' @inheritParams computeSynchrones
+#' @inheritParams computeWerDists
+#'
+#' @return For \code{clusteringTask1()}, the indices of the K1 computed medoids;
+#' for \code{clusteringTask2()}, the indices of the K2 medoids.
+#'
+#' @name clustering
+#' @rdname clustering
+#' @aliases clusteringTask1 clusteringTask2
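+#'
+#' @examples
+#' \dontrun{
+#' # Illustrative sketch only: 'contribs' and 'series_matrix' (one series per column),
+#' # the PAM-based clustering functions and all parameter values below are assumptions
+#' # chosen for demonstration, not package defaults.
+#' getContribs <- function(indices) contribs[, indices, drop=FALSE]
+#' getSeries <- function(indices) series_matrix[, indices, drop=FALSE]
+#' algoClust1 <- function(ctrb, K) cluster::pam(t(ctrb), K, diss=FALSE)$id.med
+#' algoClust2 <- function(dists, K) cluster::pam(dists, K, diss=TRUE)$id.med
+#' medoid_ids <- clusteringTask1(seq_len(ncol(contribs)), getContribs, K1=60,
+#'   algoClust1, nb_items_clust=150, ncores_clust=1, verbose=TRUE)
+#' final_ids <- clusteringTask2(medoid_ids, getSeries, K2=20, algoClust2,
+#'   nb_series_per_chunk=6000, smooth_lvl=3, nvoice=4, nbytes=8,
+#'   endian=.Platform$endian, ncores_clust=1, verbose=TRUE)
+#' }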
+NULL
-# Apply the clustering algorithm (PAM) on a coeffs or distances matrix
-computeClusters1 = function(indices, getCoefs, K1)
+#' @rdname clustering
+#' @export
+clusteringTask1 <- function(indices, getContribs, K1, algoClust1, nb_items_clust,
+ ncores_clust=3, verbose=FALSE)
{
- coefs = getCoefs(indices)
- indices[ cluster::pam(coefs, K1, diss=FALSE)$id.med ]
-}
+ if (verbose)
+ cat(paste("*** Clustering task 1 on ",length(indices)," series [start]\n", sep=""))
-# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
-computeClusters2 = function(indices, K2, getSeries, getSeriesForSynchrones, to_file,
- nb_series_per_chunk, ftype)
-{
- curves = computeSynchrones(indices, getSeries, getSeriesForSynchrones, nb_series_per_chunk)
- dists = computeWerDists(curves)
- medoids = cluster::pam(dists, K2, diss=TRUE)$medoids
- if (to_file)
+ if (length(indices) <= K1)
+ return (indices)
+
+ parll <- (ncores_clust > 1)
+ if (parll)
{
- serialize(medoids, synchrones_file, ftype, nb_series_per_chunk)
- return (NULL)
+ # outfile=="" to see stderr/stdout on terminal
+ cl <-
+ if (verbose)
+ parallel::makeCluster(ncores_clust, outfile = "")
+ else
+ parallel::makeCluster(ncores_clust)
+ parallel::clusterExport(cl, c("getContribs","K1","verbose"), envir=environment())
}
- medoids
-}
-
-# Compute the synchrones curves (sum of clusters elements) from a clustering result
-computeSynchrones = function(indices, getSeries, getSeriesForSynchrones, nb_series_per_chunk)
-{
- #getSeries(indices) are the medoids --> init a zero vector for each, then accumulate the
- #closest curves (getSeriesForSynchrones)... --> closest in the L2 norm sense?
- medoids = getSeries(indices)
- K = nrow(medoids)
- synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
- counts = rep(0,K)
- index = 1
- repeat
+ # Iterate clustering algorithm 1 until K1 medoids are found
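+ # (For instance, with hypothetical sizes nb_items_clust=100 and K1=50, each chunk of
+ # about 100 series is reduced to 50 medoids, so every pass roughly halves the candidate
+ # set until K1 or fewer indices remain.)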
+ while (length(indices) > K1)
{
- series = getSeriesForSynchrones((index-1)+seq_len(nb_series_per_chunk))
- if (is.null(series))
- break
- #get medoids indices for this chunk of series
- index = which.min( rowSums( sweep(medoids, 2, series[i,], '-')^2 ) )
- synchrones[index,] = synchrones[index,] + series[i,]
- counts[index] = counts[index] + 1
+ # Balance tasks by splitting the indices set - as evenly as possible
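+ # (.splitIndices() is an internal helper, assumed here to return a list of index chunks
+ # of roughly nb_items_clust elements each; min_size=K1+1 avoids chunks too small to be
+ # clustered into K1 groups.)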
+ indices_workers <- .splitIndices(indices, nb_items_clust, min_size=K1+1)
+ indices <-
+ if (parll)
+ {
+ unlist( parallel::parLapply(cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ }) )
+ }
+ else
+ {
+ unlist( lapply(indices_workers, function(inds)
+ inds[ algoClust1(getContribs(inds), K1) ]
+ ) )
+ }
+ if (verbose)
+ {
+ cat(paste("*** Clustering task 1 on ",length(indices)," medoids [iter]\n", sep=""))
+ }
}
- #NOTE: odds for some clusters to be empty? (when series already come from stage 2)
- synchrones = sweep(synchrones, 1, counts, '/')
+ if (parll)
+ parallel::stopCluster(cl)
+
+ indices #medoids
}
-# Compute the WER distance between the synchrones curves (in rows)
-computeWerDist = function(curves)
+#' @rdname clustering
+#' @export
+clusteringTask2 <- function(indices, getSeries, K2, algoClust2, nb_series_per_chunk,
+ smooth_lvl, nvoice, nbytes, endian, ncores_clust=3, verbose=FALSE)
{
- if (!require("Rwave", quietly=TRUE))
- stop("Unable to load Rwave library")
- n <- nrow(curves)
- delta <- ncol(curves)
- #TODO: automatic tune of all these parameters ? (for other users)
- nvoice <- 4
- # noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(curves))
- noctave = 13
- # 4 here represent 2^5 = 32 half-hours ~ 1 day
- #NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
- scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
- #condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
- s0=2
- w0=2*pi
- scaled=FALSE
- s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
- totnoct = noctave + as.integer(s0log/nvoice) + 1
+ if (verbose)
+ cat(paste("*** Clustering task 2 on ",length(indices)," medoids\n", sep=""))
+
+ if (length(indices) <= K2)
+ return (indices)
- # (normalized) observations node with CWT
- Xcwt4 <- lapply(seq_len(n), function(i) {
- ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
- totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
- ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
- #Normalization
- sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
- sqres <- sweep(ts.cwt,MARGIN=2,sqs,'*')
- sqres / max(Mod(sqres))
- })
+ # A) Compute the WER distances (Wavelets Extended coefficient of deteRmination)
+ distances <- computeWerDists(indices, getSeries, nb_series_per_chunk,
+ smooth_lvl, nvoice, nbytes, endian, ncores_clust, verbose)
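+ # (In the former computeWerDist(), the pairwise distance was sqrt(delta * nscales * (1 - wer2)),
+ # with wer2 the ratio of the smoothed cross-CWT power of two curves to the product of their
+ # auto-CWT powers; computeWerDists() is assumed to apply the same definition, chunk by chunk.)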
- Xwer_dist <- matrix(0., n, n)
- fcoefs = rep(1/3, 3) #moving average on 3 values (TODO: very slow! correct?!)
- for (i in 1:(n-1))
- {
- for (j in (i+1):n)
- {
- #TODO: later, compute CWT here (because not enough storage space for 200k series)
- # 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
- num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
- WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
- Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
- Xwer_dist[j,i] <- Xwer_dist[i,j]
- }
- }
- diag(Xwer_dist) <- numeric(n)
- Xwer_dist
+ # B) Apply clustering algorithm 2 on the WER distances matrix
+ if (verbose)
+ cat(paste("*** algoClust2() on ",nrow(distances)," items\n", sep=""))
+ indices[ algoClust2(distances,K2) ]
}