-# Cluster one full task (nb_curves / ntasks series)
-clusteringTask = function(indices, ncores)
+# Cluster one full task (nb_curves / ntasks series); only step 1
+clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
{
cl = parallel::makeCluster(ncores)
- parallel::clusterExport(cl,
- varlist=c("K1","getCoefs"),
- envir=environment())
+ parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
repeat
{
- nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
- indices_workers = lapply(seq_len(nb_workers), function(i) {
- upper_bound = ifelse( i<nb_workers,
- min(nb_series_per_chunk*i,length(indices_clust)), length(indices_clust) )
- indices_clust[(nb_series_per_chunk*(i-1)+1):upper_bound]
- })
- indices_clust = unlist( parallel::parLapply(cl, indices_workers, function(indices)
- computeClusters1(indices, getCoefs, K1)) )
- if (length(indices_clust) == K1)
+ nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_series_per_chunk*(i-1)+1):min(nb_series_per_chunk*i,length(indices))] )
+ # Spread the remaining load among the workers, one index at a time
+ rem = max( 0, length(indices) - nb_workers*nb_series_per_chunk )
+ while (rem > 0)
+ {
+ index = rem %% nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem)[1])
+ rem = rem - 1
+ }
+ indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ computeClusters1(getCoefs(inds), K1) ]
+ } ) )
+ if (length(indices) == K1)
break
}
- parallel::stopCluster(cl_clust)
- if (WER == "end")
- return (indices_clust)
- #WER=="mix"
- computeClusters2(indices_clust, K2, getSeries, to_file=TRUE)
+ parallel::stopCluster(cl)
+ indices #medoids
}
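+# Usage sketch (not from the package; all names below are hypothetical).
+# Wrapping the coefficients matrix in local() matters: a getCoefs defined in
+# the global environment would not carry its data to the workers, whereas a
+# closure over a local environment is serialized together with it. This also
+# assumes the epclust package is installed, since the workers require() it.
+#   getCoefs = local({
+#     coefs = matrix(rnorm(200*10), nrow=200) #200 series, 10 coefficients each
+#     function(indices) coefs[indices, , drop=FALSE]
+#   })
+#   medoids_ids = clusteringTask(1:200, getCoefs, K1=15,
+#     nb_series_per_chunk=50, ncores=2) #--> 15 indices into 1:200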
-# Apply the clustering algorithm (PAM) on a coeffs or distances matrix
-computeClusters1 = function(indices, getCoefs, K1)
- indices[ cluster::pam(getCoefs(indices), K1, diss=FALSE)$id.med ]
+# Apply the clustering algorithm (PAM) on a coefficients matrix; return medoid row numbers
+computeClusters1 = function(coefs, K1)
+ cluster::pam(coefs, K1, diss=FALSE)$id.med
-# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
-computeClusters2 = function(indices, K2, getSeries, to_file)
+# Cluster the stage-1 medoids into K2 groups, using WER distances on synchrones
+computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
{
- if (is.null(indices))
- {
- #get series from file
- }
-#Then K-means after WER...
- if (WER=="mix" > 0)
- {
- curves = computeSynchrones(indices)
- dists = computeWerDists(curves)
- indices = computeClusters(dists, K2, diss=TRUE)
- }
- if (to_file)
- #write results to file (JUST series ; no possible ID here)
+ synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
+ medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$id.med , ]
}
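+# Sketch of the contract assumed for getRefSeries (hypothetical in-memory
+# version): return the requested rows, or NULL once the range lies entirely
+# past the last series -- NULL is what stops the repeat loop in
+# computeSynchrones below.
+#   getRefSeries = local({
+#     series = matrix(rnorm(1000*100), nrow=1000) #1000 reference series
+#     function(indices) {
+#       indices = indices[indices <= nrow(series)]
+#       if (length(indices) == 0) return (NULL)
+#       series[indices, , drop=FALSE]
+#     }
+#   })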
-# Compute the synchrones curves (sum of clusters elements) from a clustering result
-computeSynchrones = function(inds)
- sapply(seq_along(inds), colMeans(getSeries(inds[[i]]$indices,inds[[i]]$ids)))
+# Compute the synchrones curves (one per cluster: the mean of its members),
+# assigning each reference series to its closest medoid
+computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
+{
+ K = nrow(medoids)
+ synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
+ counts = rep(0,K)
+ index = 1
+ repeat
+ {
+ range = (index-1) + seq_len(nb_series_per_chunk)
+ ref_series = getRefSeries(range)
+ if (is.null(ref_series))
+ break
+ #get medoids indices for this chunk of series
+ for (i in seq_len(nrow(ref_series)))
+ {
+ j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
+ synchrones[j,] = synchrones[j,] + ref_series[i,]
+ counts[j] = counts[j] + 1
+ }
+ index = index + nb_series_per_chunk
+ }
+ #NOTE: could some clusters be empty (e.g. when the series already come from
+ # stage 2)? ...maybe; we just hope the resulting K1' stays well above K2
+ synchrones = sweep(synchrones, 1, counts, '/')
+ synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , , drop=FALSE ]
+}
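+# Continuing the sketch above (hypothetical shapes): with 6 medoid curves of
+# length 100, the result has at most 6 rows, since rows of empty clusters
+# are dropped:
+#   medoids = matrix(rnorm(6*100), nrow=6)
+#   synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk=200)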
-# Compute the WER distance between the synchrones curves (in columns)
-computeWerDist = function(curves)
+# Compute the WER distance between the synchrones curves (in rows)
+computeWerDists = function(curves)
{
if (!require("Rwave", quietly=TRUE))
stop("Unable to load Rwave library")
-# (normalized) observations node with CWT
+# CWT of the (normalized) observations
Xcwt4 <- lapply(seq_len(n), function(i) {
- ts <- scale(ts(curves[,i]), center=TRUE, scale=scaled)
+ ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
#Normalization