clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
{
cl = parallel::makeCluster(ncores)
+ parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
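+ # (getCoefs and K1 must be exported: the parLapply workers below run in fresh R sessions)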
repeat
{
- nb_workers = max( 1, round( length(indices) / nb_series_per_chunk ) )
- indices_workers = lapply(seq_len(nb_workers), function(i) {
- upper_bound = ifelse( i<nb_workers,
- min(nb_series_per_chunk*i,length(indices)), length(indices) )
- indices[(nb_series_per_chunk*(i-1)+1):upper_bound]
- })
- indices = unlist( parallel::parLapply(cl, indices_workers, function(inds)
- computeClusters1(getCoefs(inds), K1)) )
+ nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
+ # min() guards the case length(indices) < nb_series_per_chunk (single worker)
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_series_per_chunk*(i-1)+1):min(nb_series_per_chunk*i,length(indices))] )
+ # Spread the remaining load among the workers (rem <= 0 if nothing is left over)
+ rem = length(indices) - nb_series_per_chunk*nb_workers
+ while (rem > 0)
+ {
+ index = rem%%nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem)[1]) #one index at a time
+ rem = rem - 1
+ }
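+ # e.g. with indices = 1:11 and nb_series_per_chunk=3: workers get chunks 1:3, 4:6, 7:9,
+ # then leftover indices 10 and 11 are appended to workers 3 and 2 respectively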
+ indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ computeClusters1(getCoefs(inds), K1) ]
+ } ) )
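+ # each pass maps chunks to their K1 medoids, shrinking indices to nb_workers*K1 values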
if (length(indices) == K1)
break
}
parallel::stopCluster(cl)
indices #the final K1 medoids indices
}
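+ # usage sketch, with a hypothetical getCoefs closure (indices -> rows of a coefficients matrix):
+ #   coefs = matrix(rnorm(3000), nrow=300)
+ #   getCoefs = function(inds) coefs[inds,,drop=FALSE]
+ #   medoids_ids = clusteringTask(1:300, getCoefs, K1=5, nb_series_per_chunk=50, ncores=2)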
# Apply the clustering algorithm (PAM) to a matrix of coefficients or distances
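+ # ($id.med gives the medoid row indices within coefs; the caller maps them back to global indices)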
computeClusters1 = function(coefs, K1)
- indices[ cluster::pam(coefs, K1, diss=FALSE)$id.med ]
+ cluster::pam(coefs, K1, diss=FALSE)$id.med
# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
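+ # (with diss=TRUE, pam's $medoids holds the medoid indices in the distance matrix, used to subset rows)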
computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
{
synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
- cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids
+ medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
}
# Compute the synchrone curves (sum of each cluster's elements) from a clustering result
computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
{
- #the getSeries(indices) are the medoids --> init a zero vector for each, then increment
- #with the closest curves (getSeriesForSynchrones)... --> closest in the L2-norm sense?
K = nrow(medoids)
synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
counts = rep(0,K)
#assumed contract: getRefSeries(indices) returns a chunk of series, or NULL when exhausted
index = 1
repeat
{
ref_series = getRefSeries(index:(index+nb_series_per_chunk-1))
if (is.null(ref_series))
break
#get medoids indices for this chunk of series
for (i in seq_len(nrow(ref_series)))
{
- j = which.min( rowSums( sweep(medoids, 2, series[i,], '-')^2 ) )
- synchrones[j,] = synchrones[j,] + series[i,]
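+ # assign the curve to its closest medoid (squared L2 distance) and accumulate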
+ j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
+ synchrones[j,] = synchrones[j,] + ref_series[i,]
counts[j] = counts[j] + 1
}
index = index + nb_series_per_chunk
}
#NOTE: can some clusters be empty? (when the series already come from stage 2)
- sweep(synchrones, 1, counts, '/')
+ # ...maybe; but hopefully the resulting K1' stays well above K2
+ synchrones = sweep(synchrones, 1, counts, '/')
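+ # drop empty clusters: a zero count yields a NaN row after the division above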
+ synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
}
# Compute the WER distances between the synchrones (one curve per row)
-computeWerDist = function(curves)
+computeWerDists = function(curves)
{
if (!require("Rwave", quietly=TRUE))
stop("Unable to load Rwave library")