#' @name clustering
#' @rdname clustering
-#' @aliases clusteringTask1 computeClusters1 computeClusters2
+#' @aliases clusteringTask1 clusteringTask2 computeClusters1 computeClusters2
#'
#' @title Two-stage clustering, within one task (see \code{claws()})
#'
#' and then WER distances computations, before applying the clustering algorithm.
#' \code{computeClusters1()} and \code{computeClusters2()} correspond to the atomic
#' clustering procedures respectively for stage 1 and 2. The former applies the
-#' clustering algorithm (PAM) on a contributions matrix, while the latter clusters
-#' a chunk of series inside one task (~max nb_series_per_chunk)
+#' first clustering algorithm on a contributions matrix, while the latter clusters
+#' a set of series inside one task (~nb_items_clust1).
#'
#' @param indices Range of series indices to cluster in parallel (initial data)
#' @param getContribs Function to retrieve contributions from initial series indices:
#' @rdname clustering
#' @export
-clusteringTask1 = function(
- indices, getContribs, K1, nb_series_per_chunk, ncores_clust=1, verbose=FALSE, parll=TRUE)
+clusteringTask1 = function(indices, getContribs, K1, nb_items_clust1,
+ ncores_clust=1, verbose=FALSE, parll=TRUE)
{
if (verbose)
cat(paste("*** Clustering task 1 on ",length(indices)," lines\n", sep=""))
if (parll)
{
- cl = parallel::makeCluster(ncores_clust)
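+ # outfile="" keeps the workers' output visible on the master console (useful with verbose=TRUE)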
+ cl = parallel::makeCluster(ncores_clust, outfile = "")
parallel::clusterExport(cl, varlist=c("getContribs","K1","verbose"), envir=environment())
}
while (length(indices) > K1)
{
- indices_workers = .spreadIndices(indices, nb_series_per_chunk)
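+ # min set size = K1+1: each chunk must contain more than K1 series for a K1-medoids clustering to be possible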
+ indices_workers = .spreadIndices(indices, nb_items_clust1, K1+1)
indices <-
if (parll)
{
{
if (verbose)
cat(paste(" computeClusters1() on ",nrow(contribs)," lines\n", sep=""))
- cluster::pam(contribs, K1, diss=FALSE)$id.med
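+ # pam() clusters the rows of its input; series are stored as columns of contribs, hence the transpose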
+ cluster::pam( t(contribs) , K1, diss=FALSE)$id.med
}
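+
+# Illustrative sketch only (hypothetical toy data, not from the package): with 5 series
+# of 10 contributions each stored as columns,
+#   contribs_toy <- matrix(rnorm(50), nrow=10, ncol=5)
+#   cluster::pam(t(contribs_toy), 2, diss=FALSE)$id.med
+# returns the column indices of the 2 medoid series.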
#' @rdname clustering
{
if (verbose)
cat(paste(" computeClusters2() on ",nrow(distances)," lines\n", sep=""))
- cluster::pam(distances, K2, diss=TRUE)$id.med
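+ # diss=TRUE: 'distances' is interpreted as a precomputed dissimilarity matrix, not raw observations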
+ cluster::pam( distances , K2, diss=TRUE)$id.med
}
#' computeSynchrones
#' @param nb_ref_curves How many reference series? (This number is known at this stage)
#' @inheritParams claws
#'
-#' @return A big.matrix of size K1 x L where L = data_length
+#' @return A big.matrix of size L x K1 where L = length of a series
#'
#' @export
computeSynchrones = function(medoids, getRefSeries,
{
if (parll)
synchronicity::lock(m)
- synchrones[ mi[i], ] = synchrones[ mi[i], ] + ref_series[i,]
- counts[ mi[i] ] = counts[ mi[i] ] + 1 #TODO: remove counts?
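+ # Series are stored as columns now: add reference series i into the synchrone of its medoid (column mi[i])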
+ synchrones[, mi[i] ] = synchrones[, mi[i] ] + ref_series[,i]
+ counts[ mi[i] ] = counts[ mi[i] ] + 1 #TODO: remove counts? ...or as arg?!
if (parll)
synchronicity::unlock(m)
}
K = nrow(medoids) ; L = ncol(medoids)
# Use bigmemory (shared==TRUE by default) + synchronicity to fill synchrones in //
# TODO: if size > RAM (not our case), use file-backed big.matrix
- synchrones = bigmemory::big.matrix(nrow=K, ncol=L, type="double", init=0.)
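+ # One column per synchrone (series-as-columns layout): L = series length, K = number of medoids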
+ synchrones = bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)
counts = bigmemory::big.matrix(nrow=K, ncol=1, type="double", init=0)
# synchronicity is only for Linux & MacOS; on Windows: run sequentially
parll = (requireNamespace("synchronicity",quietly=TRUE)
#TODO: can we avoid this loop? ( synchrones = sweep(synchrones, 1, counts, '/') )
for (i in seq_len(K))
- synchrones[i,] = synchrones[i,] / counts[i,1]
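+ # Divide each synchrone by its cluster size, so it becomes the mean curve of that cluster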
+ synchrones[,i] = synchrones[,i] / counts[i]
#NOTE: odds for some clusters to be empty? (when series already come from stage 2)
# ...maybe; but let's hope the resulting K1' is still quite a bit bigger than K2
- noNA_rows = sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,])))
+ noNA_rows = sapply(seq_len(K), function(i) all(!is.nan(synchrones[,i])))
if (all(noNA_rows))
return (synchrones)
# Else: some clusters are empty, need to slice synchrones
- synchrones[noNA_rows,]
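+ # Subsetting a big.matrix returns an ordinary matrix, so wrap the non-empty columns back into a big.matrix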
+ bigmemory::as.big.matrix(synchrones[,noNA_rows])
}
#' computeWerDists
{
#from cwt_file ...
res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
- ###############TODO:
+ ###############TODO:
}
# Distance between rows i and j
}
# Helper function to divide indices into balanced sets
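+# e.g. spreading 11 indices with max_per_set=3 and min_nb_per_set=2 gives sets of sizes 3,3,3,2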
-.spreadIndices = function(indices, nb_per_chunk)
+.spreadIndices = function(indices, max_per_set, min_nb_per_set = 1)
{
L = length(indices)
- nb_workers = floor( L / nb_per_chunk )
- if (nb_workers == 0)
+ nb_workers = floor( L / max_per_set )
+ rem = L %% max_per_set
+ if (nb_workers == 0 || (nb_workers==1 && rem==0))
{
- # L < nb_series_per_chunk, simple case
+ # L <= max_per_set, simple case
indices_workers = list(indices)
}
else
{
indices_workers = lapply( seq_len(nb_workers), function(i)
- indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
- # Spread the remaining load among the workers
+ indices[(max_per_set*(i-1)+1):(max_per_set*i)] )
+ # Two cases: remainder is >= min_nb_per_set (easy)...
+ if (rem >= min_nb_per_set)
+ indices_workers = c( indices_workers, list(tail(indices,rem)) )
+ #...or < min_nb_per_set: harder, need to remove indices from current sets to feed
+ # the too-small remainder. It may fail: then fallback to "slightly bigger sets"
+ else
+ {
+ save_indices_workers = indices_workers
+ small_set = tail(indices,rem)
+ # Try feeding small_set until it reaches min_nb_per_set, while keeping the other sets big enough
+ # Spread the remaining load among the workers
- rem = L %% nb_per_chunk
while (rem > 0)
{