# One unit of the embarrassingly-parallel synchrones computation: accumulate
# the chunk of reference series identified by `indices` into the shared
# `synchrones` big.matrix.
#
# Captured from the enclosing scope:
#   parll        - logical; TRUE when running on a parallel cluster
#   getRefSeries - function mapping indices -> matrix of series (one per column)
#   synchrones_desc, medoids_desc, m_desc - descriptors used to re-attach the
#     shared big.matrix objects and the mutex on each worker
# Returns NULL; its useful effect is the in-place update of `synchrones`.
computeSynchronesChunk <- function(indices)
{
	if (parll)
	{
		# big.matrix and mutex objects cannot cross process boundaries:
		# each worker must re-attach them from their descriptors.
		# epclust is attached so computeMedoidsIndices is visible on workers.
		require("bigmemory", quietly=TRUE)
		requireNamespace("synchronicity", quietly=TRUE)
		require("epclust", quietly=TRUE)
		synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
		medoids <- bigmemory::attach.big.matrix(medoids_desc)
		m <- synchronicity::attach.mutex(m_desc)
	}

	# Fetch this chunk of reference series, then map each series to the
	# index of its closest medoid
	chunk <- getRefSeries(indices)
	closest <- computeMedoidsIndices(medoids@address, chunk)

	# Add every series onto the synchrone of its medoid. Several workers may
	# write the same column concurrently, hence the per-update mutex.
	for (j in seq_len(ncol(chunk)))
	{
		if (parll)
			synchronicity::lock(m)
		synchrones[, closest[j] ] <- synchrones[, closest[j] ] + chunk[,j]
		if (parll)
			synchronicity::unlock(m)
	}
	NULL
}
+
K <- ncol(medoids)
L <- nrow(medoids)

# Shared accumulator for the K synchrones of length L, filled in parallel:
# bigmemory (shared==TRUE by default) + a synchronicity mutex for the writes
synchrones <- bigmemory::big.matrix(nrow=L, ncol=K, type="double", init=0.)

# NOTE: synchronicity is only for Linux & MacOS; on Windows (or when the
# package is unavailable) fall back to a sequential computation
parll <- (parll && requireNamespace("synchronicity",quietly=TRUE)
	&& Sys.info()['sysname'] != "Windows")
if (parll)
{
	m <- synchronicity::boost.mutex() #for lock/unlock, see computeSynchronesChunk
	# mutex and big.matrix objects cannot be passed directly:
	# workers will re-attach them from their descriptions
	m_desc <- synchronicity::describe(m)
	synchrones_desc <- bigmemory::describe(synchrones)
	medoids_desc <- bigmemory::describe(medoids)
	cl <- parallel::makeCluster(ncores_clust)
	# Release the cluster even if an error interrupts parLapply below
	# (previously an explicit stopCluster leaked workers on error)
	on.exit(parallel::stopCluster(cl), add=TRUE)
	parallel::clusterExport(cl, envir=environment(),
		varlist=c("synchrones_desc","m_desc","medoids_desc","getRefSeries"))
}

if (verbose)
	cat(paste0("--- Compute ",K," synchrones with ",nb_ref_curves," series\n"))

# Balance tasks by splitting the indices set - maybe not so evenly, but
# max==TRUE in next call ensures that no set has more than nb_series_per_chunk items.
indices_workers <- .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk, max=TRUE)
# Results are discarded: computeSynchronesChunk works by side effect on synchrones
ignored <-
	if (parll)
		parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
	else
		lapply(indices_workers, computeSynchronesChunk)

return (synchrones)