#' two-stage procedure in parallel (see details).
#' Input series must be sampled on the same time grid, with no missing values.
#'
-#' @details Summary of the function execution flow:
+#' Summary of the function execution flow:
+#' \enumerate{
+#' \item Compute and serialize all contributions, obtained through discrete wavelet
+#' decomposition (see Antoniadis et al. [2013])
+#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
#' \enumerate{
-#' \item Compute and serialize all contributions, obtained through discrete wavelet
-#' decomposition (see Antoniadis & al. [2013])
-#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
-#' \enumerate{
-#' \item iterate the first clustering algorithm on its aggregated outputs,
-#' on inputs of size \code{nb_items_clust1}
-#' \item optionally, if WER=="mix":
-#' a) compute the K1 synchrones curves,
-#' b) compute WER distances (K1xK1 matrix) between synchrones and
-#' c) apply the second clustering algorithm
-#' }
-#' \item Launch a final task on the aggregated outputs of all previous tasks:
-#' in the case WER=="end" this task takes indices in input, otherwise
-#' (medoid) curves
+#' \item iterate the first clustering algorithm on its aggregated outputs,
+#' taking inputs of size \code{nb_items_clust1}
+#' \item optionally, if WER=="mix":
+#' a) compute the K1 synchrones,
+#' b) compute the WER distances (a K1xK1 matrix) between synchrones, and
+#' c) apply the second clustering algorithm
#' }
-#' The main argument -- \code{getSeries} -- has a quite misleading name, since it can be
-#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve
-#' series; the name was chosen because all types of arguments are converted to a function.
-#' When \code{getSeries} is given as a function, it must take a single argument,
-#' 'indices', integer vector equal to the indices of the curves to retrieve;
-#' see SQLite example. The nature and role of other arguments should be clear
+#' \item Launch a final task on the aggregated outputs of all previous tasks:
+#' if WER=="end" this task takes indices as input, otherwise
+#' (medoid) curves
+#' }
+#' \cr
+#' The main argument -- \code{getSeries} -- has a somewhat misleading name, since it can be
+#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve series;
+#' the name was chosen because all types of arguments are converted to a function.
+#' When \code{getSeries} is given as a function, it must take a single argument,
+#' 'indices': an integer vector containing the indices of the curves to retrieve;
+#' see the SQLite example. The nature and role of the other arguments should be clear.
+#' \cr
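+#' As a minimal sketch, assuming the series are stored as columns of some in-memory
+#' matrix \code{dataset} (a hypothetical name), such a function could be:
+#' \preformatted{
+#' getSeries <- function(indices) dataset[,indices]
+#' }
+#' \cr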
+#' Note: Since we make no assumptions on the initial data, there is a possibility that,
+#' even when serialized, contributions or synchrones do not fit in RAM. For example,
+#' 30e6 series of length 100,000 would lead to a contribution matrix of more than 4GB.
+#' Therefore, it's safer to place these in (binary) files; that's what we do.
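+#' A back-of-envelope check of that figure (a sketch, assuming 8-byte doubles):
+#' \preformatted{
+#' n <- 30e6 ; L <- 1e5
+#' D <- ceiling(log2(L))  # 17 contribution coefficients per series
+#' n * D * 8              # ~4.1e9 bytes: over 4GB
+#' }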
#'
#' @param getSeries Access to the (time-)series, which can be of one of the three
#' following types:
#' @param contrib_type Type of contribution: "relative", "logit" or "absolute" (any
#' unambiguous prefix is accepted)
#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
#' stage 2 at the end of each task
-#' @param sync_mean TRUE to compute a synchrone as a mean curve, FALSE for a sum
#' @param random TRUE (default) for random chunks repartition
#' @param ntasks Number of tasks (parallel iterations to obtain K1 [if WER=="end"]
#' or K2 [if WER=="mix"] medoids); default: 1.
#' digest::sha1(medoids_db)
#' }
#' @export
-claws <- function(getSeries, K1, K2, nb_series_per_chunk,
- nb_items_clust1=7*K1,
+claws <- function(getSeries, K1, K2, nb_series_per_chunk, nb_items_clust1=7*K1,
algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE)$id.med,
algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE)$id.med,
- wav_filt="d8", contrib_type="absolute",
- WER="end",sync_mean=TRUE,
- random=TRUE,
- ntasks=1, ncores_tasks=1, ncores_clust=4,
- sep=",",
- nbytes=4, endian=.Platform$endian,
- verbose=FALSE, parll=TRUE)
+ wav_filt="d8", contrib_type="absolute", WER="end", random=TRUE,
+ ntasks=1, ncores_tasks=1, ncores_clust=4, sep=",", nbytes=4,
+ endian=.Platform$endian, verbose=FALSE, parll=TRUE)
{
# Check/transform arguments
if (!is.matrix(getSeries) && !bigmemory::is.big.matrix(getSeries)
stop("'contrib_type' in {'relative','absolute','logit'}")
if (WER!="end" && WER!="mix")
stop("'WER': in {'end','mix'}")
- sync_mean <- .toLogical(sync_mean)
random <- .toLogical(random)
ntasks <- .toInteger(ntasks, function(x) x>=1)
ncores_tasks <- .toInteger(ncores_tasks, function(x) x>=1)
verbose <- .toLogical(verbose)
parll <- .toLogical(parll)
- # Since we don't make assumptions on initial data, there is a possibility that even
- # when serialized, contributions or synchrones do not fit in RAM. For example,
- # 30e6 series of length 100,000 would lead to a +4Go contribution matrix. Therefore,
- # it's safer to place these in (binary) files, located in the following folder.
- bin_dir <- ".epclust_bin/"
- dir.create(bin_dir, showWarnings=FALSE, mode="0755")
-
# Binarize series if getSeries is not a function; the aim is to always use a function,
# so that all inputs are treated uniformly. An equally good alternative would be to use a file-backed
- # bigmemory::big.matrix, but it would break the uniformity.
+ # bigmemory::big.matrix, but it would break the "all-is-function" pattern.
if (!is.function(getSeries))
{
if (verbose)
cat("...Serialize time-series\n")
- series_file = paste(bin_dir,"data",sep="") ; unlink(series_file)
+ series_file = ".series.bin" ; unlink(series_file)
binarize(getSeries, series_file, nb_series_per_chunk, sep, nbytes, endian)
getSeries = function(inds) getDataInFile(inds, series_file, nbytes, endian)
}
# Serialize all computed wavelets contributions into a file
- contribs_file = paste(bin_dir,"contribs",sep="") ; unlink(contribs_file)
+ contribs_file = ".contribs.bin" ; unlink(contribs_file)
index = 1
nb_curves = 0
if (verbose)
if (nb_series_per_task < K2)
stop("Too many tasks: less series in one task than final number of clusters")
- # Generate a random permutation of 1:N (if random==TRUE); otherwise just use arrival
- # (storage) order.
+ # Generate a random permutation of 1:N (if random==TRUE);
+ # otherwise just use arrival (storage) order.
indices_all = if (random) sample(nb_curves) else seq_len(nb_curves)
# Split (all) indices into ntasks groups of ~same size
indices_tasks = lapply(seq_len(ntasks), function(i) {
# under Linux. All necessary variables are passed to the workers.
cl = parallel::makeCluster(ncores_tasks, outfile="")
varlist = c("getSeries","getContribs","K1","K2","algoClust1","algoClust2",
- "nb_series_per_chunk","nb_items_clust1","ncores_clust","sep",
- "nbytes","endian","verbose","parll")
- if (WER=="mix")
+ "nb_series_per_chunk","nb_items_clust1","ncores_clust",
+ "sep","nbytes","endian","verbose","parll")
+ if (WER=="mix" && ntasks>1)
varlist = c(varlist, "medoids_file")
parallel::clusterExport(cl, varlist, envir = environment())
}
# where n = N / ntasks, N being the total number of curves.
runTwoStepClustering = function(inds)
{
- # When running in parallel, the environment is blank: we need to load required
+ # When running in parallel, the environment is blank: we need to load the required
# packages, and pass useful variables.
if (parll && ntasks>1)
require("epclust", quietly=TRUE)
indices_medoids = clusteringTask1(
inds, getContribs, K1, algoClust1, nb_series_per_chunk, ncores_clust, verbose, parll)
- if (WER=="mix")
+ if (WER=="mix" && ntasks>1)
{
- if (parll && ntasks>1)
+ if (parll)
require("bigmemory", quietly=TRUE)
medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
medoids2 = clusteringTask2(medoids1, K2, algoClust2, getSeries, nb_curves,
- nb_series_per_chunk, sync_mean, nbytes, endian, ncores_clust, verbose, parll)
+ nb_series_per_chunk, nbytes, endian, ncores_clust, verbose, parll)
binarize(medoids2, medoids_file, nb_series_per_chunk, sep, nbytes, endian)
return (vector("integer",0))
}
# Synchrones (medoids) need to be stored only if WER=="mix" and ntasks>1; in this case, every
# task outputs a set of new (medoid) curves. If WER=="end" however, the output is just a
# set of indices, representing some initial series.
- if (WER=="mix")
- {medoids_file = paste(bin_dir,"medoids",sep="") ; unlink(medoids_file)}
+ if (WER=="mix" && ntasks>1)
+ {medoids_file = ".medoids.bin" ; unlink(medoids_file)}
if (verbose)
{
}
# As explained above, 'indices' will be assigned the ntasks*K1 medoid indices [if WER=="end"],
- # or nothing (empty vector) if WER=="mix"; in this case, medoids (synchrones) are stored
- # in a file.
+ # or nothing (empty vector) if WER=="mix"; in this case, synchrones are stored in a file.
indices <-
if (parll && ntasks>1)
unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
parallel::stopCluster(cl)
# Right before the final stage, two situations are possible:
- # a. data to be processed now sit in binary format in medoids_file (if WER=="mix")
+ # a. data to be processed now sit in a binary format in medoids_file (if WER=="mix")
# b. data is still the initial set of curves, referenced by the ntasks*K1 indices
# So, the function getSeries() will potentially change. However, computeSynchrones()
# requires a function retrieving the initial series. Thus, the next line saves future
# conditional instructions.
getRefSeries = getSeries
- if (WER=="mix")
+ if (WER=="mix" && ntasks>1)
{
indices = seq_len(ntasks*K2)
# Now series (synchrones) must be retrieved from medoids_file
contribs_file, nb_series_per_chunk, nbytes, endian)
}
-#TODO: check THAT
-
-
# Run step2 on resulting indices or series (from file)
if (verbose)
cat("...Run final // stage 1 + stage 2\n")
nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
medoids2 = clusteringTask2(medoids1, K2, algoClust2, getRefSeries, nb_curves,
- nb_series_per_chunk, sync_mean, nbytes, endian, ncores_tasks*ncores_clust, verbose, parll)
+ nb_series_per_chunk, nbytes, endian, ncores_tasks*ncores_clust, verbose, parll)
- # Cleanup: remove temporary binary files and their folder
- unlink(bin_dir, recursive=TRUE)
+	# Cleanup: remove temporary binary files; series_file and medoids_file might
+	# not exist (getSeries given as a function, or WER=="end"), hence separate tryCatch's
+	tryCatch(unlink(series_file), error = function(e) {})
+	tryCatch(unlink(medoids_file), error = function(e) {})
+	unlink(contribs_file)
# Return medoids as a standard matrix, since K2 series have to fit in RAM
# (clustering algorithm 1 takes K1 > K2 of them as input)
series = as.matrix(series) # a 1D series could occur
L = nrow(series)
D = ceiling( log2(L) )
+ # Series are interpolated to all have length 2^D
nb_sample_points = 2^D
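+	# Each column holds one series: compute its vector of D contribution coefficients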
apply(series, 2, function(x) {
interpolated_curve = spline(1:L, x, n=nb_sample_points)$y
W = wavelets::dwt(interpolated_curve, filter=wav_filt, D)@W
+		# Compute the Euclidean norm of the wavelet coefficients at each scale
nrj = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
if (contrib_type!="absolute")
nrj = nrj / sum(nrj)