#' two stage procedure in parallel (see details).
#' Input series must be sampled on the same time grid, with no missing values.
#'
-#' @details Summary of the function execution flow:
+#' Summary of the function execution flow:
+#' \enumerate{
+#' \item Compute and serialize all contributions, obtained through discrete wavelet
+#' decomposition (see Antoniadis & al. [2013])
+#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
#' \enumerate{
-#' \item Compute and serialize all contributions, obtained through discrete wavelet
-#' decomposition (see Antoniadis & al. [2013])
-#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
-#' \enumerate{
-#' \item iterate the first clustering algorithm on its aggregated outputs,
-#' on inputs of size \code{nb_items_clust1}
-#' \item optionally, if WER=="mix":
-#' a) compute the K1 synchrones curves,
-#' b) compute WER distances (K1xK1 matrix) between synchrones and
-#' c) apply the second clustering algorithm
-#' }
-#' \item Launch a final task on the aggregated outputs of all previous tasks:
-#' in the case WER=="end" this task takes indices in input, otherwise
-#' (medoid) curves
+#' \item iterate the first clustering algorithm on its aggregated outputs,
+#' on inputs of size \code{nb_items_clust}
+#' \item optionally, if WER=="mix":
+#' a) compute WER distances (K1xK1 matrix) between medoids and
+#' b) apply the second clustering algorithm (output: K2 indices)
#' }
-#' The main argument -- \code{getSeries} -- has a quite misleading name, since it can be
-#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve
-#' series; the name was chosen because all types of arguments are converted to a function.
-#' When \code{getSeries} is given as a function, it must take a single argument,
-#' 'indices', integer vector equal to the indices of the curves to retrieve;
-#' see SQLite example. The nature and role of other arguments should be clear
+#' \item Launch a final task on the aggregated outputs of all previous tasks:
+#' ntasks*K1 medoids if WER=="end", ntasks*K2 otherwise
+#' \item Compute synchrones (sum of series within each final group)
+#' }
+#' \cr
+#' The main argument -- \code{series} -- has a somewhat misleading name, since it can be
+#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve series.
+#' When \code{series} is given as a function, it must take a single argument,
+#' 'indices', integer vector equal to the indices of the curves to retrieve;
+#' see SQLite example.
+#' WARNING: the return value must be a matrix (in columns), or NULL if no matches.
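+#' For instance, a minimal sketch of such a function, assuming the series sit in an
+#' in-memory matrix \code{dataset} (a hypothetical name; series in columns):
+#' \preformatted{
+#' getSeries <- function(indices) {
+#'   # Drop out-of-range indices; return NULL if nothing remains
+#'   indices <- indices[indices <= ncol(dataset)]
+#'   if (length(indices) == 0) return (NULL)
+#'   dataset[, indices, drop=FALSE]
+#' }
+#' }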
+#' \cr
+#' Note: Since we make no assumptions on the initial data, there is a possibility that,
+#' even when serialized, contributions do not fit in RAM. For example,
+#' 30e6 series of length 100,000 would lead to a contribution matrix of more than 4GB.
+#' It is therefore safer to place these in (binary) files; that is what we do.
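+#' A quick back-of-envelope check of that figure, assuming 8-byte doubles:
+#' \preformatted{
+#' 30e6 * ceiling(log2(1e5)) * 8 / 2^30 # 17 wavelet levels -> ~3.8 GiB
+#' }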
#'
-#' @param getSeries Access to the (time-)series, which can be of one of the three
+#' @param series Access to the (time-)series, which can be of one of the
#' following types:
#' \itemize{
#' \item [big.]matrix: each column contains the (time-ordered) values of one time-series
#' @param K1 Number of clusters to be found after stage 1 (K1 << N [number of series])
#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
#' @param nb_series_per_chunk (Maximum) number of series to retrieve in one batch
+#' @param nb_items_clust (~Maximum) number of items in clustering algorithm 1 input
#' @param algoClust1 Clustering algorithm for stage 1. A function which takes (data, K)
#' as argument where data is a matrix in columns and K the desired number of clusters,
-#' and outputs K medoids ranks. Default: PAM.
-#' In our method, this function is called on iterated medoids during stage 1
+#' and outputs K medoids ranks. Default: PAM. In our method, this function is called
+#' on iterated medoids during stage 1
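+#' As an illustration, a sketched custom alternative based on stats::kmeans, which
+#' returns centers rather than medoids, hence the extra mapping step:
+#' \preformatted{
+#' algoClust1 <- function(data, K) {
+#'   km <- stats::kmeans(t(data), K) # series are given in columns
+#'   # Map each center to the rank of its nearest column
+#'   sapply(seq_len(K), function(k)
+#'     which.min(colSums((data - km$centers[k,])^2)))
+#' }
+#' }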
#' @param algoClust2 Clustering algorithm for stage 2. A function which takes (dists, K)
#' as argument where dists is a matrix of distances and K the desired number of clusters,
-#' and outputs K clusters representatives (curves). Default: PAM.
-#' In our method, this function is called on a matrix of K1 x K1 (WER) distances computed
-# between synchrones
-#' @param nb_items_clust1 (~Maximum) number of items in input of the clustering algorithm
-#' for stage 1. At worst, a clustering algorithm might be called with ~2*nb_items_clust1
-#' items; but this could only happen at the last few iterations.
+#' and outputs K medoids ranks. Default: PAM. In our method, this function is called
+#' on a matrix of K1 x K1 (WER) distances computed between medoids after algorithm 1
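+#' A sketched alternative using hierarchical clustering (the first member of each
+#' group stands in for a true medoid here):
+#' \preformatted{
+#' algoClust2 <- function(dists, K) {
+#'   clusters <- stats::cutree(stats::hclust(stats::as.dist(dists)), K)
+#'   sapply(seq_len(K), function(k) which(clusters == k)[1])
+#' }
+#' }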
#' @param wav_filt Wavelet transform filter; see ?wavelets::wt.filter
#' @param contrib_type Type of contribution: "relative", "logit" or "absolute" (any prefix)
#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
#' stage 2 at the end of each task
-#' @param sync_mean TRUE to compute a synchrone as a mean curve, FALSE for a sum
+#' @param nvoice Number of voices within each octave for CWT computations
#' @param random TRUE (default) for random chunks repartition
#' @param ntasks Number of tasks (parallel iterations to obtain K1 [if WER=="end"]
#' or K2 [if WER=="mix"] medoids); default: 1.
#' Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
#' @param ncores_tasks Number of parallel tasks (1 to disable: sequential tasks)
-#' @param ncores_clust Number of parallel clusterings in one task (4 should be a minimum)
+#' @param ncores_clust Number of parallel clusterings in one task (3 should be a minimum)
#' @param sep Separator in CSV input file (if any provided)
#' @param nbytes Number of bytes to serialize a floating-point number; 4 or 8
#' @param endian Endianness for (de)serialization ("little" or "big")
#' @param verbose Level of verbosity (0/FALSE for nothing or 1/TRUE for all; devel stage)
#' @param parll TRUE to fully parallelize; otherwise run sequentially (debug, comparison)
#'
-#' @return A matrix of the final K2 medoids curves, in columns
+#' @return A list with:
+#' \itemize{
+#' \item medoids: a matrix of the final K2 medoids curves, in columns
+#' \item ranks: corresponding indices in the dataset
+#' \item synchrones: a matrix of the K2 sums of series within each final group
+#' }
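+#' For instance, with a call like those in the examples below:
+#' \preformatted{
+#' res <- claws(series, K1=60, K2=6, nb_series_per_chunk=200)
+#' dim(res$medoids)    # L x K2
+#' dim(res$synchrones) # L x K2
+#' }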
#'
#' @references Clustering functional data using Wavelets [2013];
#' A. Antoniadis, X. Brossat, J. Cugliari & J.-M. Poggi.
#' ref_series = matrix( c(cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)), ncol=6 )
#' library(wmtsa)
#' series = do.call( cbind, lapply( 1:6, function(i)
-#' do.call(cbind, wmtsa::wavBootstrap(ref_series[i,], n.realization=400)) ) )
+#' do.call(cbind, wmtsa::wavBootstrap(ref_series[,i], n.realization=400)) ) )
#' #dim(series) #c(10001,2400)
-#' medoids_ascii = claws(series, K1=60, K2=6, 200, verbose=TRUE)
+#' res_ascii = claws(series, K1=60, K2=6, 200, verbose=TRUE)
#'
#' # Same example, from CSV file
#' csv_file = "/tmp/epclust_series.csv"
#' write.table(series, csv_file, sep=",", row.names=FALSE, col.names=FALSE)
-#' medoids_csv = claws(csv_file, K1=60, K2=6, 200)
+#' res_csv = claws(csv_file, K1=60, K2=6, 200)
#'
#' # Same example, from binary file
#' bin_file <- "/tmp/epclust_series.bin"
+#' nbytes <- 8
#' endian <- "little"
#' binarize(csv_file, bin_file, 500, nbytes, endian)
#' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
-#' medoids_bin <- claws(getSeries, K1=60, K2=6, 200)
+#' res_bin <- claws(getSeries, K1=60, K2=6, 200)
#' unlink(csv_file)
#' unlink(bin_file)
#'
#' request <- paste(request, indexToID_inDB[i], ",", sep="")
#' request <- paste(request, ")", sep="")
#' df_series <- dbGetQuery(series_db, request)
-#' as.matrix(df_series[,"value"], nrow=serie_length)
+#' if (nrow(df_series) >= 1)
+#' matrix(df_series[,"value"], nrow=serie_length)
+#' else
+#' NULL
#' }
-#' medoids_db = claws(getSeries, K1=60, K2=6, 200))
+#' res_db = claws(getSeries, K1=60, K2=6, 200)
#' dbDisconnect(series_db)
#'
-#' # All computed medoids should be the same:
-#' digest::sha1(medoids_ascii)
-#' digest::sha1(medoids_csv)
-#' digest::sha1(medoids_bin)
-#' digest::sha1(medoids_db)
+#' # All results should be the same:
+#' library(digest)
+#' digest::sha1(res_ascii)
+#' digest::sha1(res_csv)
+#' digest::sha1(res_bin)
+#' digest::sha1(res_db)
#' }
#' @export
-claws <- function(getSeries, K1, K2, nb_series_per_chunk,
- nb_items_clust1=7*K1,
- algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE)$id.med,
- algoClust2=function(dists,K) t( cluster::pam(dists,K,diss=TRUE)$medoids ),
- wav_filt="d8", contrib_type="absolute",
- WER="end",sync_mean=TRUE,
- random=TRUE,
- ntasks=1, ncores_tasks=1, ncores_clust=4,
- sep=",",
- nbytes=4, endian=.Platform$endian,
- verbose=FALSE, parll=TRUE)
+claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=7*K1,
+ algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE,pamonce=1)$id.med,
+ algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE,pamonce=1)$id.med,
+ wav_filt="d8", contrib_type="absolute", WER="end", nvoice=4, random=TRUE,
+ ntasks=1, ncores_tasks=1, ncores_clust=4, sep=",", nbytes=4,
+ endian=.Platform$endian, verbose=FALSE, parll=TRUE)
{
# Check/transform arguments
- if (!is.matrix(getSeries) && !bigmemory::is.big.matrix(getSeries)
- && !is.function(getSeries)
- && !methods::is(getSeries,"connection") && !is.character(getSeries))
+ if (!is.matrix(series) && !bigmemory::is.big.matrix(series)
+ && !is.function(series)
+ && !methods::is(series,"connection") && !is.character(series))
{
- stop("'getSeries': [big]matrix, function, file or valid connection (no NA)")
+ stop("'series': [big]matrix, function, file or valid connection (no NA)")
}
K1 <- .toInteger(K1, function(x) x>=2)
K2 <- .toInteger(K2, function(x) x>=2)
nb_series_per_chunk <- .toInteger(nb_series_per_chunk, function(x) x>=1)
- # K1 (number of clusters at step 1) cannot exceed nb_series_per_chunk, because we will need
- # to load K1 series in memory for clustering stage 2.
- if (K1 > nb_series_per_chunk)
- stop("'K1' cannot exceed 'nb_series_per_chunk'")
- nb_items_clust1 <- .toInteger(nb_items_clust1, function(x) x>K1)
+ nb_items_clust <- .toInteger(nb_items_clust, function(x) x>K1)
random <- .toLogical(random)
tryCatch( {ignored <- wavelets::wt.filter(wav_filt)},
error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter") )
stop("'contrib_type' in {'relative','absolute','logit'}")
if (WER!="end" && WER!="mix")
stop("'WER': in {'end','mix'}")
- sync_mean <- .toLogical(sync_mean)
ntasks <- .toInteger(ntasks, function(x) x>=1)
ncores_tasks <- .toInteger(ncores_tasks, function(x) x>=1)
verbose <- .toLogical(verbose)
parll <- .toLogical(parll)
- # Since we don't make assumptions on initial data, there is a possibility that even
- # when serialized, contributions or synchrones do not fit in RAM. For example,
- # 30e6 series of length 100,000 would lead to a +4Go contribution matrix. Therefore,
- # it's safer to place these in (binary) files, located in the following folder.
- bin_dir <- ".epclust_bin/"
- dir.create(bin_dir, showWarnings=FALSE, mode="0755")
-
- # Binarize series if getSeries is not a function; the aim is to always use a function,
+ # Binarize series if it is not a function; the aim is to always use a function,
# to keep the processing uniform. An equally good alternative would be to use a file-backed
- # bigmemory::big.matrix, but it would break the uniformity.
- if (!is.function(getSeries))
+ # bigmemory::big.matrix, but it would break the "all-is-function" pattern.
+ if (!is.function(series))
{
if (verbose)
- cat("...Serialize time-series\n")
- series_file = paste(bin_dir,"data",sep="") ; unlink(series_file)
- binarize(getSeries, series_file, nb_series_per_chunk, sep, nbytes, endian)
+ cat("...Serialize time-series (or retrieve past binary file)\n")
+ series_file = ".series.epclust.bin"
+ if (!file.exists(series_file))
+ binarize(series, series_file, nb_series_per_chunk, sep, nbytes, endian)
getSeries = function(inds) getDataInFile(inds, series_file, nbytes, endian)
}
+ else
+ getSeries = series
# Serialize all computed wavelets contributions into a file
- contribs_file = paste(bin_dir,"contribs",sep="") ; unlink(contribs_file)
+ contribs_file = ".contribs.epclust.bin"
index = 1
nb_curves = 0
if (verbose)
- cat("...Compute contributions and serialize them\n")
- nb_curves = binarizeTransform(getSeries,
- function(series) curvesToContribs(series, wf, ctype),
- contribs_file, nb_series_per_chunk, nbytes, endian)
+ cat("...Compute contributions and serialize them (or retrieve past binary file)\n")
+ if (!file.exists(contribs_file))
+ {
+ nb_curves = binarizeTransform(getSeries,
+ function(curves) curvesToContribs(curves, wav_filt, contrib_type),
+ contribs_file, nb_series_per_chunk, nbytes, endian)
+ }
+ else
+ {
+ # TODO: duplicate from getDataInFile() in de_serialize.R
+ contribs_size = file.info(contribs_file)$size #number of bytes in the file
+ contrib_length = readBin(contribs_file, "integer", n=1, size=8, endian=endian)
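+ # (Assumed layout, mirroring de_serialize.R: an 8-byte integer header holding the
+ # length of one contribution vector, then nbytes bytes per stored value)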
+ nb_curves = (contribs_size-8) / (nbytes*contrib_length)
+ }
getContribs = function(indices) getDataInFile(indices, contribs_file, nbytes, endian)
# A few sanity checks: do not continue if too little data is available.
if (nb_series_per_task < K2)
stop("Too many tasks: less series in one task than final number of clusters")
- # Generate a random permutation of 1:N (if random==TRUE); otherwise just use arrival
- # (storage) order.
+ # Generate a random permutation of 1:N (if random==TRUE);
+ # otherwise just use arrival (storage) order.
indices_all = if (random) sample(nb_curves) else seq_len(nb_curves)
# Split (all) indices into ntasks groups of ~same size
indices_tasks = lapply(seq_len(ntasks), function(i) {
# Initialize parallel runs: outfile="" allows verbose traces to be output in the console
# under Linux. All necessary variables are passed to the workers.
cl = parallel::makeCluster(ncores_tasks, outfile="")
- varlist = c("getSeries","getContribs","K1","K2","algoClust1","algoClust2",
- "nb_series_per_chunk","nb_items_clust1","ncores_clust","sep",
- "nbytes","endian","verbose","parll")
+ varlist = c("ncores_clust","verbose","parll", #task 1 & 2
+ "K1","getContribs","algoClust1","nb_items_clust") #task 1
if (WER=="mix")
- varlist = c(varlist, "medoids_file")
+ {
+ # Add variables for task 2
+ varlist = c(varlist, "K2","getSeries","algoClust2","nb_series_per_chunk",
+ "nvoice","nbytes","endian")
+ }
parallel::clusterExport(cl, varlist, envir = environment())
}
# This function achieves one complete clustering task, divided into stage 1 + stage 2.
- # stage 1: n indices --> clusteringTask1(...) --> K1 medoids
- # stage 2: K1 medoids --> clusteringTask2(...) --> K2 medoids,
+ # stage 1: n indices --> clusteringTask1(...) --> K1 medoids (indices)
+ # stage 2: K1 indices --> K1xK1 WER distances --> clusteringTask2(...) --> K2 medoids,
# where n = N / ntasks, N being the total number of curves.
runTwoStepClustering = function(inds)
{
- # When running in parallel, the environment is blank: we need to load required
+ # When running in parallel, the environment is blank: we need to load the required
# packages, and pass useful variables.
if (parll && ntasks>1)
require("epclust", quietly=TRUE)
- indices_medoids = clusteringTask1(
- inds, getContribs, K1, algoClust1, nb_series_per_chunk, ncores_clust, verbose, parll)
+ indices_medoids = clusteringTask1(inds, getContribs, K1, algoClust1,
+ nb_items_clust, ncores_clust, verbose, parll)
if (WER=="mix")
{
- if (parll && ntasks>1)
- require("bigmemory", quietly=TRUE)
- medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
- medoids2 = clusteringTask2(medoids1, K2, algoClust2, getSeries, nb_curves,
- nb_series_per_chunk, sync_mean, nbytes, endian, ncores_clust, verbose, parll)
- binarize(medoids2, medoids_file, nb_series_per_chunk, sep, nbytes, endian)
- return (vector("integer",0))
+ indices_medoids = clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+ nb_series_per_chunk, nvoice, nbytes, endian, ncores_clust, verbose, parll)
}
indices_medoids
}
- # Synchrones (medoids) need to be stored only if WER=="mix"; indeed in this case, every
- # task output is a set of new (medoids) curves. If WER=="end" however, output is just a
- # set of indices, representing some initial series.
- if (WER=="mix")
- {medoids_file = paste(bin_dir,"medoids",sep="") ; unlink(medoids_file)}
-
if (verbose)
{
message = paste("...Run ",ntasks," x stage 1", sep="")
cat(paste(message,"\n", sep=""))
}
- # As explained above, indices will be assigned to ntasks*K1 medoids indices [if WER=="end"],
- # or nothing (empty vector) if WER=="mix"; in this case, medoids (synchrones) are stored
- # in a file.
- indices <-
+ # As explained above, after all runs we obtain ntasks*[K1 or K2] medoids indices,
+ # depending on whether WER=="end" or "mix", respectively.
+ indices_medoids_all <-
if (parll && ntasks>1)
unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
else
unlist( lapply(indices_tasks, runTwoStepClustering) )
+
if (parll && ntasks>1)
parallel::stopCluster(cl)
- # Right before the final stage, two situations are possible:
- # a. data to be processed now sit in binary format in medoids_file (if WER=="mix")
- # b. data still is the initial set of curves, referenced by the ntasks*K1 indices
- # So, the function getSeries() will potentially change. However, computeSynchrones()
- # requires a function retrieving the initial series. Thus, the next line saves future
- # conditional instructions.
- getRefSeries = getSeries
-
- if (WER=="mix")
- {
- indices = seq_len(ntasks*K2)
- # Now series (synchrones) must be retrieved from medoids_file
- getSeries = function(inds) getDataInFile(inds, medoids_file, nbytes, endian)
- # Contributions must be re-computed
- unlink(contribs_file)
- index = 1
- if (verbose)
- cat("...Serialize contributions computed on synchrones\n")
- ignored = binarizeTransform(getSeries,
- function(series) curvesToContribs(series, wf, ctype),
- contribs_file, nb_series_per_chunk, nbytes, endian)
- }
-
-#TODO: check THAT
+ # For the last stage, ncores_tasks*(ncores_clust+1) cores should be available:
+ # - ncores_tasks for level 1 parallelism
+ # - ncores_tasks*ncores_clust for level 2 parallelism,
+ # but since an extension MPI <--> tasks / OpenMP <--> sub-tasks is on the way,
+ # it's better to just re-use ncores_clust for the final stage 2
+ ncores_last_stage <- ncores_clust
-
- # Run step2 on resulting indices or series (from file)
+ # Run last clustering tasks to obtain only K2 medoids indices
if (verbose)
cat("...Run final // stage 1 + stage 2\n")
- indices_medoids = clusteringTask1(indices, getContribs, K1, algoClust1,
- nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
- medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
- medoids2 = clusteringTask2(medoids1, K2, algoClust2, getRefSeries, nb_curves,
- nb_series_per_chunk, sync_mean, nbytes, endian, ncores_tasks*ncores_clust, verbose, parll)
-
- # Cleanup: remove temporary binary files and their folder
- unlink(bin_dir, recursive=TRUE)
-
- # Return medoids as a standard matrix, since K2 series have to fit in RAM
- # (clustering algorithm 1 takes K1 > K2 of them as input)
- medoids2[,]
-}
-
-#' curvesToContribs
-#'
-#' Compute the discrete wavelet coefficients for each series, and aggregate them in
-#' energy contribution across scales as described in https://arxiv.org/abs/1101.4744v2
-#'
-#' @param series [big.]matrix of series (in columns), of size L x n
-#' @inheritParams claws
-#'
-#' @return A [big.]matrix of size log(L) x n containing contributions in columns
-#'
-#' @export
-curvesToContribs = function(series, wav_filt, contrib_type, coin=FALSE)
-{
- L = nrow(series)
- if (coin) browser()
- D = ceiling( log2(L) )
- nb_sample_points = 2^D
- apply(series, 2, function(x) {
- interpolated_curve = spline(1:L, x, n=nb_sample_points)$y
- W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
- nrj = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
- if (contrib_type!="absolute")
- nrj = nrj / sum(nrj)
- if (contrib_type=="logit")
- nrj = - log(1 - nrj)
- nrj
- })
-}
+ indices_medoids = clusteringTask1(indices_medoids_all, getContribs, K1, algoClust1,
+ nb_items_clust, ncores_tasks*ncores_clust, verbose, parll)
+ indices_medoids = clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+ nb_series_per_chunk, nvoice, nbytes, endian, ncores_last_stage, verbose, parll)
-# Check integer arguments with functional conditions
-.toInteger <- function(x, condition)
-{
- errWarn <- function(ignored)
- paste("Cannot convert argument' ",substitute(x),"' to integer", sep="")
- if (!is.integer(x))
- tryCatch({x = as.integer(x)[1]; if (is.na(x)) stop()},
- warning = errWarn, error = errWarn)
- if (!condition(x))
- {
- stop(paste("Argument '",substitute(x),
- "' does not verify condition ",body(condition), sep=""))
- }
- x
-}
+ # Compute synchrones, that is to say the sums of series within each of the K2
+ # final groups.
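+ # (Conceptually: synchrones[,k] is the rowSums of all series assigned to medoid k)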
+ medoids = getSeries(indices_medoids)
+ synchrones = computeSynchrones(medoids, getSeries, nb_curves, nb_series_per_chunk,
+ ncores_last_stage, verbose, parll)
-# Check logical arguments
-.toLogical <- function(x)
-{
- errWarn <- function(ignored)
- paste("Cannot convert argument' ",substitute(x),"' to logical", sep="")
- if (!is.logical(x))
- tryCatch({x = as.logical(x)[1]; if (is.na(x)) stop()},
- warning = errWarn, error = errWarn)
- x
+ # NOTE: no need to use big.matrix here, since there are only K2 << K1 << N remaining curves
+ list("medoids"=medoids, "ranks"=indices_medoids, "synchrones"=synchrones)
}