#' CLAWS: CLustering with wAvelets and Wer distanceS
#'
-#' Groups electricity power curves (or any series of similar nature) by applying PAM
-#' algorithm in parallel to chunks of size \code{nb_series_per_chunk}. Input series
-#' must be sampled on the same time grid, no missing values.
+#' Cluster electricity power curves (or any series of similar nature) by applying a
+#' two-stage procedure in parallel (see details).
+#' Input series must be sampled on the same time grid, with no missing values.
+#'
+#' @details Summary of the function execution flow:
+#' \enumerate{
+#' \item Compute and serialize all contributions, obtained through discrete wavelet
+#' decomposition (see Antoniadis et al. [2013])
+#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
+#' \enumerate{
+#' \item iterate the first clustering algorithm on its aggregated outputs,
+#' on inputs of size \code{nb_items_clust1}
+#' \item optionally, if WER=="mix":
+#' a) compute the K1 synchrone curves,
+#' b) compute WER distances (K1xK1 matrix) between synchrones and
+#' c) apply the second clustering algorithm
+#' }
+#' \item Launch a final task on the aggregated outputs of all previous tasks:
+#' in the case WER=="end" this task takes indices as input; otherwise
+#' it takes (medoid) curves
+#' }
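+#' As an illustration, a full "mix" run on an in-memory matrix of series, with
+#' hypothetical cluster sizes and chunk sizes, could look like
+#' \preformatted{medoids <- claws(series, K1=200, K2=15, c(200,500), WER="mix")}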
+#' The main argument -- \code{getSeries} -- has a somewhat misleading name, since it can
+#' be either a [big.]matrix, a CSV file, a connection or a user function to retrieve
+#' series; the name was chosen because all types of arguments are converted to a function.
+#' When \code{getSeries} is given as a function, it must take a single argument,
+#' 'indices', an integer vector containing the indices of the curves to retrieve;
+#' see the SQLite example. The nature and role of the other arguments should be clear.
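+#' For instance, assuming the series are stored in the columns of an in-memory
+#' matrix \code{dataset}, a minimal such function would be
+#' \preformatted{getSeries <- function(indices) dataset[,indices]}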
#'
#' @param getSeries Access to the (time-)series, which can be of one of the three
#' following types:
#' \itemize{
-#' \item [big.]matrix: each line contains all the values for one time-serie, ordered by time
+#' \item [big.]matrix: each column contains the (time-ordered) values of one time series
-#' \item connection: any R connection object providing lines as described above
+#' \item connection: any R connection object providing series, one per line (as in a CSV file)
#' \item character: name of a CSV file containing series in rows (no header)
#' \item function: a custom way to retrieve the curves; it has only one argument:
-#' the indices of the series to be retrieved. See examples
+#' the indices of the series to be retrieved. See the SQLite example
#' }
-#' @inheritParams clustering
-#' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
+#' @param K1 Number of clusters to be found after stage 1 (K1 << N [number of series])
#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
-#' @param wf Wavelet transform filter; see ?wavelets::wt.filter
-#' @param ctype Type of contribution: "relative" or "absolute" (or any prefix)
-#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply stage 2
-#' at the end of each task
+#' @param nb_per_chunk (Maximum) number of items to retrieve in one batch, for both
+#' types of retrieval, as a vector of size 2: the first value applies to series,
+#' the second to contributions; e.g. \code{c(200,500)}
+#' @param nb_items_clust1 (Maximum) number of items in input of the clustering algorithm
+#' for stage 1
+#' @param wav_filt Wavelet transform filter; see ?wavelets::wt.filter
+#' @param contrib_type Type of contribution: "relative", "logit" or "absolute" (any prefix)
+#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
+#' stage 2 at the end of each task
-#' @param random TRUE (default) for random chunks repartition
+#' @param random TRUE (default) for a random distribution of series into chunks
-#' @param ntasks Number of tasks (parallel iterations to obtain K1 medoids); default: 1.
-#' Note: ntasks << N, so that N is "roughly divisible" by N (number of series)
-#' @param ncores_tasks "MPI" number of parallel tasks (1 to disable: sequential tasks)
-#' @param ncores_clust "OpenMP" number of parallel clusterings in one task
-#' @param nb_series_per_chunk (~Maximum) number of series in each group, inside a task
-#' @param min_series_per_chunk Minimum number of series in each group
+#' @param ntasks Number of tasks (parallel iterations to obtain K1 [if WER=="end"]
+#' or K2 [if WER=="mix"] medoids); default: 1.
+#' Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
+#' @param ncores_tasks Number of parallel tasks (1 to disable: sequential tasks)
+#' @param ncores_clust Number of parallel clusterings in one task (4 is a reasonable minimum)
#' @param sep Separator in CSV input file (if any provided)
#' @param nbytes Number of bytes to serialize a floating-point number; 4 or 8
-#' @param endian Endianness to use for (de)serialization. Use "little" or "big" for portability
+#' @param endian Endianness for (de)serialization ("little" or "big")
#' @param verbose Level of verbosity (0/FALSE for nothing or 1/TRUE for all; devel stage)
#' @param parll TRUE to fully parallelize; otherwise run sequentially (debug, comparison)
#'
-#' @return A big.matrix of the final medoids curves (K2) in rows
+#' @return A matrix of the final K2 medoids curves, in columns
+#'
+#' @references Clustering functional data using Wavelets [2013];
+#' A. Antoniadis, X. Brossat, J. Cugliari & J.-M. Poggi.
+#' Inter. J. of Wavelets, Multiresolution and Information Processing,
+#' vol. 11, No 1, pp.1-30. doi:10.1142/S0219691313500033
#'
#' @examples
#' \dontrun{
-#' # WER distances computations are a bit too long for CRAN (for now)
+#' # WER distances computations are too long for CRAN (for now)
#'
#' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
#' x = seq(0,500,0.05)
#' L = length(x) #10001
-#' ref_series = matrix( c(cos(x), cos(2*x), cos(3*x), sin(x), sin(2*x), sin(3*x)),
-#' byrow=TRUE, ncol=L )
+#' ref_series = matrix( c(cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)), ncol=6 )
#' library(wmtsa)
-#' series = do.call( rbind, lapply( 1:6, function(i)
-#' do.call(rbind, wmtsa::wavBootstrap(ref_series[i,], n.realization=400)) ) )
+#' series = do.call( cbind, lapply( 1:6, function(i)
+#'   do.call(cbind, wmtsa::wavBootstrap(ref_series[,i], n.realization=400)) ) )
-#' #dim(series) #c(2400,10001)
+#' #dim(series) #c(10001,2400)
-#' medoids_ascii = claws(series, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' medoids_ascii = claws(series, K1=60, K2=6, nb_per_chunk=c(200,500), verbose=TRUE)
#'
#' # Same example, from CSV file
#' csv_file = "/tmp/epclust_series.csv"
-#' write.table(series, csv_file, sep=",", row.names=FALSE, col.names=FALSE)
+#' # The CSV file stores series in rows, hence the transposition
+#' write.table(t(series), csv_file, sep=",", row.names=FALSE, col.names=FALSE)
-#' medoids_csv = claws(csv_file, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' medoids_csv = claws(csv_file, K1=60, K2=6, nb_per_chunk=c(200,500))
#'
#' # Same example, from binary file
-#' bin_file = "/tmp/epclust_series.bin"
-#' nbytes = 8
-#' endian = "little"
-#' epclust::binarize(csv_file, bin_file, 500, nbytes, endian)
-#' getSeries = function(indices) getDataInFile(indices, bin_file, nbytes, endian)
-#' medoids_bin = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' bin_file <- "/tmp/epclust_series.bin"
+#' nbytes <- 8
+#' endian <- "little"
+#' binarize(csv_file, bin_file, 500, nbytes, endian)
+#' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
+#' medoids_bin <- claws(getSeries, K1=60, K2=6, nb_per_chunk=c(200,500))
#' unlink(csv_file)
#' unlink(bin_file)
#'
#' library(DBI)
#' series_db <- dbConnect(RSQLite::SQLite(), "file::memory:")
#' # Prepare data.frame in DB-format
-#' n = nrow(series)
-#' time_values = data.frame(
+#' n <- ncol(series)
+#' time_values <- data.frame(
#' id = rep(1:n,each=L),
-#'   time = rep( as.POSIXct(1800*(0:n),"GMT",origin="2001-01-01"), L ),
-#'   value = as.double(t(series)) )
+#'   time = rep( as.POSIXct(1800*(0:(L-1)),"GMT",origin="2001-01-01"), n ),
+#'   value = as.double(series) )
+#' dbWriteTable(series_db, "time_values", time_values)
#' # Fill associative array, map index to identifier
#' indexToID_inDB <- as.character(
#' dbGetQuery(series_db, 'SELECT DISTINCT id FROM time_values')[,"id"] )
-#' getSeries = function(indices) {
-#' request = "SELECT id,value FROM times_values WHERE id in ("
+#' serie_length <- as.integer( dbGetQuery(series_db,
+#'   paste("SELECT COUNT(*) FROM time_values WHERE id = ",indexToID_inDB[1],sep="")) )
+#' getSeries <- function(indices) {
+#'   request <- "SELECT id,value FROM time_values WHERE id in ("
#' for (i in indices)
-#' request = paste(request, i, ",", sep="")
-#' request = paste(request, ")", sep="")
-#' df_series = dbGetQuery(series_db, request)
-#' # Assume that all series share same length at this stage
-#' ts_length = sum(df_series[,"id"] == df_series[1,"id"])
-#' t( as.matrix(df_series[,"value"], nrow=ts_length) )
+#'     request <- paste(request, indexToID_inDB[i], ",", sep="")
+#'   # Drop the trailing comma before closing the IN (...) list
+#'   request <- paste(substr(request, 1, nchar(request)-1), ")", sep="")
+#'   df_series <- dbGetQuery(series_db, request)
+#'   # All series share the same length: refold the values column into a matrix
+#'   matrix(df_series[,"value"], nrow=serie_length)
#' }
-#' medoids_db = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' medoids_db <- claws(getSeries, K1=60, K2=6, nb_per_chunk=c(200,500))
#' dbDisconnect(series_db)
#'
#' # All computed medoids should be the same:
+#' digest::sha1(medoids_ascii)
+#' digest::sha1(medoids_csv)
+#' digest::sha1(medoids_bin)
#' digest::sha1(medoids_db)
#' }
#' @export
-claws = function(getSeries, K1, K2,
- wf,ctype, #stage 1
+claws <- function(getSeries, K1, K2,
+	nb_per_chunk, nb_items_clust1=7*K1, #volumes of data
+ wav_filt="d8",contrib_type="absolute", #stage 1
WER="end", #stage 2
random=TRUE, #randomize series order?
- ntasks=1, ncores_tasks=1, ncores_clust=4, #control parallelism
- nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1, #chunk size
+ ntasks=1, ncores_tasks=1, ncores_clust=4, #parallelism
sep=",", #ASCII input separator
nbytes=4, endian=.Platform$endian, #serialization (write,read)
verbose=FALSE, parll=TRUE)
{
stop("'getSeries': [big]matrix, function, file or valid connection (no NA)")
}
- K1 = .toInteger(K1, function(x) x>=2)
- K2 = .toInteger(K2, function(x) x>=2)
- if (!is.logical(random))
- stop("'random': logical")
- tryCatch(
- {ignored <- wavelets::wt.filter(wf)},
- error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter"))
+ K1 <- .toInteger(K1, function(x) x>=2)
+ K2 <- .toInteger(K2, function(x) x>=2)
+ if (!is.numeric(nb_per_chunk) || length(nb_per_chunk)!=2)
+ stop("'nb_per_chunk': numeric, size 2")
+ nb_per_chunk[1] <- .toInteger(nb_per_chunk[1], function(x) x>=1)
+	# A batch of contributions should have at least as many elements as a batch of
+	# series, because a contribution vector is much shorter than its series
+ nb_per_chunk[2] <- max(.toInteger(nb_per_chunk[2],function(x) x>=1), nb_per_chunk[1])
+ nb_items_clust1 <- .toInteger(nb_items_clust1, function(x) x>K1)
+ random <- .toLogical(random)
+	tryCatch(
+		{ignored <- wavelets::wt.filter(wav_filt)},
+		error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter"))
+	ctypes <- c("relative","absolute","logit")
+	contrib_type <- ctypes[ pmatch(contrib_type,ctypes) ]
+ if (is.na(contrib_type))
+ stop("'contrib_type' in {'relative','absolute','logit'}")
if (WER!="end" && WER!="mix")
- stop("WER takes values in {'end','mix'}")
- ntasks = .toInteger(ntasks, function(x) x>=1)
- ncores_tasks = .toInteger(ncores_tasks, function(x) x>=1)
- ncores_clust = .toInteger(ncores_clust, function(x) x>=1)
- nb_series_per_chunk = .toInteger(nb_series_per_chunk, function(x) x>=K1)
- min_series_per_chunk = .toInteger(K1, function(x) x>=K1 && x<=nb_series_per_chunk)
+ stop("'WER': in {'end','mix'}")
+ ntasks <- .toInteger(ntasks, function(x) x>=1)
+ ncores_tasks <- .toInteger(ncores_tasks, function(x) x>=1)
+ ncores_clust <- .toInteger(ncores_clust, function(x) x>=1)
if (!is.character(sep))
stop("'sep': character")
- nbytes = .toInteger(nbytes, function(x) x==4 || x==8)
+ nbytes <- .toInteger(nbytes, function(x) x==4 || x==8)
+ verbose <- .toLogical(verbose)
+ parll <- .toLogical(parll)
# Serialize series if required, to always use a function
- bin_dir = ".epclust_bin/"
+ bin_dir <- ".epclust_bin/"
dir.create(bin_dir, showWarnings=FALSE, mode="0755")
if (!is.function(getSeries))
{
contribs_file, nb_series_per_chunk, nbytes, endian)
getContribs = function(indices) getDataInFile(indices, contribs_file, nbytes, endian)
- if (nb_curves < min_series_per_chunk)
- stop("Not enough data: less rows than min_series_per_chunk!")
+	if (nb_curves < K2)
+		stop("Not enough data: fewer series than the final number of clusters")
nb_series_per_task = round(nb_curves / ntasks)
- if (nb_series_per_task < min_series_per_chunk)
- stop("Too many tasks: less series in one task than min_series_per_chunk!")
+	if (nb_series_per_task < K2)
+		stop("Too many tasks: fewer series per task than the final number of clusters")
runTwoStepClustering = function(inds)
{
inds, getContribs, K1, nb_series_per_chunk, ncores_clust, verbose, parll)
if (WER=="mix")
{
- require("bigmemory", quietly=TRUE)
+ if (parll && ntasks>1)
+ require("bigmemory", quietly=TRUE)
medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
medoids2 = clusteringTask2(medoids1, K2, getSeries, nb_curves, nb_series_per_chunk,
nbytes, endian, ncores_clust, verbose, parll)
{synchrones_file = paste(bin_dir,"synchrones",sep="") ; unlink(synchrones_file)}
if (parll && ntasks>1)
{
- cl = parallel::makeCluster(ncores_tasks)
+ cl = parallel::makeCluster(ncores_tasks, outfile="")
varlist = c("getSeries","getContribs","K1","K2","verbose","parll",
"nb_series_per_chunk","ntasks","ncores_clust","sep","nbytes","endian")
if (WER=="mix")
}
# 1000*K1 indices [if WER=="end"], or empty vector [if WER=="mix"] --> series on file
- if (parll && ntasks>1)
- indices = unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
- else
- indices = unlist( lapply(indices_tasks, runTwoStepClustering) )
+ indices <-
+ if (parll && ntasks>1)
+ unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
+ else
+ unlist( lapply(indices_tasks, runTwoStepClustering) )
if (parll && ntasks>1)
parallel::stopCluster(cl)
# Cleanup
unlink(bin_dir, recursive=TRUE)
- medoids2
+ medoids2[,]
}
#' curvesToContribs
+#'
-#' Compute the discrete wavelet coefficients for each series, and aggregate them in
-#' energy contribution across scales as described in https://arxiv.org/abs/1101.4744v2
+#' Compute the discrete wavelet coefficients for each series, and aggregate them into
+#' energy contributions across scales, as described in https://arxiv.org/abs/1101.4744v2
#'
-#' @param series Matrix of series (in rows), of size n x L
+#' @param series [big.]matrix of series (in columns), of size L x n
#' @inheritParams claws
#'
-#' @return A matrix of size n x log(L) containing contributions in rows
+#' @return A [big.]matrix of size log(L) x n containing contributions in columns
#'
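+#' @examples
+#' # Quick sanity check; "d8" and "absolute" are just sample choices
+#' series <- matrix( cos(seq(0,50,0.05)), ncol=1 )
+#' contribs <- curvesToContribs(series, "d8", "absolute")
+#' dim(contribs) # D x 1, where D == ceiling(log2(nrow(series)))
+#'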
#' @export
-curvesToContribs = function(series, wf, ctype)
+curvesToContribs <- function(series, wav_filt, contrib_type)
{
- L = length(series[1,])
+ L = nrow(series)
D = ceiling( log2(L) )
nb_sample_points = 2^D
- cont_types = c("relative","absolute")
- ctype = cont_types[ pmatch(ctype,cont_types) ]
- t( apply(series, 1, function(x) {
+ apply(series, 2, function(x) {
interpolated_curve = spline(1:L, x, n=nb_sample_points)$y
-		W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
+		W = wavelets::dwt(interpolated_curve, filter=wav_filt, D)@W
nrj = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
- if (ctype=="relative") nrj / sum(nrj) else nrj
- }) )
+ if (contrib_type!="absolute")
+ nrj = nrj / sum(nrj)
+ if (contrib_type=="logit")
+ nrj = - log(1 - nrj)
+ nrj
+ })
}
# Check integer arguments with functional conditions
.toInteger <- function(x, condition)
{
+	errWarn <- function(ignored)
+		stop(paste("Cannot convert argument '",substitute(x),"' to integer", sep=""))
if (!is.integer(x))
- tryCatch(
- {x = as.integer(x)[1]},
- error = function(e) paste("Cannot convert argument",substitute(x),"to integer")
- )
+ tryCatch({x = as.integer(x)[1]; if (is.na(x)) stop()},
+ warning = errWarn, error = errWarn)
if (!condition(x))
- stop(paste("Argument",substitute(x),"does not verify condition",body(condition)))
+ {
+		stop(paste("Argument '",substitute(x),
+			"' does not satisfy condition ",deparse(body(condition)), sep=""))
+ }
+ x
+}
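+# For instance, .toInteger("5", function(x) x>=1) yields 5L, while
+# .toInteger("abc", function(x) x>=1) stops with the conversion error above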
+
+# Check logical arguments
+.toLogical <- function(x)
+{
+	errWarn <- function(ignored)
+		stop(paste("Cannot convert argument '",substitute(x),"' to logical", sep=""))
+ if (!is.logical(x))
+ tryCatch({x = as.logical(x)[1]; if (is.na(x)) stop()},
+ warning = errWarn, error = errWarn)
x
}
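+
+# For instance, .toLogical("T") and .toLogical(1) both yield TRUE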