X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fmain.R;h=09e1ed7feca546b476b3e69d8b9e379e535a967d;hb=14c10f2d252f45349e0b4fbf87e17dfbfae39f92;hp=977e61b235da2ea1ac4e0fc3c4cd40bbddcc049f;hpb=e161499b97c782aadfc287c22b55f85724f86fae;p=epclust.git

diff --git a/epclust/R/main.R b/epclust/R/main.R
index 977e61b..09e1ed7 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -1,67 +1,119 @@
 #' CLAWS: CLustering with wAvelets and Wer distanceS
 #'
-#' Groups electricity power curves (or any series of similar nature) by applying PAM
-#' algorithm in parallel to chunks of size \code{nb_series_per_chunk}. Input series
-#' must be sampled on the same time grid, no missing values.
+#' Cluster electricity power curves (or any series of a similar nature) by applying a
+#' two-stage procedure in parallel (see Details).
+#' Input series must be sampled on the same time grid, with no missing values.
 #'
-#' @param getSeries Access to the (time-)series, which can be of one of the three
+#' Summary of the function execution flow:
+#' \enumerate{
+#' \item Compute and serialize all contributions, obtained through discrete wavelet
+#' decomposition (see Antoniadis et al. [2013])
+#' \item Divide series into \code{ntasks} groups to process in parallel. In each task:
+#' \enumerate{
+#' \item iterate the first clustering algorithm on inputs of size
+#' \code{nb_items_clust}, re-clustering its aggregated outputs until K1 medoids remain\cr
+#' -> K1 medoid indices
+#' \item optionally, if WER=="mix":\cr
+#' a. compute the (K1xK1) WER distances between medoids\cr
+#' b. apply the second clustering algorithm\cr
+#' -> K2 medoid indices
+#' }
+#' \item Launch a final task on the aggregated outputs of all previous tasks:
+#' ntasks*K1 medoids if WER=="end", ntasks*K2 otherwise
+#' \item Compute synchrones (the sum of series within each final group)
+#' }
+#'
+#' The main argument -- \code{series} -- has a somewhat misleading name, since it can be
+#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve series.
+#' When \code{series} is given as a function, it must take a single argument,
+#' 'indices': an integer vector of the indices of the curves to retrieve;
+#' see the SQLite example.
+#' WARNING: the return value must be a matrix (series in columns), or NULL if no matches.
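+#'
+#' For illustration, a minimal accessor matching this contract could look as follows
+#' (a sketch only: \code{dataset}, a matrix holding the series in columns, is a
+#' hypothetical placeholder for the user's actual storage):
+#' \preformatted{
+#' getSeries <- function(indices) {
+#'   # 'dataset' is a hypothetical in-memory matrix; drop out-of-range indices
+#'   indices <- indices[indices <= ncol(dataset)]
+#'   if (length(indices) == 0) return (NULL) # contract: NULL if no matches
+#'   dataset[,indices,drop=FALSE] # contract: series in columns
+#' }
+#' }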
+#'
+#' Note: since we make no assumptions on the initial data, there is a possibility that,
+#' even when serialized, contributions do not fit in RAM. For example, 30e6 series of
+#' length 100,000 yield about log2(100,000) ~ 17 coefficients each, hence a contribution
+#' matrix of 30e6 x 17 x 8 bytes, i.e. more than 4GB. Therefore, it's safer to place
+#' these in (binary) files; that's what we do.
+#'
+#' @param series Access to the N (time-)series, which can be of one of the four
 #' following types:
 #' \itemize{
-#' \item [big.]matrix: each line contains all the values for one time-serie, ordered by time
+#' \item [big.]matrix: each column contains the (time-ordered) values of one time series
 #' \item connection: any R connection object providing lines as described above
 #' \item character: name of a CSV file containing series in rows (no header)
 #' \item function: a custom way to retrieve the curves; it has only one argument:
-#' the indices of the series to be retrieved. See examples
+#' the indices of the series to be retrieved. See the SQLite example
 #' }
-#' @inheritParams clustering
-#' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
+#' @param K1 Number of clusters to be found after stage 1 (K1 << N)
 #' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
-#' @param wf Wavelet transform filter; see ?wavelets::wt.filter
-#' @param ctype Type of contribution: "relative" or "absolute" (or any prefix)
-#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply stage 2
-#' at the end of each task
+#' @param nb_series_per_chunk Number of series to retrieve in one batch
+#' @param nb_items_clust Number of items in the first clustering algorithm's input
+#' @param algoClust1 Clustering algorithm for stage 1. A function which takes (data, K)
+#' as arguments, where data is a matrix with series in columns and K the desired number
+#' of clusters, and outputs the ranks of K medoids. Default: PAM.
+#' @param algoClust2 Clustering algorithm for stage 2. A function which takes (dists, K)
+#' as arguments, where dists is a matrix of distances and K the desired number of
+#' clusters, and outputs the ranks of K medoids. Default: PAM.
+#' @param wav_filt Wavelet transform filter; see ?wavelets::wt.filter
+#' @param contrib_type Type of contribution: "relative", "logit" or "absolute" (any prefix)
+#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
+#' stage 2 at the end of each task
+#' @param smooth_lvl Smoothing level: an odd integer, 1 == no smoothing
+#' @param nvoice Number of voices within each octave for CWT computations
 #' @param random TRUE (default) to randomly distribute the series into chunks
-#' @param ntasks Number of tasks (parallel iterations to obtain K1 medoids); default: 1.
-#' Note: ntasks << N, so that N is "roughly divisible" by N (number of series)
-#' @param ncores_tasks "MPI" number of parallel tasks (1 to disable: sequential tasks)
-#' @param ncores_clust "OpenMP" number of parallel clusterings in one task
-#' @param nb_series_per_chunk (~Maximum) number of series in each group, inside a task
-#' @param min_series_per_chunk Minimum number of series in each group
+#' @param ntasks Number of tasks (parallel iterations to obtain K1 [if WER=="end"]
+#' or K2 [if WER=="mix"] medoids); default: 1.\cr
+#' Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
+#' @param ncores_tasks Number of parallel tasks ('1' == sequential tasks)
+#' @param ncores_clust Number of parallel clusterings in one task
 #' @param sep Separator in CSV input file (if any provided)
-#' @param nbytes Number of bytes to serialize a floating-point number; 4 or 8
-#' @param endian Endianness to use for (de)serialization. Use "little" or "big" for portability
-#' @param verbose Level of verbosity (0/FALSE for nothing or 1/TRUE for all; devel stage)
-#' @param parll TRUE to fully parallelize; otherwise run sequentially (debug, comparison)
+#' @param nbytes 4 or 8 bytes to (de)serialize a floating-point number
+#' @param endian Endianness for (de)serialization: "little" or "big"
+#' @param verbose FALSE: nothing printed; TRUE: some execution traces
 #'
-#' @return A big.matrix of the final medoids curves (K2) in rows
+#' @return A list:
+#' \itemize{
+#' \item medoids: matrix of the K2 final medoid curves
+#' \item ranks: corresponding indices in the dataset
+#' \item synchrones: sum of series within each final group
+#' }
+#'
+#' @references Clustering functional data using Wavelets [2013];
+#' A. Antoniadis, X. Brossat, J. Cugliari & J.-M. Poggi.
+#' Inter. J. of Wavelets, Multiresolution and Information Processing,
+#' vol. 11, No 1, pp. 1-30. doi:10.1142/S0219691313500033
 #'
 #' @examples
 #' \dontrun{
-#' # WER distances computations are a bit too long for CRAN (for now)
+#' # WER distances computations are too long for CRAN (for now)
+#' # Note: on this small example, a sequential run is faster
 #'
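+#' # The algoClust1/algoClust2 defaults (PAM) may be replaced by any function taking
+#' # (data,K) -- resp. (dists,K) -- and returning K medoid ranks. For instance, a
+#' # k-means based stage-1 sketch (the name algoClust1_km is ours; medoids are
+#' # approximated by the series nearest to each center):
+#' algoClust1_km <- function(data, K) {
+#'   km <- stats::kmeans(t(data), K)
+#'   sapply(seq_len(K), function(k)
+#'     which.min(colSums((data - km$centers[k,])^2)))
+#' }
+#' # It would then be passed as claws(series, ..., algoClust1=algoClust1_km)
+#'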
 #' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
-#' x = seq(0,500,0.05)
-#' L = length(x) #10001
-#' ref_series = matrix( c(cos(x), cos(2*x), cos(3*x), sin(x), sin(2*x), sin(3*x)),
-#' byrow=TRUE, ncol=L )
+#' x <- seq(0,50,0.05)
+#' L <- length(x) #1001
+#' ref_series <- matrix( c(cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)), ncol=6 )
 #' library(wmtsa)
-#' series = do.call( rbind, lapply( 1:6, function(i)
-#' do.call(rbind, wmtsa::wavBootstrap(ref_series[i,], n.realization=400)) ) )
-#' #dim(series) #c(2400,10001)
-#' medoids_ascii = claws(series, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' series <- do.call( cbind, lapply( 1:6, function(i)
+#'   do.call(cbind, wmtsa::wavBootstrap(ref_series[,i], n.realization=40)) ) )
+#' # Mix the series so that all groups are evenly spread
+#' permut <- (0:239)%%6 * 40 + (0:239)%/%6 + 1
+#' series <- series[,permut]
+#' #dim(series) #c(1001,240): 240 series of length 1001, in columns
+#' res_ascii <- claws(series, K1=30, K2=6, nb_series_per_chunk=500,
+#'   nb_items_clust=100, random=FALSE, verbose=TRUE, ncores_clust=1)
 #'
 #' # Same example, from CSV file
-#' csv_file = "/tmp/epclust_series.csv"
-#' write.table(series, csv_file, sep=",", row.names=FALSE, col.names=FALSE)
-#' medoids_csv = claws(csv_file, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' csv_file <- tempfile(pattern="epclust_series.csv_")
+#' write.table(t(series), csv_file, sep=",", row.names=FALSE, col.names=FALSE)
+#' res_csv <- claws(csv_file, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
 #'
 #' # Same example, from binary file
-#' bin_file = "/tmp/epclust_series.bin"
-#' nbytes = 8
-#' endian = "little"
-#' epclust::binarize(csv_file, bin_file, 500, nbytes, endian)
-#' getSeries = function(indices) getDataInFile(indices, bin_file, nbytes, endian)
-#' medoids_bin = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' bin_file <- tempfile(pattern="epclust_series.bin_")
+#' nbytes <- 8
+#' endian <- "little"
+#' binarize(csv_file, bin_file, 500, ",", nbytes, endian)
+#' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
+#' res_bin <- claws(getSeries, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
 #' unlink(csv_file)
 #' unlink(bin_file)
 #'
@@ -69,215 +121,208 @@
 #' library(DBI)
 #' series_db <- dbConnect(RSQLite::SQLite(), "file::memory:")
 #' # Prepare data.frame in DB-format
-#' n = nrow(series)
-#' time_values = data.frame(
+#' n <- ncol(series)
+#' times_values <- data.frame(
 #' id = rep(1:n,each=L),
-#' time = rep( as.POSIXct(1800*(0:n),"GMT",origin="2001-01-01"), L ),
-#' value = as.double(t(series)) )
+#' time = rep( as.POSIXct(1800*(1:L),"GMT",origin="2001-01-01"), n ),
+#' value = as.double(series) )
 #' dbWriteTable(series_db, "times_values", times_values)
 #' # Fill an associative array, mapping index to identifier
 #' indexToID_inDB <- as.character(
-#' dbGetQuery(series_db, 'SELECT DISTINCT id FROM time_values')[,"id"] )
-#' getSeries = function(indices) {
-#' request = "SELECT id,value FROM times_values WHERE id in ("
-#' for (i in indices)
-#' request = paste(request, i, ",", sep="")
-#' request = paste(request, ")", sep="")
-#' df_series = dbGetQuery(series_db, request)
-#' # Assume that all series share same length at this stage
-#' ts_length = sum(df_series[,"id"] == df_series[1,"id"])
-#' t( as.matrix(df_series[,"value"], nrow=ts_length) )
+#' dbGetQuery(series_db, 'SELECT DISTINCT id FROM times_values')[,"id"] )
+#' serie_length <- as.integer( dbGetQuery(series_db,
+#'   paste("SELECT COUNT(*) FROM times_values WHERE id == ",indexToID_inDB[1],sep="")) )
+#' getSeries <- function(indices) {
+#'   indices <- indices[ indices <= length(indexToID_inDB) ]
+#'   if (length(indices) == 0)
+#'     return (NULL)
+#'   request <- "SELECT id,value FROM times_values WHERE id in ("
+#'   for (i in seq_along(indices)) {
+#'     request <- paste(request, indexToID_inDB[ indices[i] ], sep="")
+#'     if (i < length(indices))
+#'       request <- paste(request, ",", sep="")
+#'   }
+#'   request <- paste(request, ")", sep="")
+#'   df_series <- dbGetQuery(series_db, request)
+#'   matrix(df_series[,"value"], nrow=serie_length)
 #' }
-#' medoids_db = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
+#' res_db <- claws(getSeries, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
 #' dbDisconnect(series_db)
 #'
-#' # All computed medoids should be the same:
-#' digest::sha1(medoids_ascii)
-#' digest::sha1(medoids_csv)
-#' digest::sha1(medoids_bin)
-#' digest::sha1(medoids_db)
+#' # All results should be equal:
+#' all(res_ascii$ranks == res_csv$ranks
+#'   & res_ascii$ranks == res_bin$ranks
+#'   & res_ascii$ranks == res_db$ranks)
 #' }
 #' @export
-claws = function(getSeries, K1, K2,
-	wf,ctype, #stage 1
-	WER="end", #stage 2
-	random=TRUE, #randomize series order?
-	ntasks=1, ncores_tasks=1, ncores_clust=4, #control parallelism
-	nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1, #chunk size
-	sep=",", #ASCII input separator
-	nbytes=4, endian=.Platform$endian, #serialization (write,read)
-	verbose=FALSE, parll=TRUE)
+claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=5*K1,
+	algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE,pamonce=1)$id.med,
+	algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE,pamonce=1)$id.med,
+	wav_filt="d8", contrib_type="absolute", WER="end", smooth_lvl=3, nvoice=4,
+	random=TRUE, ntasks=1, ncores_tasks=1, ncores_clust=3, sep=",", nbytes=4,
+	endian=.Platform$endian, verbose=FALSE)
 {
 	# Check/transform arguments
-	if (!is.matrix(getSeries) && !bigmemory::is.big.matrix(getSeries)
-		&& !is.function(getSeries)
-		&& !methods::is(getSeries,"connection") && !is.character(getSeries))
+	if (!is.matrix(series) && !bigmemory::is.big.matrix(series)
+		&& !is.function(series)
+		&& !methods::is(series,"connection") && !is.character(series))
 	{
-		stop("'getSeries': [big]matrix, function, file or valid connection (no NA)")
+		stop("'series': [big]matrix, function, file or valid connection (no NA)")
 	}
-	K1 = .toInteger(K1, function(x) x>=2)
-	K2 = .toInteger(K2, function(x) x>=2)
-	if (!is.logical(random))
-		stop("'random': logical")
-	tryCatch(
-		{ignored <- wavelets::wt.filter(wf)},
-		error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter"))
+	K1 <- .toInteger(K1, function(x) x>=2)
+	K2 <- .toInteger(K2, function(x) x>=2)
+	nb_series_per_chunk <- .toInteger(nb_series_per_chunk, function(x) x>=1)
+	nb_items_clust <- .toInteger(nb_items_clust, function(x) x>K1)
+	random <- .toLogical(random)
+	tryCatch({ignored <- wavelets::wt.filter(wav_filt)},
+		error=function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter") )
+	ctypes <- c("relative","absolute","logit")
+	contrib_type <- ctypes[ pmatch(contrib_type,ctypes) ]
+	if (is.na(contrib_type))
+		stop("'contrib_type' in {'relative','absolute','logit'}")
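+	# NOTE: pmatch() above lets any unambiguous prefix through,
+	# e.g. contrib_type="rel" resolves to "relative"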
 	if (WER!="end" && WER!="mix")
-		stop("WER takes values in {'end','mix'}")
-	ntasks = .toInteger(ntasks, function(x) x>=1)
-	ncores_tasks = .toInteger(ncores_tasks, function(x) x>=1)
-	ncores_clust = .toInteger(ncores_clust, function(x) x>=1)
-	nb_series_per_chunk = .toInteger(nb_series_per_chunk, function(x) x>=K1)
-	min_series_per_chunk = .toInteger(K1, function(x) x>=K1 && x<=nb_series_per_chunk)
+		stop("'WER': in {'end','mix'}")
+	ntasks <- .toInteger(ntasks, function(x) x>=1)
+	ncores_tasks <- .toInteger(ncores_tasks, function(x) x>=1)
+	ncores_clust <- .toInteger(ncores_clust, function(x) x>=1)
 	if (!is.character(sep))
 		stop("'sep': character")
-	nbytes = .toInteger(nbytes, function(x) x==4 || x==8)
+	nbytes <- .toInteger(nbytes, function(x) x==4 || x==8)
+	verbose <- .toLogical(verbose)

-	# Serialize series if required, to always use a function
-	bin_dir = ".epclust_bin/"
-	dir.create(bin_dir, showWarnings=FALSE, mode="0755")
-	if (!is.function(getSeries))
+	# Binarize series if it is not a function; the aim is to always work through a
+	# function, so that all subsequent treatments are uniform. An equally good
+	# alternative would be a file-backed bigmemory::big.matrix, but it would break
+	# the "all-is-function" pattern.
+	if (!is.function(series))
 	{
 		if (verbose)
-			cat("...Serialize time-series\n")
-		series_file = paste(bin_dir,"data",sep="") ; unlink(series_file)
-		binarize(getSeries, series_file, nb_series_per_chunk, sep, nbytes, endian)
-		getSeries = function(inds) getDataInFile(inds, series_file, nbytes, endian)
+			cat("...Serialize time-series (or reuse an existing binary file)\n")
+		series_file <- ".series.epclust.bin"
+		if (!file.exists(series_file))
+			binarize(series, series_file, nb_series_per_chunk, sep, nbytes, endian)
+		getSeries <- function(inds) getDataInFile(inds, series_file, nbytes, endian)
 	}
+	else
+		getSeries <- series

 	# Serialize all computed wavelet contributions into a file
-	contribs_file = paste(bin_dir,"contribs",sep="") ; unlink(contribs_file)
-	index = 1
-	nb_curves = 0
+	contribs_file <- ".contribs.epclust.bin"
 	if (verbose)
-		cat("...Compute contributions and serialize them\n")
-	nb_curves = binarizeTransform(getSeries,
-		function(series) curvesToContribs(series, wf, ctype),
-		contribs_file, nb_series_per_chunk, nbytes, endian)
-	getContribs = function(indices) getDataInFile(indices, contribs_file, nbytes, endian)
+		cat("...Compute contributions and serialize them (or reuse an existing binary file)\n")
+	if (!file.exists(contribs_file))
+	{
+		nb_curves <- binarizeTransform(getSeries,
+			function(curves) curvesToContribs(curves, wav_filt, contrib_type),
+			contribs_file, nb_series_per_chunk, nbytes, endian)
+	}
+	else
+	{
+		# TODO: duplicated from getDataInFile() in de_serialize.R
+		contribs_size <- file.info(contribs_file)$size #number of bytes in the file
+		contrib_length <- readBin(contribs_file, "integer", n=1, size=8, endian=endian)
+		nb_curves <- (contribs_size-8) / (nbytes*contrib_length)
+	}
+	getContribs <- function(indices) getDataInFile(indices, contribs_file, nbytes, endian)

+	# A few sanity checks: do not continue if too little data is available.
+	if (nb_curves < K2)
+		stop("Not enough data: fewer series than the final number of clusters")
+	nb_series_per_task <- round(nb_curves / ntasks)
+	if (nb_series_per_task < K2)
+		stop("Too many tasks: fewer series per task than the final number of clusters")

+	# Generate a random permutation of 1:N (if random==TRUE);
+	# otherwise just use the arrival (storage) order.
+	indices_all <- if (random) sample(nb_curves) else seq_len(nb_curves)
+	# Split (all) indices into ntasks groups of ~same size
+	indices_tasks <- lapply(seq_len(ntasks), function(i) {
+		upper_bound <- ifelse( i<ntasks, nb_series_per_task*i, nb_curves )
+		indices_all[((i-1)*nb_series_per_task+1):upper_bound]
+	})
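+	# e.g. with nb_curves=100 and ntasks=3, the tasks receive 33, 33 and 34 indices
+	# respectively (the last task absorbs the rounding remainder)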
+
+	parll <- (ncores_tasks*ncores_clust > 1)
+	if (parll && ntasks>1)
+	{
+		# Initialize parallel runs: outfile="" allows the workers' verbose traces to be
+		# printed in the console under Linux. All necessary variables are passed to them.
+		cl <-
+			if (verbose)
+				parallel::makeCluster(ncores_tasks, outfile="")
+			else
+				parallel::makeCluster(ncores_tasks)
+		varlist <- c("ncores_clust","verbose", #task 1 & 2
+			"K1","getContribs","algoClust1","nb_items_clust") #task 1
+		if (WER=="mix")
+		{
+			# Add variables for task 2
+			varlist <- c(varlist, "K2","getSeries","algoClust2","nb_series_per_chunk",
+				"smooth_lvl","nvoice","nbytes","endian")
+		}
+		parallel::clusterExport(cl, varlist, envir=environment())
+	}

-	runTwoStepClustering = function(inds)
+	# This function performs one complete clustering task, divided into stage 1 + stage 2.
+	# stage 1: n indices --> clusteringTask1(...) --> K1 medoids (indices)
+	# stage 2: K1 indices --> K1xK1 WER distances --> clusteringTask2(...) --> K2 medoids,
+	# where n == N / ntasks, N being the total number of curves.
+	runTwoStepClustering <- function(inds)
 	{
+		# When running in parallel, the workers start with a blank environment: we need
+		# to load the required packages and pass the useful variables.
 		if (parll && ntasks>1)
 			require("epclust", quietly=TRUE)
-		indices_medoids = clusteringTask1(
-			inds, getContribs, K1, nb_series_per_chunk, ncores_clust, verbose, parll)
+		indices_medoids <- clusteringTask1(inds, getContribs, K1, algoClust1,
+			nb_items_clust, ncores_clust, verbose)
 		if (WER=="mix")
 		{
-			medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
-			medoids2 = clusteringTask2(medoids1,
-				K2, getSeries, nb_curves, nb_series_per_chunk, ncores_clust, verbose, parll)
-			binarize(medoids2, synchrones_file, nb_series_per_chunk, sep, nbytes, endian)
-			return (vector("integer",0))
+			indices_medoids <- clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+				nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_clust,verbose)
 		}
 		indices_medoids
 	}

-	# Cluster contributions in parallel (by nb_series_per_chunk)
-	indices_all = if (random) sample(nb_curves) else seq_len(nb_curves)
-	indices_tasks = lapply(seq_len(ntasks), function(i) {
-		upper_bound = ifelse( i<ntasks, nb_series_per_task*i, nb_curves )
-		indices_all[((i-1)*nb_series_per_task+1):upper_bound]
-	})
-	if (parll && ntasks>1)
-	{
-		cl = parallel::makeCluster(ncores_tasks)
-		varlist = c("getSeries","getContribs","K1","K2","verbose","parll",
-			"nb_series_per_chunk","ntasks","ncores_clust","sep","nbytes","endian")
-		if (WER=="mix")
-			varlist = c(varlist, "synchrones_file")
-		parallel::clusterExport(cl, varlist=varlist, envir = environment())
-	}
-	# 1000*K1 indices [if WER=="end"], or empty vector [if WER=="mix"] --> series on file
-	if (parll && ntasks>1)
-		indices = unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
-	else
-		indices = unlist( lapply(indices_tasks, runTwoStepClustering) )
+	# As explained above, after all runs we obtain ntasks*[K1 or K2] medoid indices,
+	# depending on whether WER=="end" or "mix", respectively.
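+	# (e.g. with ntasks=4, K1=30 and WER=="end", 4*30 = 120 medoid indices are collected)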
+	indices_medoids_all <-
+		if (parll && ntasks>1)
+			unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
+		else
+			unlist( lapply(indices_tasks, runTwoStepClustering) )
+
 	if (parll && ntasks>1)
 		parallel::stopCluster(cl)

-	getRefSeries = getSeries
-	if (WER=="mix")
-	{
-		indices = seq_len(ntasks*K2)
-		#Now series must be retrieved from synchrones_file
-		getSeries = function(inds) getDataInFile(inds, synchrones_file, nbytes, endian)
-		#Contributions must be re-computed
-		unlink(contribs_file)
-		index = 1
-		if (verbose)
-			cat("...Serialize contributions computed on synchrones\n")
-		ignored = binarizeTransform(getSeries,
-			function(series) curvesToContribs(series, wf, ctype),
-			contribs_file, nb_series_per_chunk, nbytes, endian)
-	}
+	# For the last stage, ncores_tasks*(ncores_clust+1) cores should be available:
+	#  - ncores_tasks for level 1 parallelism
+	#  - ncores_tasks*ncores_clust for level 2 parallelism,
+	# but since an extension MPI <--> tasks / OpenMP <--> sub-tasks is on the way,
+	# it's better to just re-use ncores_clust
+	ncores_last_stage <- ncores_clust

-	# Run step2 on resulting indices or series (from file)
+	# Run the last clustering tasks to obtain only K2 medoid indices
 	if (verbose)
 		cat("...Run final // stage 1 + stage 2\n")
-	indices_medoids = clusteringTask1(
-		indices, getContribs, K1, nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
-	medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
-	medoids2 = clusteringTask2(medoids1, K2,
-		getRefSeries, nb_curves, nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
+	indices_medoids <- clusteringTask1(indices_medoids_all, getContribs, K1, algoClust1,
+		nb_items_clust, ncores_tasks*ncores_clust, verbose)

-	# Cleanup
-	unlink(bin_dir, recursive=TRUE)
+	indices_medoids <- clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+		nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_last_stage,verbose)

-	medoids2
-}
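+	# At this point, indices_medoids contains exactly K2 indices into the full dataset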
+	# Compute the synchrones, i.e. the summed power consumption (sum of series)
+	# within each of the K2 final groups
+	medoids <- getSeries(indices_medoids)
+	synchrones <- computeSynchrones(medoids, getSeries, nb_curves, nb_series_per_chunk,
+		ncores_last_stage, verbose)

-#' curvesToContribs
-#'
-#' Compute the discrete wavelet coefficients for each series, and aggregate them in
-#' energy contribution across scales as described in https://arxiv.org/abs/1101.4744v2
-#'
-#' @param series Matrix of series (in rows), of size n x L
-#' @inheritParams claws
-#'
-#' @return A matrix of size n x log(L) containing contributions in rows
-#'
-#' @export
-curvesToContribs = function(series, wf, ctype)
-{
-	L = length(series[1,])
-	D = ceiling( log2(L) )
-	nb_sample_points = 2^D
-	cont_types = c("relative","absolute")
-	ctype = cont_types[ pmatch(ctype,cont_types) ]
-	t( apply(series, 1, function(x) {
-		interpolated_curve = spline(1:L, x, n=nb_sample_points)$y
-		W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
-		nrj = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
-		if (ctype=="relative") nrj / sum(nrj) else nrj
-	}) )
-}
-
-# Check integer arguments with functional conditions
-.toInteger <- function(x, condition)
-{
-	if (!is.integer(x))
-		tryCatch(
-			{x = as.integer(x)[1]},
-			error = function(e) paste("Cannot convert argument",substitute(x),"to integer")
-		)
-	if (!condition(x))
-		stop(paste("Argument",substitute(x),"does not verify condition",body(condition)))
-	x
+	# NOTE: no need for a big.matrix here, since only K2 << K1 << N curves remain
+	list("medoids"=medoids, "ranks"=indices_medoids, "synchrones"=synchrones)
 }