X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2FR%2Fmain.R;h=603f7bfbbec05e2c71db48bcc75a7132507eb2f4;hb=40f12a2f66d06fd77183ea02b996f5c66f90761c;hp=eded9523e4c2cc1c8316ed01617e35dfa13c6b18;hpb=dc1aa85a96bbf815b0d896c22a9b4a539a9e8a9c;p=epclust.git

diff --git a/epclust/R/main.R b/epclust/R/main.R
index eded952..603f7bf 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -1,161 +1,313 @@
-#' @include defaults.R
-
-#' @title Cluster power curves with PAM in parallel
+#' CLAWS: CLustering with wAvelets and Wer distanceS
+#'
+#' Cluster electricity power curves (or any series of similar nature) by applying a
+#' two-stage procedure in parallel (see details).
+#' Input series must be sampled on the same time grid, with no missing values.
 #'
-#' @description Groups electricity power curves (or any series of similar nature) by applying PAM
-#' algorithm in parallel to chunks of size \code{nb_series_per_chunk}
+#' Summary of the function execution flow:
+#' \enumerate{
+#'   \item Compute and serialize all contributions, obtained through discrete wavelet
+#'     decomposition (see Antoniadis & al. [2013])
+#'   \item Divide series into \code{ntasks} groups to process in parallel. In each task:
+#'   \enumerate{
+#'     \item iterate the first clustering algorithm on chunks of size
+#'       \code{nb_series_per_chunk}, then on its aggregated outputs
+#'     \item optionally, if WER=="mix":
+#'       a) compute the K1 synchrone curves,
+#'       b) compute the WER distances (K1xK1 matrix) between medoids, and
+#'       c) apply the second clustering algorithm (output: K2 indices)
+#'   }
+#'   \item Launch a final task on the aggregated outputs of all previous tasks:
+#'     ntasks*K1 medoids if WER=="end", ntasks*K2 otherwise
+#'   \item Compute synchrones (sum of series within each final group)
+#' }
+#' \cr
+#' The main argument -- \code{series} -- has a somewhat misleading name, since it can be
+#' either a [big.]matrix, a CSV file, a connection or a user function to retrieve series.
+#' When \code{series} is given as a function, it must take a single argument,
+#' 'indices', an integer vector containing the indices of the curves to retrieve;
+#' see the SQLite example.
+#' WARNING: the return value must be a matrix (in columns), or NULL if no matches.
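+#' A minimal sketch of such a function, assuming the series already sit in memory in
+#' a matrix \code{mat} (series in columns; \code{mat} is hypothetical here):
+#' \code{function(indices) if (length(indices) > 0) mat[,indices,drop=FALSE] else NULL}.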
+#' \cr
+#' Note: since we make no assumptions on the initial data, there is a possibility that
+#' even when serialized, the contributions do not fit in RAM. For example,
+#' 30e6 series of length 100,000 would lead to a contribution matrix of more than 4 GB.
+#' It is therefore safer to place these in (binary) files; that is what we do.
 #'
-#' @param data Access to the data, which can be of one of the three following types:
+#' @param series Access to the (time-)series, which can be of one of the four
+#'   following types:
+#' \itemize{
+#'   \item [big.]matrix: each column contains the (time-ordered) values of one time series
+#'   \item character: name of a CSV file containing series in rows (no header)
+#'   \item connection: any R connection object providing lines as in the CSV format above
+#'   \item function: a custom way to retrieve the curves; it has only one argument:
+#'     the indices of the series to be retrieved. See the SQLite example
+#' }
+#' @param K1 Number of clusters to be found after stage 1 (K1 << N [number of series])
+#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
+#' @param nb_series_per_chunk (Maximum) number of series to retrieve in one batch;
+#'   this value is also used for the (maximum) number of series to cluster at a time
+#' @param algoClust1 Clustering algorithm for stage 1. A function which takes (data, K)
+#'   as arguments, where data is a matrix of series in columns and K the desired number
+#'   of clusters, and outputs the K medoids ranks. Default: PAM. In our method, this
+#'   function is called on iterated medoids during stage 1
+#' @param algoClust2 Clustering algorithm for stage 2. A function which takes (dists, K)
+#'   as arguments, where dists is a matrix of distances and K the desired number of
+#'   clusters, and outputs the K medoids ranks. Default: PAM. In our method, this
+#'   function is called on a K1 x K1 matrix of (WER) distances computed between the
+#'   medoids output by algorithm 1
+#' @param wav_filt Wavelet transform filter; see ?wavelets::wt.filter
+#' @param contrib_type Type of contribution: "relative", "logit" or "absolute" (any prefix)
+#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
+#'   stage 2 at the end of each task
+#' @param nvoice Number of voices within each octave for CWT computations
+#' @param random TRUE (default) to distribute the series into chunks at random
+#' @param ntasks Number of tasks (parallel iterations to obtain K1 [if WER=="end"]
+#'   or K2 [if WER=="mix"] medoids); default: 1.
+#'   Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
+#' @param ncores_tasks Number of parallel tasks (1 to disable: sequential tasks)
+#' @param ncores_clust Number of parallel clusterings in one task (3 should be a minimum)
+#' @param sep Separator in CSV input file (if any provided)
+#' @param nbytes Number of bytes used to serialize a floating-point number: 4 or 8
+#' @param endian Endianness for (de)serialization ("little" or "big")
+#' @param verbose Level of verbosity (0/FALSE for nothing, 1/TRUE for all; devel stage)
+#' @param parll TRUE to fully parallelize; otherwise run sequentially (debug, comparison)
+#'
+#' @return A list with
 #' \itemize{
-#' \item data.frame: each line contains its ID in the first cell, and all values after
-#' \item connection: any R connection object (e.g. a file) providing lines as described above
-#' \item function: a custom way to retrieve the curves; it has two arguments: the start index
-#' (start) and number of curves (n); see example in package vignette.
+#'   \item medoids: a matrix of the final K2 medoid curves, in columns
+#'   \item ranks: the corresponding indices in the dataset
+#'   \item synchrones: a matrix of the K2 sums of series within each final group
 #' }
-#' @param K Number of clusters
-#' @param nb_series_per_chunk (Maximum) number of series in each group
-#' @param min_series_per_chunk Minimum number of series in each group
-#' @param writeTmp Function to write temporary wavelets coefficients (+ identifiers);
-#' see defaults in defaults.R
-#' @param readTmp Function to read temporary wavelets coefficients (see defaults.R)
-#' @param wf Wavelet transform filter; see ?wt.filter. Default: haar
-#' @param WER "end" to apply stage 2 after stage 1 has iterated and finished, or "mix"
-#' to apply it after every stage 1
-#' @param ncores number of parallel processes; if NULL, use parallel::detectCores()
 #'
-#' @return A data.frame of the final medoids curves (identifiers + values)
-epclust = function(data, K, nb_series_per_chunk, min_series_per_chunk=10*K,
-	writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, wf="haar", WER="end", ncores=NULL)
+#' @references Clustering functional data using Wavelets [2013];
+#'   A. Antoniadis, X. Brossat, J. Cugliari & J.-M. Poggi.
+#'   Inter. J. of Wavelets, Multiresolution and Information Processing,
+#'   vol. 11, No 1, pp. 1-30. doi:10.1142/S0219691313500033
+#'
+#' @examples
+#' \dontrun{
+#' # WER distances computations are too long for CRAN (for now)
+#'
+#' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
+#' x = seq(0,500,0.05)
+#' L = length(x) #10001
+#' ref_series = matrix( c(cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)), ncol=6 )
+#' library(wmtsa)
+#' series = do.call( cbind, lapply( 1:6, function(i)
+#'   do.call(cbind, wmtsa::wavBootstrap(ref_series[,i], n.realization=400)) ) )
+#' #dim(series) #c(10001,2400): 2400 series in columns, each of length L=10001
+#' res_ascii = claws(series, K1=60, K2=6, 200, verbose=TRUE)
+#'
+#' # Same example, from a CSV file (which stores series in rows, hence t(series))
+#' csv_file = "/tmp/epclust_series.csv"
+#' write.table(t(series), csv_file, sep=",", row.names=FALSE, col.names=FALSE)
+#' res_csv = claws(csv_file, K1=60, K2=6, 200)
+#'
+#' # Same example, from a binary file
+#' bin_file <- "/tmp/epclust_series.bin"
+#' nbytes <- 8
+#' endian <- "little"
+#' binarize(csv_file, bin_file, 500, nbytes, endian)
+#' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
+#' res_bin <- claws(getSeries, K1=60, K2=6, 200)
+#' unlink(csv_file)
+#' unlink(bin_file)
+#'
+#' # Same example, from an SQLite database
+#' library(DBI)
+#' series_db <- dbConnect(RSQLite::SQLite(), "file::memory:")
+#' # Prepare data.frame in DB format (one row per observed value)
+#' n <- ncol(series)
+#' time_values <- data.frame(
+#'   id = rep(1:n, each=L),
+#'   time = rep( as.POSIXct(1800*(0:(L-1)),"GMT",origin="2001-01-01"), n ),
+#'   value = as.double(series) )
+#' dbWriteTable(series_db, "time_values", time_values)
+#' # Fill an associative array, mapping indices to identifiers
+#' indexToID_inDB <- as.character(
+#'   dbGetQuery(series_db, 'SELECT DISTINCT id FROM time_values')[,"id"] )
+#' serie_length <- as.integer( dbGetQuery(series_db,
+#'   paste("SELECT COUNT(*) FROM time_values WHERE id == ",indexToID_inDB[1],sep="")) )
+#' getSeries <- function(indices) {
+#'   request <- paste("SELECT id,value FROM time_values WHERE id in (",
+#'     paste(indexToID_inDB[indices], collapse=","), ")", sep="")
+#'   df_series <- dbGetQuery(series_db, request)
+#'   if (nrow(df_series) >= 1)
+#'     matrix(df_series[,"value"], nrow=serie_length)
+#'   else
+#'     NULL
+#' }
+#' res_db = claws(getSeries, K1=60, K2=6, 200)
+#' dbDisconnect(series_db)
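+#'
+#' # Any clustering algorithm respecting the documented contracts may replace PAM.
+#' # A sketch with stats::kmeans for stage 1 (not the package default); kmeans returns
+#' # centers instead of medoid ranks, so we pick the series closest to each center
+#' # (algoClust1_km and res_km are hypothetical names):
+#' algoClust1_km <- function(data, K) {
+#'   km <- stats::kmeans(t(data), K, nstart=10)
+#'   sapply(seq_len(K), function(k) {
+#'     inds <- which(km$cluster == k)
+#'     inds[ which.min( colSums( (data[,inds,drop=FALSE] - km$centers[k,])^2 ) ) ]
+#'   })
+#' }
+#' res_km <- claws(series, K1=60, K2=6, 200, algoClust1=algoClust1_km)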
+#'
+#' # All results should be the same:
+#' library(digest)
+#' digest::sha1(res_ascii)
+#' digest::sha1(res_csv)
+#' digest::sha1(res_bin)
+#' digest::sha1(res_db)
+#' }
+#' @export
+claws <- function(series, K1, K2, nb_series_per_chunk,
+	algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE,pamonce=1)$id.med,
+	algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE,pamonce=1)$id.med,
+	wav_filt="d8", contrib_type="absolute", WER="end", nvoice=4, random=TRUE,
+	ntasks=1, ncores_tasks=1, ncores_clust=4, sep=",", nbytes=4,
+	endian=.Platform$endian, verbose=FALSE, parll=TRUE)
 {
-	#TODO: setRefClass(...) to avoid copy data:
-	#http://stackoverflow.com/questions/2603184/r-pass-by-reference
-
-	#0) check arguments
-	if (!is.data.frame(data) && !is.function(data))
-	tryCatch(
-		{
-			if (is.character(data))
-			{
-				data_con = file(data, open="r")
-			} else if (!isOpen(data))
-			{
-				open(data)
-				data_con = data
-			}
-		},
-		error="data should be a data.frame, a function or a valid connection")
-	if (!is.integer(K) || K < 2)
-		stop("K should be an integer greater or equal to 2")
-	if (!is.integer(nb_series_per_chunk) || nb_series_per_chunk < K)
-		stop("nb_series_per_chunk should be an integer greater or equal to K")
-	if (!is.function(writeTmp) || !is.function(readTmp))
-		stop("read/writeTmp should be functional (see defaults.R)")
+	# Check/transform arguments
+	if (!is.matrix(series) && !bigmemory::is.big.matrix(series)
+		&& !is.function(series)
+		&& !methods::is(series,"connection") && !is.character(series))
+	{
+		stop("'series': [big]matrix, function, file or valid connection (no NA)")
+	}
+	K1 <- .toInteger(K1, function(x) x>=2)
+	K2 <- .toInteger(K2, function(x) x>=2)
+	nb_series_per_chunk <- .toInteger(nb_series_per_chunk, function(x) x>=1)
+	# K1 (number of clusters at step 1) cannot exceed nb_series_per_chunk, because we
+	# will need to load K1 series in memory for clustering stage 2.
+	if (K1 > nb_series_per_chunk)
+		stop("'K1' cannot exceed 'nb_series_per_chunk'")
+	random <- .toLogical(random)
+	tryCatch( {ignored <- wavelets::wt.filter(wav_filt)},
+		error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter") )
+	ctypes = c("relative","absolute","logit")
+	contrib_type = ctypes[ pmatch(contrib_type,ctypes) ]
+	if (is.na(contrib_type))
+		stop("'contrib_type' in {'relative','absolute','logit'}")
 	if (WER!="end" && WER!="mix")
-		stop("WER takes values in {'end','mix'}")
-	#concerning ncores, any non-integer type will be treated as "use parallel:detectCores()/4"
+		stop("'WER': in {'end','mix'}")
+	ntasks <- .toInteger(ntasks, function(x) x>=1)
+	ncores_tasks <- .toInteger(ncores_tasks, function(x) x>=1)
+	ncores_clust <- .toInteger(ncores_clust, function(x) x>=1)
+	if (!is.character(sep))
+		stop("'sep': character")
+	nbytes <- .toInteger(nbytes, function(x) x==4 || x==8)
+	verbose <- .toLogical(verbose)
+	parll <- .toLogical(parll)
 
-	#1) acquire data (process curves, get as coeffs)
-	#TODO: for data.frame and custom function, run in parallel (connections are sequential[?!])
+	# Binarize series if it is not a function; the aim is to always work through a
+	# function, so that all subsequent treatments are uniform. An equally good
+	# alternative would be a file-backed bigmemory::big.matrix, but it would break
+	# the "all-is-function" pattern.
+	if (!is.function(series))
+	{
+		if (verbose)
+			cat("...Serialize time-series (or retrieve past binary file)\n")
+		series_file = ".series.epclust.bin"
+		if (!file.exists(series_file))
+			binarize(series, series_file, nb_series_per_chunk, sep, nbytes, endian)
+		getSeries = function(inds) getDataInFile(inds, series_file, nbytes, endian)
+	}
+	else
+		getSeries = series
+
+	# Serialize all computed wavelets contributions into a file
+	contribs_file = ".contribs.epclust.bin"
 	index = 1
 	nb_curves = 0
-	repeat
+	if (verbose)
+		cat("...Compute contributions and serialize them (or retrieve past binary file)\n")
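+	# Expected layout of the contributions file, as inferred from the size computation
+	# in the else branch below (presumably matching binarize*() in de_serialize.R):
+	#   - bytes 1..8: one 64-bit integer, the length of a contribution vector
+	#   - then nb_curves blocks of contrib_length values, each value on nbytes bytes
+	# Hence: file size = 8 + nb_curves * contrib_length * nbytes.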
+	if (!file.exists(contribs_file))
 	{
-		coeffs_chunk = NULL
-		if (is.data.frame(data))
-		{
-			#full data matrix
-			if (index < nrow(data))
-			{
-				coeffs_chunk = curvesToCoeffs(
-					data[index:(min(index+nb_series_per_chunk-1,nrow(data))),], wf)
-			}
-		} else if (is.function(data))
-		{
-			#custom user function to retrieve next n curves, probably to read from DB
-			coeffs_chunk = curvesToCoeffs( data(index, nb_series_per_chunk), wf )
-		} else
-		{
-			#incremental connection
-			#TODO: find a better way to parse than using a temp file
-			ascii_lines = readLines(data_con, nb_series_per_chunk)
-			if (length(ascii_lines > 0))
-			{
-				series_chunk_file = ".tmp/series_chunk"
-				writeLines(ascii_lines, series_chunk_file)
-				coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf )
-			}
-		}
-		if (is.null(coeffs_chunk))
-			break
-		writeTmp(coeffs_chunk)
-		nb_curves = nb_curves + nrow(coeffs_chunk)
-		index = index + nb_series_per_chunk
+		nb_curves = binarizeTransform(getSeries,
+			function(curves) curvesToContribs(curves, wav_filt, contrib_type),
+			contribs_file, nb_series_per_chunk, nbytes, endian)
+	}
+	else
+	{
+		# TODO: duplicate from getDataInFile() in de_serialize.R
+		contribs_size = file.info(contribs_file)$size #number of bytes in the file
+		contrib_length = readBin(contribs_file, "integer", n=1, size=8, endian=endian)
+		nb_curves = (contribs_size-8) / (nbytes*contrib_length)
 	}
-	if (exists(data_con))
-		close(data_con)
-	if (nb_curves < min_series_per_chunk)
-		stop("Not enough data: less rows than min_series_per_chunk!")
+	getContribs = function(indices) getDataInFile(indices, contribs_file, nbytes, endian)
 
-	#2) process coeffs (by nb_series_per_chunk) and cluster them in parallel
-	library(parallel)
-	ncores = ifelse(is.integer(ncores), ncores, parallel::detectCores()%/%4)
-	cl = parallel::makeCluster(ncores)
-	parallel::clusterExport(cl=cl, varlist=c("TODO:", "what", "to", "export?"), envir=environment())
-	#TODO: be careful of writing to a new temp file, then flush initial one, then re-use it...
-	repeat
+	# A few sanity checks: do not continue if there is too little data.
+	if (nb_curves < K2)
+		stop("Not enough data: fewer series than the final number of clusters")
+	nb_series_per_task = round(nb_curves / ntasks)
+	if (nb_series_per_task < K2)
+		stop("Too many tasks: fewer series in one task than the final number of clusters")
+
+	# Generate a random permutation of 1:N (if random==TRUE);
+	# otherwise just use arrival (storage) order.
+	indices_all = if (random) sample(nb_curves) else seq_len(nb_curves)
+	# Split (all) indices into ntasks groups of ~same size
+	indices_tasks = lapply(seq_len(ntasks), function(i) {
+		upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
+		indices_all[((i-1)*nb_series_per_task+1):upper_bound]
+	})
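+	# For example, with nb_curves=245 and ntasks=3: nb_series_per_task=82, and the
+	# tasks receive indices_all[1:82], indices_all[83:164] and indices_all[165:245].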
+
+	if (parll && ntasks>1)
 	{
-		#while there is jobs to do (i.e. size of tmp "file" is greater than nb_series_per_chunk)
-		nb_workers = nb_curves %/% nb_series_per_chunk
-		indices = list()
-		#indices[[i]] == (start_index,number_of_elements)
-		for (i in 1:nb_workers)
-			indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk)
-		remainder = nb_curves %% nb_series_per_chunk
-		if (remainder >= min_series_per_chunk)
-		{
-			nb_workers = nb_workers + 1
-			indices[[nb_workers]] = c(nb_curves-remainder+1, nb_curves)
-		} else if (remainder > 0)
-		{
-			#spread the load among other workers
-			#...
-		}
-		li = parallel::parLapply(cl, indices, processChunk, K, WER=="mix")
-		#C) flush tmp file (current parallel processes will write in it)
+		# Initialize parallel runs: outfile="" allows outputting verbose traces to the
+		# console (under Linux). All necessary variables are passed to the workers.
+		cl = parallel::makeCluster(ncores_tasks, outfile="")
+		parallel::clusterExport(cl, envir = environment(),
+			varlist = c("getSeries","getContribs","K1","K2","algoClust1","algoClust2",
+			"nb_series_per_chunk","ncores_clust","nvoice","nbytes","endian","verbose","parll"))
 	}
-	parallel::stopCluster(cl)
 
-	#3) readTmp last results, apply PAM on it, and return medoids + identifiers
-	final_coeffs = readTmp(1, nb_series_per_chunk)
-	if (nrow(final_coeffs) == K)
+	# This function achieves one complete clustering task, divided into stage 1 + stage 2.
+	# stage 1: n indices --> clusteringTask1(...) --> K1 medoids
+	# stage 2: K1 medoids --> clusteringTask2(...) --> K2 medoids,
+	# where n = N / ntasks, N being the total number of curves.
+	runTwoStepClustering = function(inds)
 	{
-		return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)]),
-			ids=final_coeffs[,1] ) )
+		# When running in parallel, the environment is blank: we need to load the required
+		# packages, and pass useful variables.
+		if (parll && ntasks>1)
+			require("epclust", quietly=TRUE)
+		indices_medoids = clusteringTask1(inds, getContribs, K1, algoClust1,
+			nb_series_per_chunk, ncores_clust, verbose, parll)
+		if (WER=="mix")
+		{
+			indices_medoids = clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+				nb_series_per_chunk, nvoice, nbytes, endian, ncores_clust, verbose, parll)
+		}
+		indices_medoids
 	}
-	pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K)
-	medoids = coeffsToCurves(pam_output$medoids, wf)
-	ids = final_coeffs[,1] [pam_output$ranks]
 
-	#4) apply stage 2 (in parallel ? inside task 2) ?)
-	if (WER == "end")
+	if (verbose)
 	{
-		#from center curves, apply stage 2...
-		#TODO:
+		message = paste("...Run ",ntasks," x stage 1", sep="")
+		if (WER=="mix")
+			message = paste(message," + stage 2", sep="")
+		cat(paste(message,"\n", sep=""))
 	}
-	return (list(medoids=medoids, ids=ids))
-}
+	# Gather the medoid indices computed by all tasks: ntasks*K1 indices if WER=="end",
+	# or ntasks*K2 indices if WER=="mix" (stage 2 was then already applied in each task).
+	indices_medoids_all <-
+		if (parll && ntasks>1)
+			unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
+		else
+			unlist( lapply(indices_tasks, runTwoStepClustering) )
 
-processChunk = function(indice, K, WER)
-{
-	#1) retrieve data
-	coeffs = readTmp(indice[1], indice[2])
-	#2) cluster
-	cl = getClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K)
-	#3) WER (optional)
-	#TODO:
-}
+	if (parll && ntasks>1)
+		parallel::stopCluster(cl)
 
-#TODO: difficulty: retrieve a curve from its identifier (fine for a DB, but what else?)
-#also: what do we pass to the nodes? curvesToCoeffs in parallel?
-#finally: WER?!
+	# Right before the final stage, the input data is still the initial set of curves,
+	# referenced by the ntasks*[K1 or K2] medoid indices.
+
+	# Run a last clustering task to obtain only K2 medoid indices from the
+	# ntasks*[K1 or K2] indices, depending on whether WER=="end" or WER=="mix"
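+	# For example (hypothetical run): N=2400 series, ntasks=3, K1=60, K2=6, WER=="end".
+	# Each task above returned 60 medoid indices; the final stage clusters the
+	# 3*60=180 corresponding contributions down to 60 medoids, then applies stage 2
+	# (WER distances between these 60 medoids) to obtain the final 6.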
+	if (verbose)
+		cat("...Run final // stage 1 + stage 2\n")
+	indices_medoids = clusteringTask1(indices_medoids_all, getContribs, K1, algoClust1,
+		nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
+	indices_medoids = clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+		nb_series_per_chunk, nvoice, nbytes, endian, ncores_tasks*ncores_clust, verbose, parll)
+
+	# Compute synchrones, i.e. the sum of all series within each final group
+	medoids = getSeries(indices_medoids)
+	synchrones = computeSynchrones(medoids, getSeries, nb_curves, nb_series_per_chunk,
+		ncores_tasks*ncores_clust, verbose, parll)
+
+	# NOTE: no need for a big.matrix here, since only K2 << K1 << N curves remain
+	list("medoids"=medoids, "ranks"=indices_medoids, "synchrones"=synchrones)
+}