From: Benjamin Auder
Date: Sun, 19 Feb 2017 23:37:33 +0000 (+0100)
Subject: set final draft for package
X-Git-Url: https://git.auder.net/doc/html/%7B%7B%20path%28%27fos_user_change_password%27%29%20%7D%7D?a=commitdiff_plain;h=5c6529795907ba1b34d4552cbfd0e0cbb77cac0f;p=epclust.git

set final draft for package
---
diff --git a/epclust/DESCRIPTION b/epclust/DESCRIPTION
index 2b2c1f5..bb3e6cf 100644
--- a/epclust/DESCRIPTION
+++ b/epclust/DESCRIPTION
@@ -16,5 +16,7 @@ Depends:
     wavelets
 Suggests:
     testthat,
-    knitr
+    knitr,
+    wmtsa,
+    RSQLite
 License: MIT + file LICENSE
diff --git a/epclust/R/algorithms.R b/epclust/R/algorithms.R
deleted file mode 100644
index 97dce90..0000000
--- a/epclust/R/algorithms.R
+++ /dev/null
@@ -1,24 +0,0 @@
-#NOTE: always keep ID in first column
-curvesToCoeffs = function(series, wf)
-{
-	library(wavelets)
-	L = length(series[1,])
-	D = ceiling( log(L-1) )
-	nb_sample_points = 2^D
-	#TODO: parallel::parApply() ?!
-	res = apply(series, 1, function(x) {
-		interpolated_curve = spline(1:(L-1), x[2:L], n=nb_sample_points)$y
-		W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
-		nrj_coeffs = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
-		return ( c(x[1], nrj_coeffs) )
-	})
-	return (as.data.frame(res))
-}
-
-getClusters = function(data, K)
-{
-	library(cluster)
-	pam_output = cluster::pam(data, K)
-	return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
-		ranks=pam_output$id.med ) )
-}
diff --git a/epclust/R/stage2.R b/epclust/R/clustering.R
similarity index 53%
rename from epclust/R/stage2.R
rename to epclust/R/clustering.R
index 3ccbbad..e27ea35 100644
--- a/epclust/R/stage2.R
+++ b/epclust/R/clustering.R
@@ -1,8 +1,66 @@
-library("Rwave")
+oneIteration = function(..........)
+{
+	cl_clust = parallel::makeCluster(ncores_clust)
+	parallel::clusterExport(cl_clust, .............., envir=........)
+	indices_clust = indices_task[[i]]
+	repeat
+	{
+		nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
+		indices_workers = list()
+		#indices[[i]] == (start_index,number_of_elements)
+		for (i in 1:nb_workers)
+		{
+			upper_bound = ifelse( i 0)
+	{
+		curves = computeSynchrones(cl)
+		dists = computeWerDists(curves)
+		cl = computeClusters(dists, K2)
+	}
+	cl
+}
+
+computeClusters = function(data, K)
+{
+	library(cluster)
+	pam_output = cluster::pam(data, K)
+	return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
+		ranks=pam_output$id.med ) )
+}
+
+#TODO: appendCoeffs() in C --> serialize and append to file
+
+computeSynchrones = function(...)
+{
+
+}
 
 #Input: synchronous curves, either after stage 1 has been iterated, or after each stage 1 pass
-step2 = function(conso)
+computeWerDist = function(conso)
 {
+	if (!require("Rwave", quietly=TRUE))
+		stop("Unable to load Rwave library")
 	n <- nrow(conso)
 	delta <- ncol(conso)
 	#TODO: automatic tuning of all these parameters? (for other users)
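A quick illustration of the stage-1 building block above: computeClusters() is a thin wrapper around cluster::pam(), returning the cluster assignment, the medoid coefficient vectors, and the medoid ranks (used to map back to series identifiers). The snippet below is self-contained; the toy coefficient table and the K=2 choice are demonstration values, not part of the commit.

library(cluster)

set.seed(42)
#toy chunk of energy coefficients: ID in first column, as produced by curvesToCoeffs
coeffs = data.frame(
	id = 1:60,
	e1 = c(rnorm(30, 0), rnorm(30, 5)),  #two artificial groups
	e2 = c(rnorm(30, 0), rnorm(30, 5))
)

computeClusters = function(data, K)
{
	pam_output = cluster::pam(data, K)
	list( clusts=pam_output$clustering, medoids=pam_output$medoids,
		ranks=pam_output$id.med )
}

res = computeClusters(as.matrix(coeffs[,2:3]), 2)
#identifiers of the K=2 medoid series, i.e. those kept for the next iteration
print( coeffs$id[ res$ranks ] )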
diff --git a/epclust/R/computeCoeffs.R b/epclust/R/computeCoeffs.R
new file mode 100644
index 0000000..5bc4744
--- /dev/null
+++ b/epclust/R/computeCoeffs.R
@@ -0,0 +1,46 @@
+computeCoeffs = function(data, index, nb_series_per_chunk, wf)
+{
+	coeffs_chunk = NULL
+	if (is.data.frame(data) && index < nrow(data))
+	{
+		#full data matrix
+		coeffs_chunk = curvesToCoeffs(
+			data[index:(min(index+nb_series_per_chunk-1,nrow(data))),], wf)
+	}
+	else if (is.function(data))
+	{
+		#custom user function to retrieve next n curves, probably to read from DB
+		coeffs_chunk = curvesToCoeffs( data(rank=(index-1)+seq_len(nb_series_per_chunk)), wf )
+	}
+	else if (exists("data_con"))
+	{
+		#incremental connection ; TODO: more efficient way to parse than using a temp file
+		ascii_lines = readLines(data_con, nb_series_per_chunk)
+		if (length(ascii_lines) > 0)
+		{
+			series_chunk_file = ".series_chunk"
+			writeLines(ascii_lines, series_chunk_file)
+			coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf )
+			unlink(series_chunk_file)
+		}
+	}
+	coeffs_chunk
+}
+
+#NOTE: always keep ID in first column
+curvesToCoeffs = function(series, wf)
+{
+	if (!require(wavelets, quietly=TRUE))
+		stop("Couldn't load wavelets library")
+	L = length(series[1,])
+	D = ceiling( log2(L-1) )
+	nb_sample_points = 2^D
+	#TODO: parallel::parApply() ?!
+	res = apply(series, 1, function(x) {
+		interpolated_curve = spline(1:(L-1), x[2:L], n=nb_sample_points)$y
+		W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
+		nrj_coeffs = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
+		return ( c(x[1], nrj_coeffs) )
+	})
+	#transpose: apply() binds results as columns, but callers expect one row per series
+	return (as.data.frame(t(res)))
+}
diff --git a/epclust/R/defaults.R b/epclust/R/defaults.R
deleted file mode 100644
index 4205745..0000000
--- a/epclust/R/defaults.R
+++ /dev/null
@@ -1,11 +0,0 @@
-#TODO: ascii format (default) (+ binary format?)
-defaultWriteTmp = function(curves [uncompressed coeffs, limited number - nbSeriesPerChunk], last=FALSE)
-{
-	#if last=TRUE, close the conn (??)
-}
-
-#careful: connection must remain open
-defaultReadTmp = function(start, n)
-{
-
-}
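As a sanity check on the coefficient computation, the following standalone snippet reproduces what curvesToCoeffs() does for a single series (ID in the first slot): interpolate onto a dyadic grid, run a discrete wavelet transform, and keep one energy per resolution level. The sine curve and the 'haar' filter are illustrative choices, not part of the commit.

library(wavelets)

x = c(1, sin(2*pi*(1:100)/24))  #ID = 1, followed by 100 sample points
L = length(x)
D = ceiling( log2(L-1) )        #number of DWT levels; 2^D >= L-1
#interpolate the curve onto 2^D points (dwt requires a dyadic length)
interpolated_curve = spline(1:(L-1), x[2:L], n=2^D)$y
W = wavelets::dwt(interpolated_curve, filter="haar", D)@W
#one energy (L2 norm) per resolution level, coarsest level first
nrj_coeffs = rev( sapply(W, function(v) sqrt(sum(v^2))) )
print( c(x[1], nrj_coeffs) )    #ID + D energy coefficients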
diff --git a/epclust/R/main.R b/epclust/R/main.R
index e18ea7b..e794351 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -1,5 +1,3 @@
-#' @include defaults.R
-
 #' @title Cluster power curves with PAM in parallel
 #'
 #' @description Groups electricity power curves (or any series of similar nature) by applying PAM
@@ -18,14 +16,12 @@
 #' Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
 #' @param nb_series_per_chunk (Maximum) number of series in each group, inside a task
 #' @param min_series_per_chunk Minimum number of series in each group
-#' @param writeTmp Function to write temporary wavelets coefficients (+ identifiers);
-#'   see defaults in defaults.R
-#' @param readTmp Function to read temporary wavelets coefficients (see defaults.R)
 #' @param wf Wavelet transform filter; see ?wt.filter. Default: haar
 #' @param WER "end" to apply stage 2 after stage 1 has iterated and finished, or "mix"
 #'   to apply it after every stage 1
-#' @param ncores_tasks number of parallel tasks (1 to disable: sequential tasks)
-#' @param ncores_clust number of parallel clusterings in one task
+#' @param ncores_tasks "MPI" number of parallel tasks (1 to disable: sequential tasks)
+#' @param ncores_clust "OpenMP" number of parallel clusterings inside one task
+#' @param random Randomize the distribution of series among chunks
 #'
 #' @return A data.frame of the final medoids curves (identifiers + values)
 #'
@@ -37,148 +33,84 @@
 #'   "LIMIT ", n, " ORDER BY date", sep=""))
 #'   return (df)
 #' }
+#' #TODO: 3 examples, data.frame / binary file / DB sqLite
+#' + sampleCurves : wavBootstrap from package wmtsa
 #' cl = epclust(getData, K1=200, K2=15, ntasks=1000, nb_series_per_chunk=5000, WER="mix")
 #' @export
-epclust = function(data, K1, K2,
-	ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
-	writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, wf="haar", WER="end",
-	ncores_tasks=1, ncores_clust=4)
+epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
+	wf="haar", WER="end", ncores_tasks=1, ncores_clust=4, random=TRUE)
 {
-	#TODO: setRefClass(...) to avoid copy data:
-	#http://stackoverflow.com/questions/2603184/r-pass-by-reference
-	#0) check arguments
 	if (!is.data.frame(data) && !is.function(data))
+	{
 		tryCatch(
 		{
 			if (is.character(data))
-			{
 				data_con = file(data, open="r")
-			} else if (!isOpen(data))
+			else if (!isOpen(data))
 			{
 				open(data)
 				data_con = data
 			}
 		},
-		error="data should be a data.frame, a function or a valid connection")
-	if (!is.integer(K) || K < 2)
-		stop("K should be an integer greater or equal to 2")
-	if (!is.integer(nb_series_per_chunk) || nb_series_per_chunk < K)
-		stop("nb_series_per_chunk should be an integer greater or equal to K")
-	if (!is.function(writeTmp) || !is.function(readTmp))
-		stop("read/writeTmp should be functional (see defaults.R)")
+		error=function(e) stop("data should be a data.frame, a function or a valid connection")
+		)
+	}
+	K1 = toInteger(K1, function(x) x>=2)
+	K2 = toInteger(K2, function(x) x>=2)
+	ntasks = toInteger(ntasks)
+	nb_series_per_chunk = toInteger(nb_series_per_chunk, function(x) x>=K1)
+	min_series_per_chunk = toInteger(min_series_per_chunk, function(x) x>=K1 && x<=nb_series_per_chunk)
+	ncores_tasks = toInteger(ncores_tasks, function(x) x>=1)
+	ncores_clust = toInteger(ncores_clust, function(x) x>=1)
 	if (WER!="end" && WER!="mix")
		stop("WER takes values in {'end','mix'}")
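#toInteger() is called above but defined nowhere in this commit; the following
#is a plausible sketch consistent with its call sites (the body and the default
#condition are assumptions, not the package's actual helper):
toInteger = function(x, condition=function(x) TRUE)
{
	if (!is.numeric(x) || x != round(x) || !condition(as.integer(x)))
		stop( paste("invalid value for argument", deparse(substitute(x))) )
	as.integer(x)
}
#usage, as above: K1 = toInteger(K1, function(x) x>=2) accepts 3, rejects 1 or 2.5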
".tmp/series_chunk" - writeLines(ascii_lines, series_chunk_file) - coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf ) - } - } + coeffs_chunk = computeCoeffs(data, index, nb_series_per_chunk, wf) if (is.null(coeffs_chunk)) break - writeTmp(coeffs_chunk) - nb_curves = nb_curves + nrow(coeffs_chunk) + serialized_coeffs = serialize(coeffs_chunk) + appendBinary(coeffs_file, serialized_coeffs) index = index + nb_series_per_chunk + nb_curves = nb_curves + nrow(coeffs_chunk) + if (is.na(nb_coeffs)) + nb_coeffs = ncol(coeffs_chunk)-1 } - if (exists(data_con)) - close(data_con) - if (nb_curves < min_series_per_chunk) - stop("Not enough data: less rows than min_series_per_chunk!") - #2) process coeffs (by nb_series_per_chunk) and cluster them in parallel - library(parallel) - cl_tasks = parallel::makeCluster(ncores_tasks) - #Nothing to export because each worker retrieve and put data from/on files (or DB) - #parallel::clusterExport(cl=cl, varlist=c("nothing","to","export"), envir=environment()) - #TODO: be careful of writing to a new temp file, then flush initial one, then re-use it... - res_tasks = parallel::parSapply(cl_tasks, 1:ntasks, function() { - cl_clust = parallel::makeCluster(ncores_clust) - repeat - { - #while there are jobs to do - #(i.e. size of tmp "file" is greater than ntasks * nb_series_per_chunk) - nb_workers = nb_curves %/% nb_series_per_chunk - indices = list() - #indices[[i]] == (start_index,number_of_elements) - for (i in 1:nb_workers) - indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk) - remainder = nb_curves %% nb_series_per_chunk - if (remainder >= min_series_per_chunk) - { - nb_workers = nb_workers + 1 - indices[[nb_workers]] = c(nb_curves-remainder+1, nb_curves) - } else if (remainder > 0) - { - #spread the load among other workers - #... - } - res_clust = parallel::parSapply(cl, indices, processChunk, K, WER=="mix") - #C) flush tmp file (current parallel processes will write in it) - } - parallel:stopCluster(cl_clust) - }) - parallel::stopCluster(cl_tasks) +# finalizeSerialization(coeffs_file) ........, nb_curves, ) +#TODO: is it really useful ?! we will always have these informations (nb_curves, nb_coeffs) - #3) readTmp last results, apply PAM on it, and return medoids + identifiers - final_coeffs = readTmp(1, nb_series_per_chunk) - if (nrow(final_coeffs) == K) - { - return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)]), - ids=final_coeffs[,1] ) ) - } - pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K) - medoids = coeffsToCurves(pam_output$medoids, wf) - ids = final_coeffs[,1] [pam_output$ranks] + if (nb_curves < min_series_per_chunk) + stop("Not enough data: less rows than min_series_per_chunk!") + nb_series_per_task = round(nb_curves / ntasks) + if (nb_series_per_task < min_series_per_chunk) + stop("Too many tasks: less series in one task than min_series_per_chunk!") - #4) apply stage 2 (in parallel ? inside task 2) ?) - if (WER == "end") + #2) Cluster coefficients in parallel (by nb_series_per_chunk) + # All indices, relative to complete dataset + indices = if (random) sample(nb_curves) else seq_len(nb_curves) + # Indices to be processed in each task + indices_tasks = list() + for (i in seq_len(ntasks)) { - #from center curves, apply stage 2... - #TODO: + upper_bound = ifelse( i