#' CLAWS: CLustering with wAvelets and Wer distanceS
#'
#' Groups electricity power curves (or any series of similar nature) by applying the
#' PAM algorithm in parallel to chunks of size \code{nb_series_per_chunk}. Input series
#' must be sampled on the same time grid, with no missing values.
#'
#' @param getSeries Access to the (time-)series, which can be of one of the four
#'   following types:
#' \itemize{
#'   \item [big.]matrix: each row contains all the values for one time series, ordered by time
#'   \item connection: any R connection object providing rows as described above
#'   \item character: name of a CSV file containing series in rows (no header)
#'   \item function: a custom way to retrieve the curves; it has only one argument:
#'     the indices of the series to be retrieved. See examples
#' }
#' @inheritParams clustering
#' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
#' @param wf Wavelet transform filter; see ?wavelets::wt.filter
#' @param ctype Type of contribution: "relative" or "absolute" (or any prefix)
#' @param WER "end" to apply stage 2 after stage 1 has fully iterated, or "mix" to apply
#'   stage 2 at the end of each task
#' @param random TRUE (default) to randomize the repartition of series into chunks
#' @param ntasks Number of tasks (parallel iterations to obtain K1 medoids); default: 1.
#'   Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
#' @param ncores_tasks "MPI" number of parallel tasks (1 to disable: sequential tasks)
#' @param ncores_clust "OpenMP" number of parallel clusterings in one task
#' @param nb_series_per_chunk (~Maximum) number of series in each group, inside a task
#' @param min_series_per_chunk Minimum number of series in each group
#' @param sep Separator in CSV input file (if any provided)
#' @param nbytes Number of bytes used to serialize a floating-point number: 4 or 8
#' @param endian Endianness to use for (de)serialization; use "little" or "big" for portability
#' @param verbose Level of verbosity (0/FALSE for nothing, 1/TRUE for everything; devel stage)
#' @param parll TRUE to fully parallelize; otherwise run sequentially (debug, comparison)
#'
#' @return A big.matrix containing the K2 final medoid curves, in rows
#'
#' @examples
#' \dontrun{
#' # WER distances computations are a bit too long for CRAN (for now)
#'
#' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
#' x = seq(0,500,0.05)
#' L = length(x) #10001
#' ref_series = matrix( c(cos(x), cos(2*x), cos(3*x), sin(x), sin(2*x), sin(3*x)),
#'   byrow=TRUE, ncol=L )
#' library(wmtsa)
#' series = do.call( rbind, lapply( 1:6, function(i)
#'   do.call(rbind, wmtsa::wavBootstrap(ref_series[i,], n.realization=400)) ) )
#' #dim(series) #c(2400,10001)
#' medoids_ascii = claws(series, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
#'
#' # Same example, from CSV file
#' csv_file = "/tmp/epclust_series.csv"
#' write.table(series, csv_file, sep=",", row.names=FALSE, col.names=FALSE)
#' medoids_csv = claws(csv_file, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
#'
#' # Same example, from binary file
#' bin_file = "/tmp/epclust_series.bin"
#' nbytes = 8
#' endian = "little"
#' epclust::binarize(csv_file, bin_file, 500, nbytes, endian)
#' getSeries = function(indices) getDataInFile(indices, bin_file, nbytes, endian)
#' medoids_bin = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
#' unlink(csv_file)
#' unlink(bin_file)
#'
#' # Same example, from SQLite database
#' library(DBI)
#' series_db <- dbConnect(RSQLite::SQLite(), "file::memory:")
#' # Prepare data.frame in DB-format
#' n = nrow(series)
#' time_values = data.frame(
#'   id = rep(1:n, each=L),
#'   time = rep( as.POSIXct(1800*(0:(L-1)), "GMT", origin="2001-01-01"), n ),
#'   value = as.double(t(series)) )
#' dbWriteTable(series_db, "time_values", time_values)
#' # Fill associative array, map index to identifier
#' indexToID_inDB <- as.character(
#'   dbGetQuery(series_db, 'SELECT DISTINCT id FROM time_values')[,"id"] )
#' getSeries = function(indices) {
#'   request = paste( "SELECT id,value FROM time_values WHERE id in (",
#'     paste(indices, collapse=","), ")", sep="" )
#'   df_series = dbGetQuery(series_db, request)
#'   # Assume that all series share the same length at this stage
#'   ts_length = sum(df_series[,"id"] == df_series[1,"id"])
#'   t( matrix(df_series[,"value"], nrow=ts_length) )
#' }
#' medoids_db = claws(getSeries, K1=60, K2=6, "d8", "rel", nb_series_per_chunk=500)
#' dbDisconnect(series_db)
#'
#' # All computed medoids should be the same:
#' digest::sha1(medoids_ascii)
#' digest::sha1(medoids_csv)
#' digest::sha1(medoids_bin)
#' digest::sha1(medoids_db)
#' }
#' @export
claws = function(getSeries, K1, K2,
  wf, ctype, #stage 1
  WER="end", #stage 2
  random=TRUE, #randomize series order?
  ntasks=1, ncores_tasks=1, ncores_clust=4, #control parallelism
  nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1, #chunk size
  sep=",", #ASCII input separator
  nbytes=4, endian=.Platform$endian, #serialization (write,read)
  verbose=FALSE, parll=TRUE)
{
  # Check/transform arguments
  if (!is.matrix(getSeries) && !bigmemory::is.big.matrix(getSeries)
    && !is.function(getSeries)
    && !methods::is(getSeries,"connection") && !is.character(getSeries))
  {
    stop("'getSeries': [big]matrix, function, file or valid connection (no NA)")
  }
  K1 = .toInteger(K1, function(x) x>=2)
  K2 = .toInteger(K2, function(x) x>=2)
  if (!is.logical(random))
    stop("'random': logical")
  tryCatch(
    {ignored <- wavelets::wt.filter(wf)},
    error = function(e) stop("Invalid wavelet filter; see ?wavelets::wt.filter"))
  if (WER!="end" && WER!="mix")
    stop("WER takes values in {'end','mix'}")
  ntasks = .toInteger(ntasks, function(x) x>=1)
  ncores_tasks = .toInteger(ncores_tasks, function(x) x>=1)
  ncores_clust = .toInteger(ncores_clust, function(x) x>=1)
  nb_series_per_chunk = .toInteger(nb_series_per_chunk, function(x) x>=K1)
  min_series_per_chunk = .toInteger(min_series_per_chunk,
    function(x) x>=K1 && x<=nb_series_per_chunk)
  if (!is.character(sep))
    stop("'sep': character")
  nbytes = .toInteger(nbytes, function(x) x==4 || x==8)

  # Serialize series if required, to always use a function
  bin_dir = ".epclust_bin/"
  dir.create(bin_dir, showWarnings=FALSE, mode="0755")
  if (!is.function(getSeries))
  {
    if (verbose)
      cat("...Serialize time-series\n")
    series_file = paste(bin_dir,"data",sep="") ; unlink(series_file)
    binarize(getSeries, series_file, nb_series_per_chunk, sep, nbytes, endian)
    getSeries = function(inds) getDataInFile(inds, series_file, nbytes, endian)
  }
  # Serialize all computed wavelets contributions into a file
  contribs_file = paste(bin_dir,"contribs",sep="") ; unlink(contribs_file)
  if (verbose)
    cat("...Compute contributions and serialize them\n")
  nb_curves = binarizeTransform(getSeries,
    function(series) curvesToContribs(series, wf, ctype),
    contribs_file, nb_series_per_chunk, nbytes, endian)
  getContribs = function(indices) getDataInFile(indices, contribs_file, nbytes, endian)
  if (nb_curves < min_series_per_chunk)
    stop("Not enough data: fewer rows than min_series_per_chunk!")
  nb_series_per_task = round(nb_curves / ntasks)
  if (nb_series_per_task < min_series_per_chunk)
    stop("Too many tasks: fewer series in one task than min_series_per_chunk!")

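  # Closure run by each task: stage 1 (PAM on contributions) and, if WER=="mix",
  # stage 2 as well; it returns the K1 medoid indices, or an empty vector when
  # WER=="mix" since the stage-2 medoids are then written to synchrones_file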
  runTwoStepClustering = function(inds)
  {
    if (parll && ntasks>1)
      require("epclust", quietly=TRUE)
    indices_medoids = clusteringTask1(
      inds, getContribs, K1, nb_series_per_chunk, ncores_clust, verbose, parll)
    if (WER=="mix")
    {
      medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
      medoids2 = clusteringTask2(medoids1,
        K2, getSeries, nb_curves, nb_series_per_chunk, ncores_clust, verbose, parll)
      binarize(medoids2, synchrones_file, nb_series_per_chunk, sep, nbytes, endian)
      return (vector("integer",0))
    }
    indices_medoids
  }

  # Cluster contributions in parallel (by nb_series_per_chunk)
  indices_all = if (random) sample(nb_curves) else seq_len(nb_curves)
  indices_tasks = lapply(seq_len(ntasks), function(i) {
    upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
    indices_all[((i-1)*nb_series_per_task+1):upper_bound]
  })
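  # e.g. with nb_curves=100 and ntasks=3, nb_series_per_task=33 and the tasks get
  # 33, 33 and 34 indices respectively (the last task absorbs the remainder)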
  if (verbose)
    cat(paste("...Run ",ntasks," x stage 1 in parallel\n",sep=""))
  if (WER=="mix")
    {synchrones_file = paste(bin_dir,"synchrones",sep="") ; unlink(synchrones_file)}
  if (parll && ntasks>1)
  {
    cl = parallel::makeCluster(ncores_tasks)
    varlist = c("getSeries","getContribs","K1","K2","verbose","parll",
      "nb_series_per_chunk","ntasks","ncores_clust","sep","nbytes","endian")
    if (WER=="mix")
      varlist = c(varlist, "synchrones_file")
    parallel::clusterExport(cl, varlist=varlist, envir = environment())
  }
  # ntasks*K1 indices [if WER=="end"], or empty vector [if WER=="mix"] --> series on file
  if (parll && ntasks>1)
    indices = unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
  else
    indices = unlist( lapply(indices_tasks, runTwoStepClustering) )
  if (parll && ntasks>1)
    parallel::stopCluster(cl)

  getRefSeries = getSeries
  if (WER=="mix")
  {
    indices = seq_len(ntasks*K2)
    # Now series must be retrieved from synchrones_file
    getSeries = function(inds) getDataInFile(inds, synchrones_file, nbytes, endian)
    # Contributions must be re-computed
    unlink(contribs_file)
    if (verbose)
      cat("...Serialize contributions computed on synchrones\n")
    ignored = binarizeTransform(getSeries,
      function(series) curvesToContribs(series, wf, ctype),
      contribs_file, nb_series_per_chunk, nbytes, endian)
  }

  # Run stage 2 on the resulting indices or series (from file)
  if (verbose)
    cat("...Run final // stage 1 + stage 2\n")
  indices_medoids = clusteringTask1(
    indices, getContribs, K1, nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)
  medoids1 = bigmemory::as.big.matrix( getSeries(indices_medoids) )
  medoids2 = clusteringTask2(medoids1, K2,
    getRefSeries, nb_curves, nb_series_per_chunk, ncores_tasks*ncores_clust, verbose, parll)

  # Cleanup
  unlink(bin_dir, recursive=TRUE)

  medoids2
}

#' curvesToContribs
#'
#' Compute the discrete wavelet coefficients for each series, and aggregate them into
#' energy contributions across scales, as described in https://arxiv.org/abs/1101.4744v2
#'
#' @param series Matrix of series (in rows), of size n x L
#' @inheritParams claws
#'
#' @return A matrix of size n x ceiling(log2(L)) containing contributions in rows
#'
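#' @examples
#' \dontrun{
#' # A minimal sketch of standalone usage: contributions of 10 random series of
#' # length 128, with the "d8" filter also used in the claws() examples above
#' series = matrix(rnorm(10*128), nrow=10)
#' contribs = curvesToContribs(series, "d8", "absolute")
#' #dim(contribs) #c(10,7), since ceiling(log2(128)) == 7 scales
#' }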
#' @export
curvesToContribs = function(series, wf, ctype)
{
  L = length(series[1,])
  D = ceiling( log2(L) )
  nb_sample_points = 2^D
  cont_types = c("relative","absolute")
  ctype = cont_types[ pmatch(ctype,cont_types) ]
  t( apply(series, 1, function(x) {
    interpolated_curve = spline(1:L, x, n=nb_sample_points)$y
    W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
    nrj = rev( sapply( W, function(v) sqrt( sum(v^2) ) ) )
    if (ctype=="relative") nrj / sum(nrj) else nrj
  }) )
}

# Check integer arguments with functional conditions
.toInteger <- function(x, condition)
{
  if (!is.integer(x))
    tryCatch(
      {x = as.integer(x)[1]},
      error = function(e) stop(paste("Cannot convert argument",substitute(x),"to integer"))
    )
  if (!condition(x))
    stop(paste("Argument",substitute(x),"does not verify condition",body(condition)))
  x
}
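
# e.g. .toInteger(5.0, function(x) x>=2) returns 5L, while .toInteger(1, function(x) x>=2)
# stops with a message naming the violated condition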