#' @include defaults.R

#' @title Cluster power curves with PAM in parallel
#'
#' @description Groups electricity power curves (or any series of a similar nature) by applying
#' the PAM algorithm in parallel to chunks of size \code{nb_series_per_chunk}
#'
#' @param data Access to the data, which can be of one of the three following types:
#' \itemize{
#'   \item data.frame: each line contains its ID in the first cell, and all values after
#'   \item connection: any R connection object (e.g. a file) providing lines as described above
#'   \item function: a custom way to retrieve the curves; it has two arguments: the start index
#'     (start) and the number of curves (n); see the sketch below and the package vignette.
#' }
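#'
#' A minimal sketch of such a function, assuming the series are rows of an in-memory
#' data.frame \code{series_df} (hypothetical name), IDs included:
#' \code{function(start, n) series_df[start:(start+n-1),]}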
#' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
#' @param ntasks Number of tasks (parallel iterations to obtain K1 medoids); default: 1.
#' Note: ntasks << N (number of series), so that N is roughly divisible by ntasks
#' @param nb_series_per_chunk (Maximum) number of series in each group, inside a task
#' @param min_series_per_chunk Minimum number of series in each group
#' @param writeTmp Function to write temporary wavelet coefficients (+ identifiers);
#' see defaults in defaults.R
#' @param readTmp Function to read temporary wavelet coefficients (see defaults.R)
#' @param wf Wavelet transform filter; see ?wt.filter. Default: haar
#' @param WER "end" to apply stage 2 after stage 1 has iterated and finished, or "mix"
#' to apply it after every stage 1 iteration
#' @param ncores_tasks Number of parallel tasks (1 to disable: sequential tasks)
#' @param ncores_clust Number of parallel clusterings in one task
#'
#' @return A data.frame of the final medoid curves (identifiers + values)
#'
#' @examples
#' getData = function(start, n) {
#' 	con = DBI::dbConnect(drv = RSQLite::SQLite(), dbname = "mydata.sqlite")
#' 	df = DBI::dbGetQuery(con, paste(
#' 		"SELECT * FROM times_values GROUP BY id ORDER BY date",
#' 		" LIMIT ", n, " OFFSET ", start, sep=""))
#' 	DBI::dbDisconnect(con)
#' 	return (df)
#' }
#' cl = epclust(getData, K1=200, K2=15, ntasks=1000, nb_series_per_chunk=5000, WER="mix")
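#' # Alternative sketch: read the series from a plain-text connection
#' # (the file name "series.csv" is hypothetical)
#' #cl = epclust(file("series.csv"), K1=200, K2=15, ntasks=1000, nb_series_per_chunk=5000)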
#' @export
epclust = function(data, K1, K2,
	ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
	writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, wf="haar", WER="end",
	ncores_tasks=1, ncores_clust=4)
{
	#TODO: setRefClass(...) to avoid copying data:
	#http://stackoverflow.com/questions/2603184/r-pass-by-reference

	#0) check arguments
	if (!is.data.frame(data) && !is.function(data))
		tryCatch(
		{
			if (is.character(data))
			{
				data_con = file(data, open="r")
			} else
			{
				if (!isOpen(data))
					open(data)
				data_con = data
			}
		},
		error = function(e) stop("data should be a data.frame, a function or a valid connection"))
	if (!is.numeric(K1) || K1%%1 != 0 || K1 < 2)
		stop("K1 should be an integer greater or equal to 2")
	if (!is.numeric(K2) || K2%%1 != 0 || K2 < 2)
		stop("K2 should be an integer greater or equal to 2")
	if (!is.numeric(nb_series_per_chunk) || nb_series_per_chunk%%1 != 0 || nb_series_per_chunk < K1)
		stop("nb_series_per_chunk should be an integer greater or equal to K1")
	if (!is.function(writeTmp) || !is.function(readTmp))
		stop("writeTmp and readTmp should be functions (see defaults.R)")
	if (WER!="end" && WER!="mix")
		stop("WER takes values in {'end','mix'}")
	#concerning ncores, any non-integer value is treated as "use parallel::detectCores()/4"
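	#(hedged sketch of that convention; treating "a quarter of the machine" as the
	# fallback is an assumption, not a documented choice)
	if (!is.numeric(ncores_tasks) || ncores_tasks%%1 != 0)
		ncores_tasks = max(1, parallel::detectCores() %/% 4, na.rm=TRUE)
	if (!is.numeric(ncores_clust) || ncores_clust%%1 != 0)
		ncores_clust = max(1, parallel::detectCores() %/% 4, na.rm=TRUE)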

	#1) acquire data (process curves, get as coeffs)
	#TODO: for data.frame and custom function, run in parallel (connections are sequential[?!])
	index = 1
	nb_curves = 0
	repeat
	{
		coeffs_chunk = NULL
		if (is.data.frame(data))
		{
			#full data matrix
			if (index <= nrow(data))
			{
				coeffs_chunk = curvesToCoeffs(
					data[index:(min(index+nb_series_per_chunk-1,nrow(data))),], wf)
			}
		} else if (is.function(data))
		{
			#custom user function to retrieve the next n curves, typically from a DB
			coeffs_chunk = curvesToCoeffs( data(index, nb_series_per_chunk), wf )
		} else
		{
			#incremental connection
			#TODO: find a better way to parse than using a temp file
			ascii_lines = readLines(data_con, nb_series_per_chunk)
			if (length(ascii_lines) > 0)
			{
				series_chunk_file = ".tmp/series_chunk"
				writeLines(ascii_lines, series_chunk_file)
				coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf )
			}
		}
		if (is.null(coeffs_chunk))
			break
		writeTmp(coeffs_chunk)
		nb_curves = nb_curves + nrow(coeffs_chunk)
		index = index + nb_series_per_chunk
	}
	if (exists("data_con"))
		close(data_con)
	if (nb_curves < min_series_per_chunk)
		stop("Not enough data: fewer rows than min_series_per_chunk!")

	#2) process coeffs (by nb_series_per_chunk) and cluster them in parallel
	library(parallel)
	cl_tasks = parallel::makeCluster(ncores_tasks)
	#Nothing to export because each worker retrieves and writes data from/to files (or a DB)
	#parallel::clusterExport(cl=cl, varlist=c("nothing","to","export"), envir=environment())
	#TODO: be careful to write to a new temp file, then flush the initial one, then re-use it...
	res_tasks = parallel::parSapply(cl_tasks, 1:ntasks, function(task) {
		cl_clust = parallel::makeCluster(ncores_clust)
		repeat
		{
			#while there are jobs to do
			#(i.e. size of tmp "file" is greater than ntasks * nb_series_per_chunk)
			nb_workers = nb_curves %/% nb_series_per_chunk
			indices = list()
			#indices[[i]] == (start_index, number_of_elements)
			for (i in seq_len(nb_workers))
				indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk)
			remainder = nb_curves %% nb_series_per_chunk
			if (remainder >= min_series_per_chunk)
			{
				nb_workers = nb_workers + 1
				indices[[nb_workers]] = c(nb_curves-remainder+1, remainder)
			} else if (remainder > 0)
			{
				#too few series for a dedicated worker: append them to the last chunk
				#(simple hedged choice; the original intent was to spread the load among workers)
				indices[[nb_workers]][2] = indices[[nb_workers]][2] + remainder
			}
			res_clust = parallel::parSapply(cl_clust, indices, processChunk, K1, WER=="mix")
			#flush tmp file (current parallel processes will write in it)
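			#hedged termination sketch: stop once the tmp storage holds no more than
			#ntasks*nb_series_per_chunk series; tmpSize() is a hypothetical helper
			#if (tmpSize() <= ntasks * nb_series_per_chunk) break
			break #TODO: replace this unconditional break by the test sketched above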
		}
		parallel::stopCluster(cl_clust)
	})
	parallel::stopCluster(cl_tasks)

	#3) readTmp last results, apply PAM on it, and return medoids + identifiers
	final_coeffs = readTmp(1, nb_series_per_chunk)
	if (nrow(final_coeffs) == K1)
	{
		return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)], wf),
			ids=final_coeffs[,1] ) )
	}
	pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K1)
	medoids = coeffsToCurves(pam_output$medoids, wf)
	ids = final_coeffs[,1][pam_output$ranks]

	#4) apply stage 2 (in parallel? inside each task?)
	if (WER == "end")
	{
		#from the medoid curves, apply stage 2...
		#TODO:
	}

	return (list(medoids=medoids, ids=ids))
}

processChunk = function(indice, K, WER)
{
	#1) retrieve data
	coeffs = readTmp(indice[1], indice[2])
	#2) cluster
	cl = getClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K)
	#3) WER (optional)
	#TODO:
}

#TODO: difficulty: retrieve a curve from its identifier (fine for a DB, but what about other sources?)
#also: what do we pass to the nodes? curvesToCoeffs in parallel?
#finally: WER?!
#TODO: snippet computing the synchrone curves after stages 1+2, from the medoid IDs
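
#For reference, a hedged sketch of the curvesToCoeffs contract assumed throughout this
#file (the real implementation lives elsewhere in the package): input is a data.frame
#with IDs in the first column and series values after; output keeps the IDs and
#replaces the values by DWT coefficients (package wavelets, cf. ?wt.filter above),
#one row per series. Kept commented out to avoid clashing with the package function.
#curvesToCoeffs_sketch = function(df, wf)
#{
#	coeffs = t( apply(df[,-1,drop=FALSE], 1, function(series)
#		unlist( wavelets::dwt(as.numeric(series), filter=wf)@W ) ) )
#	data.frame(id=df[,1], coeffs)
#}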