#' @include defaults.R

#' @title Cluster power curves with PAM in parallel
#'
#' @description Groups electricity power curves (or any series of a similar nature) by applying
#'   the PAM algorithm in parallel to chunks of size \code{nbSeriesPerChunk}
#'
#' @param data Access to the data, which can be of one of the three following types:
#'   \itemize{
#'     \item data.frame: each row contains the curve identifier in the first cell, followed by
#'       all its values
#'     \item connection: any R connection object (e.g. a file) providing lines as described above
#'     \item function: a custom way to retrieve the curves; it takes two arguments: the start
#'       index (start) and the number of curves to return (n); see the example in the package
#'       vignette (a short sketch is also given in the examples below)
#'   }
#' @param K Number of clusters
#' @param nbSeriesPerChunk Number of series in each chunk
#' @param writeTmp Function to write temporary wavelet coefficients (along with their
#'   identifiers); see the defaults in defaults.R
#' @param readTmp Function to read temporary wavelet coefficients (see defaults.R)
#' @param WER "end" to apply stage 2 only once stage 1 has finished iterating, or "mix" to
#'   apply it after every stage 1 iteration
#' @param ncores Number of parallel processes; if NULL, use parallel::detectCores()
#'
#' @return A data.frame of the final medoid curves (identifiers + values)
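#'
#' @examples
#' \dontrun{
#' ## Illustrative sketch only: it assumes the default writeTmp/readTmp helpers from defaults.R
#' ## and a headerless CSV file "curves.csv" (a hypothetical name) whose rows follow the format
#' ## "id,value1,value2,..." described above.
#'
#' # 1) data passed as a data.frame already loaded in memory
#' df = read.csv("curves.csv", header=FALSE)
#' medoids = epclust(df, K=6L, nbSeriesPerChunk=1000L)
#'
#' # 2) data passed as a custom retrieval function (e.g. backed by a database);
#' #    getSeries below is a hypothetical user-defined helper with the (start, n) signature
#' getSeries = function(start, n) df[start:min(start+n-1, nrow(df)), ]
#' medoids = epclust(getSeries, K=6L, nbSeriesPerChunk=1000L, WER="mix")
#' }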
epclust = function(data, K, nbSeriesPerChunk, writeTmp=ref_writeTmp, readTmp=ref_readTmp,
	WER="end", ncores=NULL)
{
	#TODO: setRefClass(...) to avoid copying data:
	#http://stackoverflow.com/questions/2603184/r-pass-by-reference

	#0) check arguments
	if (!is.data.frame(data) && !is.function(data))
	{
		tryCatch(
		{
			if (is.character(data))
			{
				#character string: treat it as a file path
				dataCon = file(data, open="r")
			} else
			{
				#connection object: open it if needed
				if (!isOpen(data))
					open(data)
				dataCon = data
			}
		},
		error = function(e)
			stop("data should be a data.frame, a function or a valid connection"))
	}
	if (!is.integer(K) || K < 2)
		stop("K should be an integer greater than or equal to 2")
	if (!is.integer(nbSeriesPerChunk) || nbSeriesPerChunk < K)
		stop("nbSeriesPerChunk should be an integer greater than or equal to K")
	if (!is.function(writeTmp) || !is.function(readTmp))
		stop("writeTmp and readTmp should be functions (see defaults.R)")
	if (WER!="end" && WER!="mix")
		stop("WER takes values in {'end','mix'}")
	#concerning ncores, any non-integer value is treated as "use parallel::detectCores()"

	#1) acquire data (process curves, get as coeffs)
	index = 1
	nbCurves = 0
	repeat
	{
		if (is.data.frame(data))
		{
			#full data matrix: stop once the last row has been processed
			if (index > nrow(data))
				break
			chunk = data[index:min(index+nbSeriesPerChunk-1, nrow(data)),]
		} else if (is.function(data))
		{
			#custom user function to retrieve the next n curves, probably reading from a DB
			chunk = data(index, nbSeriesPerChunk)
			if (is.null(chunk) || nrow(chunk) == 0)
				break
		} else
		{
			#incremental connection: read the next nbSeriesPerChunk lines
			#TODO: find a better way to parse than using a temp file
			ascii_lines = readLines(dataCon, nbSeriesPerChunk)
			if (length(ascii_lines) == 0)
				break
			seriesChunkFile = ".tmp/seriesChunk"
			writeLines(ascii_lines, seriesChunkFile)
			chunk = read.csv(seriesChunkFile, header=FALSE)
		}
		error = writeTmp( getCoeffs(chunk) )
		nbCurves = nbCurves + nrow(chunk)
		index = index + nbSeriesPerChunk
	}
	if (exists("dataCon"))
		close(dataCon)

	library(parallel)
	ncores = ifelse(is.integer(ncores), ncores, parallel::detectCores())
	cl = parallel::makeCluster(ncores)
	#TODO: the exported names below are placeholders; workers will need at least K and the
	#readTmp/writeTmp helpers once step 2 is implemented
	parallel::clusterExport(cl=cl, varlist=c("K","nbSeriesPerChunk","readTmp","writeTmp"),
		envir=environment())
	library(cluster)
	#TODO: leftover template call, to be replaced by the real per-chunk clustering tasks:
	#li = parallel::parLapply(cl, 1:B, getParamsAtIndex)

	#2) process coeffs (by nbSeriesPerChunk) and cluster them in parallel
	#TODO: be careful to write to a new temp file, then flush the initial one, then re-use it...
	repeat
	{
		#completed = rep(FALSE, ...) #one flag per task; length still to be determined
		#while there are jobs to do (i.e. the size of the tmp "file" is greater than nbSeriesPerChunk):
		#A) determine which tasks each processor will do (OK)
		#B) send each (set of) tasks in parallel
		#C) flush the tmp file (the current parallel processes will write into it)
		#always check the "completed" flags (an array, as I did in MPI) to know when the "slaves" finished
		break #TODO: placeholder until the loop body above is implemented
	}
	parallel::stopCluster(cl)

	#3) readTmp last results, apply PAM on them, and return medoids + identifiers
	#TODO: something like cluster::pam(lastCoeffs, K)$medoids, once readTmp's return format is settled

	#4) apply stage 2 (in parallel? inside task 2?)
	if (WER == "end")
	{
		#from the center curves, apply stage 2...
	}
}

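#Sketch only (not yet wired into epclust): one possible shape for the per-chunk clustering task
#described in step 2 above. It assumes a hypothetical readTmp(start, n) returning a numeric
#data.frame of wavelet coefficients whose first column holds the series identifiers.
clusterChunk = function(startIndex, nbSeriesPerChunk, K, readTmp)
{
	coeffs = readTmp(startIndex, nbSeriesPerChunk)
	ids = coeffs[,1]
	#run PAM on the coefficients only (identifiers excluded)
	pamResult = cluster::pam(coeffs[,-1], K)
	#return the K medoids together with their identifiers
	data.frame(id = ids[pamResult$id.med], pamResult$medoids)
}
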
getCoeffs = function(series)
{
	#TODO: return wavelet coefficients; compute them in parallel!
}
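
#Sketch only, as one way to fill the getCoeffs stub above: a discrete wavelet transform of each
#series via the 'wavelets' package (an assumption; the actual package, filter and decomposition
#depth remain to be chosen, and the parallel computation mentioned above is left out here).
#Expects 'series' as a data.frame with identifiers in the first column and values after.
getCoeffsDWT = function(series, filter="haar", nLevels=4)
{
	coeffs = t( apply(as.matrix(series[,-1]), 1, function(row) {
		#concatenate the wavelet coefficients of all decomposition levels into one vector
		unlist( wavelets::dwt(as.numeric(row), filter=filter, n.levels=nLevels)@W )
	}) )
	cbind(id=series[,1], as.data.frame(coeffs))
}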