set final draft for package
author Benjamin Auder <benjamin.auder@somewhere>
Sun, 19 Feb 2017 23:37:33 +0000 (00:37 +0100)
committer Benjamin Auder <benjamin.auder@somewhere>
Sun, 19 Feb 2017 23:37:33 +0000 (00:37 +0100)
epclust/DESCRIPTION
epclust/R/algorithms.R [deleted file]
epclust/R/clustering.R [moved from epclust/R/stage2.R with 53% similarity]
epclust/R/computeCoeffs.R [new file with mode: 0644]
epclust/R/defaults.R [deleted file]
epclust/R/main.R
epclust/R/sampleCurves.R [deleted file]
epclust/R/utils.R [new file with mode: 0644]

index 2b2c1f5..bb3e6cf 100644
@@ -16,5 +16,7 @@ Depends:
                wavelets
 Suggests:
     testthat,
-    knitr
+    knitr,
+    wmtsa,
+    RSQLite
 License: MIT + file LICENSE
diff --git a/epclust/R/algorithms.R b/epclust/R/algorithms.R
deleted file mode 100644
index 97dce90..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#NOTE: always keep ID in first column
-curvesToCoeffs = function(series, wf)
-{
-       library(wavelets)
-       L = length(series[1,])
-       D = ceiling( log(L-1) )
-       nb_sample_points = 2^D
-       #TODO: parallel::parApply() ?!
-       res = apply(series, 1, function(x) {
-               interpolated_curve = spline(1:(L-1), x[2:L], n=nb_sample_points)$y
-               W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
-               nrj_coeffs = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
-               return ( c(x[1], nrj_coeffs) )
-       })
-       return (as.data.frame(res))
-}
-
-getClusters = function(data, K)
-{
-       library(cluster)
-       pam_output = cluster::pam(data, K)
-       return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
-               ranks=pam_output$id.med ) )
-}
similarity index 53%
rename from epclust/R/stage2.R
rename to epclust/R/clustering.R
index 3ccbbad..e27ea35 100644
@@ -1,8 +1,66 @@
-library("Rwave")
+oneIteration = function(..........)
+{
+               cl_clust = parallel::makeCluster(ncores_clust)
+		parallel::clusterExport(cl_clust, varlist=c(..............), envir=........)
+		indices_clust = indices_tasks[[i]]
+               repeat
+               {
+                       nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
+                       indices_workers = list()
+			#indices_workers[[i]] == vector of series indices for worker i
+                       for (i in 1:nb_workers)
+                       {
+                               upper_bound = ifelse( i<nb_workers,
+                                       min(nb_series_per_chunk*i,length(indices_clust)), length(indices_clust) )
+                               indices_workers[[i]] = indices_clust[(nb_series_per_chunk*(i-1)+1):upper_bound]
+                       }
+			indices_clust = parallel::parSapply(cl_clust, indices_workers, processChunk, K1, K2*(WER=="mix"))
+                       if ( (WER=="end" && length(indices_clust) == K1) ||
+                               (WER=="mix" && length(indices_clust) == K2) )
+                       {
+                               break
+                       }
+               }
+               parallel::stopCluster(cl_clust)
+		indices_clust
+}
+
+processChunk = function(indices, K1, K2)
+{
+       #1) retrieve data (coeffs)
+       coeffs = getCoeffs(indices)
+       #2) cluster
+       cl = computeClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K1)
+       #3) WER (optional)
+       if (K2 > 0)
+       {
+               curves = computeSynchrones(cl)
+		dists = computeWerDist(curves)
+               cl = computeClusters(dists, K2)
+       }
+       cl
+}
+
+computeClusters = function(data, K)
+{
+	if (!require(cluster, quietly=TRUE))
+		stop("Unable to load cluster library")
+       pam_output = cluster::pam(data, K)
+       return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
+               ranks=pam_output$id.med ) )
+}
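+
+# Usage sketch for computeClusters() on a hypothetical toy matrix:
+#   m = matrix(rnorm(50*4), nrow=50)
+#   computeClusters(m, 3)  # $clusts: 50 labels, $medoids: 3 rows, $ranks: 3 indices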
+
+#TODO: appendCoeffs() in C --> serialize and append to file
+
+computeSynchrones = function(...)
+{
+
+}
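+
+# A minimal sketch of what computeSynchrones() could return, assuming 'cl' is the
+# list returned by computeClusters() and 'series' a matrix with one curve per row
+# (names and interface are assumptions, not the final API): the synchrone of a
+# cluster is taken here as the pointwise sum of its member curves.
+computeSynchronesSketch = function(cl, series)
+{
+	t( sapply( seq_along(cl$ranks), function(k)
+		colSums( series[ cl$clusts == k, , drop=FALSE ] ) ) )
+}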
 
 #Input: synchronous curves, either after stage 1 has iterated, or after each stage 1 pass
-step2 = function(conso)
+computeWerDist = function(conso)
 {
+       if (!require("Rwave", quietly=TRUE))
+               stop("Unable to load Rwave library")
        n <- nrow(conso)
        delta <- ncol(conso)
        #TODO: automatic tune of all these parameters ? (for other users)
diff --git a/epclust/R/computeCoeffs.R b/epclust/R/computeCoeffs.R
new file mode 100644
index 0000000..5bc4744
--- /dev/null
@@ -0,0 +1,46 @@
+computeCoeffs = function(data, index, nb_series_per_chunk, wf)
+{
+       coeffs_chunk = NULL
+	if (is.data.frame(data) && index <= nrow(data))
+       {
+               #full data matrix
+               coeffs_chunk = curvesToCoeffs(
+                       data[index:(min(index+nb_series_per_chunk-1,nrow(data))),], wf)
+       }
+       else if (is.function(data))
+       {
+               #custom user function to retrieve next n curves, probably to read from DB
+               coeffs_chunk = curvesToCoeffs( data(rank=(index-1)+seq_len(nb_series_per_chunk)), wf )
+       }
+	else if (exists("data_con"))
+       {
+               #incremental connection ; TODO: more efficient way to parse than using a temp file
+               ascii_lines = readLines(data_con, nb_series_per_chunk)
+		if (length(ascii_lines) > 0)
+               {
+                       series_chunk_file = ".series_chunk"
+                       writeLines(ascii_lines, series_chunk_file)
+                       coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf )
+                       unlink(series_chunk_file)
+               }
+       }
+       coeffs_chunk
+}
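+
+# Usage sketch on an in-memory data.frame, IDs in first column (toy values):
+#   series = data.frame(id=1:10, matrix(rnorm(10*33), nrow=10))
+#   coeffs = computeCoeffs(series, index=1, nb_series_per_chunk=5, wf="haar")
+#   #5 rows of (ID, energy coefficients); the next chunk would start at index=6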
+
+#NOTE: always keep ID in first column
+curvesToCoeffs = function(series, wf)
+{
+       if (!require(wavelets, quietly=TRUE))
+               stop("Couldn't load wavelets library")
+	L = ncol(series)
+       D = ceiling( log2(L-1) )
+       nb_sample_points = 2^D
+       #TODO: parallel::parApply() ?!
+       res = apply(series, 1, function(x) {
+               interpolated_curve = spline(1:(L-1), x[2:L], n=nb_sample_points)$y
+               W = wavelets::dwt(interpolated_curve, filter=wf, D)@W
+               nrj_coeffs = rev( sapply( W, function(v) ( sqrt( sum(v^2) ) ) ) )
+               return ( c(x[1], nrj_coeffs) )
+       })
+	#apply() binds results as columns; transpose so that each series is one row
+	return (as.data.frame(t(res)))
+}
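+
+# For reference, the per-level energy decomposition on a single toy signal:
+#   x = sin(2*pi*(1:128)/16)
+#   W = wavelets::dwt(x, filter="haar", 7)@W
+#   rev( sapply(W, function(v) sqrt(sum(v^2))) )  #one energy per resolution level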
diff --git a/epclust/R/defaults.R b/epclust/R/defaults.R
deleted file mode 100644
index 4205745..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#TODO: ascii format (default) (+ binary format?)
-defaultWriteTmp = function(curves [uncompressed coeffs, limited number - nbSeriesPerChunk], last=FALSE)
-{
-       #if last=TRUE, close the conn (??)
-}
-
-#careful: connection must remain open
-defaultReadTmp = function(start, n)
-{
-       
-}
index e18ea7b..e794351 100644
@@ -1,5 +1,3 @@
-#' @include defaults.R
-
 #' @title Cluster power curves with PAM in parallel
 #'
 #' @description Groups electricity power curves (or any series of similar nature) by applying PAM
#'   Note: ntasks << N (number of series), so that N is "roughly divisible" by ntasks
 #' @param nb_series_per_chunk (Maximum) number of series in each group, inside a task
 #' @param min_series_per_chunk Minimum number of series in each group
-#' @param writeTmp Function to write temporary wavelets coefficients (+ identifiers);
-#'   see defaults in defaults.R
-#' @param readTmp Function to read temporary wavelets coefficients (see defaults.R)
 #' @param wf Wavelet transform filter; see ?wt.filter. Default: haar
 #' @param WER "end" to apply stage 2 after stage 1 has iterated and finished, or "mix"
 #'   to apply it after every stage 1
-#' @param ncores_tasks number of parallel tasks (1 to disable: sequential tasks)
-#' @param ncores_clust number of parallel clusterings in one task
+#' @param ncores_tasks Number of parallel tasks ("MPI"-level parallelism; 1 to disable: sequential tasks)
+#' @param ncores_clust Number of parallel clusterings within one task ("OpenMP"-level parallelism)
+#' @param random TRUE (default) to randomize the distribution of series into chunks
 #'
 #' @return A data.frame of the final medoids curves (identifiers + values)
 #'
 #'     "LIMIT ", n, " ORDER BY date", sep=""))
 #'   return (df)
 #' }
+#'   #TODO: 3 examples, data.frame / binary file / SQLite DB
+#'   + sampleCurves: wavBootstrap from the wmtsa package
 #' cl = epclust(getData, K1=200, K2=15, ntasks=1000, nb_series_per_chunk=5000, WER="mix")
 #' @export
-epclust = function(data, K1, K2,
-       ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
-       writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, wf="haar", WER="end",
-       ncores_tasks=1, ncores_clust=4)
+epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
+       wf="haar", WER="end", ncores_tasks=1, ncores_clust=4, random=TRUE)
 {
-       #TODO: setRefClass(...) to avoid copy data:
-       #http://stackoverflow.com/questions/2603184/r-pass-by-reference
-
        #0) check arguments
        if (!is.data.frame(data) && !is.function(data))
+       {
                tryCatch(
                        {
                                if (is.character(data))
-                               {
                                        data_con = file(data, open="r")
-                               else if (!isOpen(data))
+				else if (!isOpen(data))
 				{
 					open(data)
 					data_con = data
 				}
+				else
+					data_con = data
                        },
-                       error="data should be a data.frame, a function or a valid connection")
-       if (!is.integer(K) || K < 2)
-               stop("K should be an integer greater or equal to 2")
-       if (!is.integer(nb_series_per_chunk) || nb_series_per_chunk < K)
-               stop("nb_series_per_chunk should be an integer greater or equal to K")
-       if (!is.function(writeTmp) || !is.function(readTmp))
-               stop("read/writeTmp should be functional (see defaults.R)")
+			error=function(e) stop("data should be a data.frame, a function or a valid connection")
+               )
+       }
+       K1 = toInteger(K1, function(x) x>=2)
+       K2 = toInteger(K2, function(x) x>=2)
+	ntasks = toInteger(ntasks, function(x) x>=1)
+       nb_series_per_chunk = toInteger(nb_series_per_chunk, function(x) x>=K1)
+	min_series_per_chunk = toInteger(min_series_per_chunk, function(x) x>=K1 && x<=nb_series_per_chunk)
+       ncores_tasks = toInteger(ncores_tasks, function(x) x>=1)
+       ncores_clust = toInteger(ncores_clust, function(x) x>=1)
        if (WER!="end" && WER!="mix")
                stop("WER takes values in {'end','mix'}")
-       #concerning ncores, any non-integer type will be treated as "use parallel:detectCores()/4"
 
-       #1) acquire data (process curves, get as coeffs)
-       #TODO: for data.frame and custom function, run in parallel (connections are sequential[?!])
+       #1) Serialize all wavelets coefficients (+ IDs) onto a file
+       coeffs_file = ".coeffs"
        index = 1
        nb_curves = 0
+       nb_coeffs = NA
        repeat
        {
-               coeffs_chunk = NULL
-               if (is.data.frame(data))
-               {
-                       #full data matrix
-                       if (index < nrow(data))
-                       {
-                               coeffs_chunk = curvesToCoeffs(
-                                       data[index:(min(index+nb_series_per_chunk-1,nrow(data))),], wf)
-                       }
-               } else if (is.function(data))
-               {
-                       #custom user function to retrieve next n curves, probably to read from DB
-                       coeffs_chunk = curvesToCoeffs( data(index, nb_series_per_chunk), wf )
-               } else
-               {
-                       #incremental connection
-                       #TODO: find a better way to parse than using a temp file
-                       ascii_lines = readLines(data_con, nb_series_per_chunk)
-                       if (length(ascii_lines > 0))
-                       {
-                               series_chunk_file = ".tmp/series_chunk"
-                               writeLines(ascii_lines, series_chunk_file)
-                               coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file), wf )
-                       }
-               }
+               coeffs_chunk = computeCoeffs(data, index, nb_series_per_chunk, wf)
                if (is.null(coeffs_chunk))
                        break
-               writeTmp(coeffs_chunk)
-               nb_curves = nb_curves + nrow(coeffs_chunk)
+               serialized_coeffs = serialize(coeffs_chunk)
+               appendBinary(coeffs_file, serialized_coeffs)
                index = index + nb_series_per_chunk
+               nb_curves = nb_curves + nrow(coeffs_chunk)
+               if (is.na(nb_coeffs))
+                       nb_coeffs = ncol(coeffs_chunk)-1
        }
-       if (exists(data_con))
-               close(data_con)
-       if (nb_curves < min_series_per_chunk)
-               stop("Not enough data: less rows than min_series_per_chunk!")
 
-       #2) process coeffs (by nb_series_per_chunk) and cluster them in parallel
-       library(parallel)
-       cl_tasks = parallel::makeCluster(ncores_tasks)
-       #Nothing to export because each worker retrieve and put data from/on files (or DB)
-       #parallel::clusterExport(cl=cl, varlist=c("nothing","to","export"), envir=environment())
-       #TODO: be careful of writing to a new temp file, then flush initial one, then re-use it...
-       res_tasks = parallel::parSapply(cl_tasks, 1:ntasks, function() {
-               cl_clust = parallel::makeCluster(ncores_clust)
-               repeat
-               {
-                       #while there are jobs to do
-                       #(i.e. size of tmp "file" is greater than ntasks * nb_series_per_chunk)
-                       nb_workers = nb_curves %/% nb_series_per_chunk
-                       indices = list()
-                       #indices[[i]] == (start_index,number_of_elements)
-                       for (i in 1:nb_workers)
-                               indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk)
-                       remainder = nb_curves %% nb_series_per_chunk
-                       if (remainder >= min_series_per_chunk)
-                       {
-                               nb_workers = nb_workers + 1
-                               indices[[nb_workers]] = c(nb_curves-remainder+1, nb_curves)
-                       } else if (remainder > 0)
-                       {
-                               #spread the load among other workers
-                               #...
-                       }
-                       res_clust = parallel::parSapply(cl, indices, processChunk, K, WER=="mix")
-                       #C) flush tmp file (current parallel processes will write in it)
-               }
-               parallel:stopCluster(cl_clust)
-       })
-       parallel::stopCluster(cl_tasks)
+#      finalizeSerialization(coeffs_file) ........, nb_curves, )
+#TODO: is it really useful?! we will always have this information (nb_curves, nb_coeffs)
 
-       #3) readTmp last results, apply PAM on it, and return medoids + identifiers
-       final_coeffs = readTmp(1, nb_series_per_chunk)
-       if (nrow(final_coeffs) == K)
-       {
-               return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)]),
-                       ids=final_coeffs[,1] ) )
-       }
-       pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K)
-       medoids = coeffsToCurves(pam_output$medoids, wf)
-       ids = final_coeffs[,1] [pam_output$ranks]
+       if (nb_curves < min_series_per_chunk)
+               stop("Not enough data: less rows than min_series_per_chunk!")
+       nb_series_per_task = round(nb_curves / ntasks)
+       if (nb_series_per_task < min_series_per_chunk)
+               stop("Too many tasks: less series in one task than min_series_per_chunk!")
 
-       #4) apply stage 2 (in parallel ? inside task 2) ?)
-       if (WER == "end")
+       #2) Cluster coefficients in parallel (by nb_series_per_chunk)
+       # All indices, relative to complete dataset
+       indices = if (random) sample(nb_curves) else seq_len(nb_curves)
+       # Indices to be processed in each task
+       indices_tasks = list()
+       for (i in seq_len(ntasks))
        {
-               #from center curves, apply stage 2...
-               #TODO:
+               upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
+		indices_tasks[[i]] = indices[((i-1)*nb_series_per_task+1):upper_bound]
        }
+       library(parallel, quietly=TRUE)
+       cl_tasks = parallel::makeCluster(ncores_tasks)
+	parallel::clusterExport(cl_tasks, varlist=c(.........., "ncores_clust", "indices_tasks",
+		"nb_series_per_chunk", "processChunk", "K1", "K2", "WER"), envir=environment())
+       ranks = parallel::parSapply(cl_tasks, seq_along(indices_tasks), oneIteration)
+       parallel::stopCluster(cl_tasks)
 
-       return (list(medoids=medoids, ids=ids))
-}
-
-processChunk = function(indice, K, WER)
-{
-       #1) retrieve data
-       coeffs = readTmp(indice[1], indice[2])
-       #2) cluster
-       cl = getClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K)
-       #3) WER (optional)
-       #TODO:
+	#3) Run a last stage 1+2 pass on the resulting ranks
+       ranks = oneIteration(.........)
+       return (list("ranks"=ranks, "medoids"=getSeries(data, ranks)))
 }
-
-#TODO: difficulté : retrouver courbe à partir de l'identifiant (DB ok mais le reste ?)
-#aussi : que passe-t-on aux noeuds ? curvesToCoeffs en // ?
-#enfin : WER ?!
-#TODO: bout de code qui calcule les courbes synchrones après étapes 1+2 à partir des ID médoïdes
diff --git a/epclust/R/sampleCurves.R b/epclust/R/sampleCurves.R
deleted file mode 100644
index f9bf1ab..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#TODO
diff --git a/epclust/R/utils.R b/epclust/R/utils.R
new file mode 100644
index 0000000..8f7da38
--- /dev/null
@@ -0,0 +1,44 @@
+toInteger <- function(x, condition)
+{
+       if (!is.integer(x))
+               tryCatch(
+                       {x = as.integer(x)[1]},
+			error = function(e) stop(paste("cannot convert argument",substitute(x),"to integer"))
+               )
+       if (!condition(x))
+               stop(paste("argument",substitute(x),"does not verify condition",body(condition)))
+       x
+}
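+
+# e.g. toInteger(4.3, function(x) x>=2) returns 4L, while
+# toInteger(1, function(x) x>=2) stops with the condition message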
+
+#TODO: merge the next 2 functions?!
+serialize = function(coeffs)
+{
+       #.........
+       #C function (from data.frame, type of IDs ??! force integers ? [yes])
+       #return raw vector
+}
+appendBinary = function(.......)
+{
+       #take raw vector, append it (binary mode) to a file
+}
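+
+# Pure-R sketches of the two helpers above, pending the C versions; they assume one
+# fixed-size record per series (int32 ID, then the coefficients as doubles). Names
+# are placeholders, not the final API; note that serialize() masks base::serialize.
+serializeSketch = function(coeffs)
+{
+	unlist( lapply( seq_len(nrow(coeffs)), function(i)
+		c( writeBin(as.integer(coeffs[i,1]), raw(), size=4),
+			writeBin(as.numeric(unlist(coeffs[i,-1])), raw(), size=8) ) ) )
+}
+appendBinarySketch = function(file_name, raw_data)
+{
+	con = file(file_name, open="ab")  #append in binary mode
+	writeBin(raw_data, con)
+	close(con)
+}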
+
+#finalizeSerialization = function(...)
+#{
+#      #write number of series, and length of each...
+#}
+
+deserialize = function(coeffs, range)
+{
+       #......
+       #C function (from file name)
+}
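+
+# Matching read-back sketch, assuming the fixed-size records of serializeSketch
+# (4 bytes ID + 8*nb_coeffs bytes per series); 'range' holds the wanted row numbers:
+deserializeSketch = function(file_name, range, nb_coeffs)
+{
+	record_size = 4 + 8*nb_coeffs
+	con = file(file_name, open="rb")
+	res = t( sapply( range, function(i) {
+		seek(con, (i-1)*record_size)
+		c( readBin(con, "integer", n=1, size=4),
+			readBin(con, "double", n=nb_coeffs, size=8) )
+	} ) )
+	close(con)
+	as.data.frame(res)
+}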
+
+getSeries = function(data, rank=NULL, id=NULL)
+{
+       #TODO:
+}
+
+getCoeffs = function(.....) #FROM BINARY FILE !!!
+{
+
+}