#' @title Cluster power curves with PAM in parallel
#'
#' @description Groups electricity power curves (or any series of similar nature) by applying PAM
-#' algorithm in parallel to chunks of size \code{nbSeriesPerChunk}
+#' algorithm in parallel to chunks of size \code{nb_series_per_chunk}
#'
#' @param data Access to the data, which can be of one of the three following types:
#' \itemize{
#'   \item data.frame: each line contains the curve identifier in the first cell, then all its values
#'   \item connection: any R connection object providing one series per line, as described above
#'   \item function: a custom way to retrieve the next curves; it has two arguments: the start index
#'     (start) and number of curves (n); see example in package vignette.
#' }
#' @param K Number of clusters
-#' @param nbSeriesPerChunk Number of series in each group
+#' @param nb_series_per_chunk (Maximum) number of series in each group
+#' @param min_series_per_chunk Minimum number of series in each group
#' @param writeTmp Function to write temporary wavelets coefficients (+ identifiers);
#' see defaults in defaults.R
#' @param readTmp Function to read temporary wavelets coefficients (see defaults.R)
-#' @param ncores number of parallel processes; if NULL, use parallel::detectCores()
+#' @param ncores Number of parallel processes; if NULL, use parallel::detectCores()
+#' @param WER "end" to apply stage 2 (WER distance) after stage 1 has completed,
+#' or "mix" to apply it inside each parallel task of stage 1
#'
-#' @return A data.frame of the final medoids curves (identifiers + values)
+#' @return A list with \code{medoids} (the final medoid curves) and \code{ids} (their identifiers)
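+#' @examples
+#' \dontrun{
+#' #minimal usage sketch; "series.csv" is a hypothetical file holding one series
+#' #per line (identifier in the first cell, then the values)
+#' res = epclust("series.csv", K=3L, nb_series_per_chunk=100L)
+#' res$medoids #the K medoid curves
+#' }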
-epclust = function(data, K, nbSeriesPerChunk, writeTmp=ref_writeTmp, readTmp=ref_readTmp,
- WER="end", ncores=NULL)
+epclust = function(data, K, nb_series_per_chunk, min_series_per_chunk=10*K,
+ writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, WER="end", ncores=NULL)
{
#TODO: setRefClass(...) to avoid copy data:
#http://stackoverflow.com/questions/2603184/r-pass-by-reference
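+ #a possible shape (untested sketch): wrap the data in a Reference Class, so that
+ #chunks are fetched through a handle instead of copying the whole data.frame, e.g.
+ # DataRef = setRefClass("DataRef", fields=list(series="data.frame"),
+ #   methods=list(getChunk=function(i, n) series[i:min(i+n-1,nrow(series)),]))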
+ #0) check arguments
+ if (!is.data.frame(data) && !is.function(data))
+ tryCatch(
{
if (is.character(data))
{
- dataCon = file(data, open="r")
+ data_con = file(data, open="r")
} else if (!isOpen(data))
{
open(data)
- dataCon = data
+ data_con = data
}
},
- error="data should be a data.frame, a function or a valid connection")
+ error = function(e)
+ stop("data should be a data.frame, a function or a valid connection"))
if (!is.integer(K) || K < 2)
stop("K should be an integer greater or equal to 2")
- if (!is.integer(nbSeriesPerChunk) || nbSeriesPerChunk < K)
- stop("nbSeriesPerChunk should be an integer greater or equal to K")
+ if (!is.integer(nb_series_per_chunk) || nb_series_per_chunk < K)
+ stop("nb_series_per_chunk should be an integer greater or equal to K")
if (!is.function(writeTmp) || !is.function(readTmp))
stop("read/writeTmp should be functional (see defaults.R)")
if (WER!="end" && WER!="mix")
#1) acquire data (process curves, get as coeffs)
index = 1
- nbCurves = 0
+ nb_curves = 0
repeat
{
+ coeffs_chunk = NULL
if (is.data.frame(data))
{
#full data matrix
- if (index < nrow(data))
+ if (index <= nrow(data))
{
- writeTmp( getCoeffs( data[index:(min(index+nbSeriesPerChunk-1,nrow(data))),] ) )
- } else
- {
- break
+ coeffs_chunk = curvesToCoeffs(
+ data[index:(min(index+nb_series_per_chunk-1,nrow(data))),])
}
} else if (is.function(data))
{
#custom user function to retrieve next n curves, probably to read from DB
- coeffs_chunk = getCoeffs( data(index, nbSeriesPerChunk) )
- if (!is.null(coeffs_chunk))
- {
- writeTmp(coeffs_chunk)
- } else
- {
- break
- }
+ coeffs_chunk = curvesToCoeffs( data(index, nb_series_per_chunk) )
} else
{
#incremental connection
#TODO: find a better way to parse than using a temp file
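+ #note (sketch): read.csv(text=...) could avoid the temporary file entirely, e.g.
+ # coeffs_chunk = curvesToCoeffs( read.csv(text=paste(ascii_lines, collapse="\n")) )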
- ascii_lines = readLines(dataCon, nbSeriesPerChunk)
+ ascii_lines = readLines(data_con, nb_series_per_chunk)
- if (length(ascii_lines > 0))
+ if (length(ascii_lines) > 0)
{
- seriesChunkFile = ".tmp/seriesChunk"
- writeLines(ascii_lines, seriesChunkFile)
- writeTmp( getCoeffs( read.csv(seriesChunkFile) ) )
- } else
- {
- break
+ series_chunk_file = ".tmp/series_chunk"
+ dir.create(".tmp", showWarnings=FALSE) #ensure the temp directory exists
+ writeLines(ascii_lines, series_chunk_file)
+ coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file) )
}
}
- index = index + nbSeriesPerChunk
+ if (is.null(coeffs_chunk))
+ break
+ writeTmp(coeffs_chunk)
+ nb_curves = nb_curves + nrow(coeffs_chunk)
+ index = index + nb_series_per_chunk
}
- if (exists(dataCon))
- close(dataCon)
+ if (exists("data_con"))
+ close(data_con)
+ if (nb_curves < min_series_per_chunk)
+ stop("Not enough data: less rows than min_series_per_chunk!")
+ #2) process coeffs (by nb_series_per_chunk) and cluster them in parallel
library(parallel)
- ncores = ifelse(is.integer(ncores), ncores, parallel::detectCores())
+ ncores = if (is.integer(ncores)) ncores else parallel::detectCores()
cl = parallel::makeCluster(ncores)
- parallel::clusterExport(cl=cl, varlist=c("X", "Y", "K", "p"), envir=environment())
+ #export the objects that the parallel tasks will need
+ parallel::clusterExport(cl=cl, varlist=c("K", "readTmp", "writeTmp", "getClusters"),
+ envir=environment())
- library(cluster)
+ #make the 'cluster' package (PAM) available on every worker, not only the master
+ parallel::clusterEvalQ(cl, library(cluster))
- li = parallel::parLapply(cl, 1:B, )
-
- #2) process coeffs (by nbSeriesPerChunk) and cluster them in parallel
#TODO: be careful of writing to a new temp file, then flush initial one, then re-use it...
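+ #one possible scheme (sketch): alternate between two temp files A and B; read the
+ #chunks of the current pass from A, let the workers append their medoids to B,
+ #then swap the roles of A and B for the next pass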
repeat
{
- completed = rep(FALSE, ............)
- #while there is jobs to do (i.e. size of tmp "file" is greater than nbSeriesPerChunk),
- #A) determine which tasks which processor will do (OK)
- #B) send each (sets of) tasks in parallel
+ #while there are jobs to do (i.e. the tmp "file" holds more than nb_series_per_chunk series)
+ nb_workers = nb_curves %/% nb_series_per_chunk
+ indices = list()
+ #indices[[i]] == (start_index, number_of_elements)
+ for (i in 1:nb_workers)
+ indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk)
+ remainder = nb_curves %% nb_series_per_chunk
+ if (remainder >= min_series_per_chunk)
+ {
+ nb_workers = nb_workers + 1
+ indices[[nb_workers]] = c(nb_curves-remainder+1, remainder)
+ } else if (remainder > 0)
+ {
+ #spread the load among the other workers: the first 'remainder' chunks each
+ #take one extra curve, and the start indices shift accordingly
+ for (i in 1:nb_workers)
+ {
+ extra = if (i <= remainder) 1 else 0
+ indices[[i]] = c(nb_series_per_chunk*(i-1)+1+min(i-1,remainder), nb_series_per_chunk+extra)
+ }
+ }
+ li = parallel::parLapply(cl, indices, processChunk, WER=="mix")
#C) flush tmp file (current parallel processes will write in it)
- #always check "complete" flag (array, as I did in MPI) to know if "slaves" finished
+ #each pass reduces every chunk to K medoids, so the series count shrinks;
+ #stop once a single chunk remains (it is clustered below, after the loop)
+ nb_curves = nb_workers * K
+ if (nb_curves <= nb_series_per_chunk)
+ break
}
-pam(x, k)
parallel::stopCluster(cl)
#3) readTmp last results, apply PAM on it, and return medoids + identifiers
+ final_coeffs = readTmp(1, nb_series_per_chunk)
+ if (nrow(final_coeffs) == K)
+ {
+ return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)]),
+ ids=final_coeffs[,1] ) )
+ }
+ pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K)
+ medoids = coeffsToCurves(pam_output$medoids)
+ ids = final_coeffs[pam_output$ranks, 1]
+ return (list(medoids=medoids, ids=ids))
- #4) apply stage 2 (in parallel ? inside task 2) ?)
+ #4) apply stage 2 (in parallel? inside task 2?); currently unreachable, after the return
if (WER == "end")
+ {
#from center curves, apply stage 2...
}
}
+
+processChunk = function(indice, WER)
+{
+ #1) retrieve data from temporary storage; indice == c(start_index, number_of_elements)
+ coeffs = readTmp(indice[1], indice[2])
+ #2) cluster the chunk with PAM, keeping K medoids
+ pam_output = getClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K)
+ #3) WER (optional): TODO, refine the medoids with the WER distance when WER is TRUE
+ #write this chunk's medoids back, to be re-clustered in the next pass
+ writeTmp(coeffs[pam_output$ranks,])
+}
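+
+#minimal sketch of the clustering helper assumed above (the real definition may differ):
+#PAM from the 'cluster' package; 'ranks' are the row indices of the K medoids
+getClusters = function(coeffs_matrix, K)
+{
+ pam_output = cluster::pam(coeffs_matrix, K)
+ list(medoids=pam_output$medoids, ranks=pam_output$id.med)
+}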