#' \itemize{
#' \item data.frame: each row contains the series ID in its first cell, followed by all the values
#' \item connection: any R connection object (e.g. a file) providing lines as described above
-#' \item function: a custom way to retrieve the curves; it has two arguments: the start index
-#' (start) and number of curves (n); see example in package vignette.
+#' \item function: a custom way to retrieve the curves; it takes two arguments: the ranks of
+#' the curves to be retrieved, and their IDs; at least one of them must be provided (ranks
+#' take priority); see the sketch after this list.
#' }
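+#'
+#' A minimal sketch of such a retrieval function, assuming a hypothetical matrix
+#' \code{series} with one curve per row and IDs as row names:
+#' \preformatted{
+#' getCurves = function(ranks=NULL, ids=NULL)
+#' {
+#'   if (!is.null(ranks)) # ranks take priority
+#'     return (series[ranks, , drop=FALSE])
+#'   series[match(ids, rownames(series)), , drop=FALSE]
+#' }
+#' }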
#' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
#' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
wf="haar", WER="end", ncores_tasks=1, ncores_clust=4, random=TRUE)
{
- #0) check arguments
+ # Check arguments
if (!is.data.frame(data) && !is.function(data))
{
	# data should then be a connection, or a path to open as one
	tryCatch({
		if (is.character(data)) data = file(data, open="r")
		else if (!isOpen(data)) open(data)
	}, error = function(e) stop("data should be a data.frame, a function or a valid connection"))
}
K1 = toInteger(K1, function(x) x>=2)
K2 = toInteger(K2, function(x) x>=2)
- ntasks = toInteger(ntasks)
+ ntasks = toInteger(ntasks, function(x) x>=1)
nb_series_per_chunk = toInteger(nb_series_per_chunk, function(x) x>=K1)
min_series_per_chunk = toInteger(min_series_per_chunk, function(x) x>=K1 && x<=nb_series_per_chunk)
ncores_tasks = toInteger(ncores_tasks, function(x) x>=1)
ncores_clust = toInteger(ncores_clust, function(x) x>=1)
if (WER!="end" && WER!="mix")
stop("WER takes values in {'end','mix'}")
- #1) Serialize all wavelets coefficients (+ IDs) onto a file
- coeffs_file = ".coeffs"
+ # Serialize all wavelets coefficients (+ IDs) onto a file
+ unlink(".coeffs")
index = 1
nb_curves = 0
nb_coeffs = NA
repeat
{
	coeffs_chunk = computeCoeffs(data, index, nb_series_per_chunk, wf)
	if (is.null(coeffs_chunk))
		break
- serialized_coeffs = serialize(coeffs_chunk)
- appendBinary(coeffs_file, serialized_coeffs)
+ writeCoeffs(coeffs_chunk)
index = index + nb_series_per_chunk
nb_curves = nb_curves + nrow(coeffs_chunk)
if (is.na(nb_coeffs))
nb_coeffs = ncol(coeffs_chunk)-1
}
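+ # writeCoeffs is defined elsewhere in the package; a possible shape, assuming the
+ # chunk is appended in binary form to the ".coeffs" file, could be:
+ #   writeCoeffs = function(coeffs_chunk) {
+ #     con = file(".coeffs", open="ab")
+ #     writeBin(as.double(t(as.matrix(coeffs_chunk))), con)
+ #     close(con)
+ #   }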
-# finalizeSerialization(coeffs_file) ........, nb_curves, )
-#TODO: is it really useful ?! we will always have these informations (nb_curves, nb_coeffs)
-
if (nb_curves < min_series_per_chunk)
stop("Not enough data: less rows than min_series_per_chunk!")
nb_series_per_task = round(nb_curves / ntasks)
if (nb_series_per_task < min_series_per_chunk)
stop("Too many tasks: less series in one task than min_series_per_chunk!")
- #2) Cluster coefficients in parallel (by nb_series_per_chunk)
- # All indices, relative to complete dataset
+ # Cluster coefficients in parallel (by nb_series_per_chunk)
indices = if (random) sample(nb_curves) else seq_len(nb_curves)
- # Indices to be processed in each task
- indices_tasks = list()
- for (i in seq_len(ntasks))
- {
+ indices_tasks = lapply(seq_len(ntasks), function(i) {
upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
- indices_task[[i]] = indices[((i-1)*nb_series_per_task+1):upper_bound]
- }
+ indices[((i-1)*nb_series_per_task+1):upper_bound]
+ })
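+ # e.g. nb_curves=10, ntasks=3: nb_series_per_task=3, so the tasks receive
+ # indices[1:3], indices[4:6] and indices[7:10] respectively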
library(parallel, quietly=TRUE)
cl_tasks = parallel::makeCluster(ncores_tasks)
- parallel::clusterExport(cl_tasks, ..........ncores_clust, indices_tasks, nb_series_per_chunk, processChunk, K1,
- K2, WER, )
- ranks = parallel::parSapply(cl_tasks, seq_along(indices_tasks), oneIteration)
+ parallel::clusterExport(cl_tasks,
+ 	varlist=c("K1","K2","WER","nb_series_per_chunk","ncores_clust"),
+ 	envir=environment()) # TODO: also pass nb_coeffs and the coefficients file name (in a list?)
+ indices = parallel::parLapply(cl_tasks, indices_tasks, clusteringTask)
parallel::stopCluster(cl_tasks)
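+ # clusteringTask (defined elsewhere) is expected to process its chunk of indices
+ # with ncores_clust workers and return the indices of the selected medoids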
- #3) Run step1+2 step on resulting ranks
- ranks = oneIteration(.........)
- return (list("ranks"=ranks, "medoids"=getSeries(data, ranks)))
+ # Run steps 1+2 once more on the indices returned by the tasks
+ indices = clusterChunk(unlist(indices), K1, K2) # flatten the per-task results first
+ return (list("indices"=indices, "medoids"=getSeries(data, indices)))
}
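+
+ # Hypothetical usage sketch (a data.frame with one series per row, its ID in
+ # the first cell):
+ #   result = epclust(my_series_df, K1=200, K2=15, ntasks=2)
+ #   result$medoids # the K2 medoid curves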