epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series_per_chunk=5*K1,
wf="haar", WER="end", ncores_tasks=1, ncores_clust=4, random=TRUE)
{
- #0) check arguments
+ # Check arguments
if (!is.data.frame(data) && !is.function(data))
{
# NOTE: assumed intent of the unfinished tryCatch: 'data' may also be a connection or file path
tryCatch(open(data),
error = function(e) stop("data should be a data.frame, a function or a valid connection"))
if (WER!="end" && WER!="mix")
stop("WER takes values in {'end','mix'}")
- #1) Serialize all wavelets coefficients (+ IDs) onto a file
+ # Serialize all wavelets coefficients (+ IDs) onto a file
coeffs_file = ".coeffs"
index = 1
nb_curves = 0
nb_coeffs = ncol(coeffs_chunk)-1
}
-# finalizeSerialization(coeffs_file) ........, nb_curves, )
-#TODO: is it really useful?! we will always have this information (nb_curves, nb_coeffs)
-
if (nb_curves < min_series_per_chunk)
stop("Not enough data: fewer rows than min_series_per_chunk!")
nb_series_per_task = round(nb_curves / ntasks)
if (nb_series_per_task < min_series_per_chunk)
stop("Too many tasks: fewer series per task than min_series_per_chunk!")
- #2) Cluster coefficients in parallel (by nb_series_per_chunk)
- # All indices, relative to complete dataset
- indices = if (random) sample(nb_curves) else seq_len(nb_curves)
- # Indices to be processed in each task
- indices_tasks = list()
+ # Cluster coefficients in parallel (by nb_series_per_chunk)
+ indices = if (random) sample(nb_curves) else seq_len(nb_curves) #all indices
+ indices_tasks = list() #indices to be processed in each task
for (i in seq_len(ntasks))
{
upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
indices_tasks[[i]] = indices[ ((i-1)*nb_series_per_task+1) : upper_bound ] #this task's share of indices
}
library(parallel, quietly=TRUE)
cl_tasks = parallel::makeCluster(ncores_tasks)
- parallel::clusterExport(cl_tasks, ..........ncores_clust, indices_tasks, nb_series_per_chunk, processChunk, K1,
- K2, WER, )
- ranks = parallel::parSapply(cl_tasks, seq_along(indices_tasks), oneIteration)
+ #parallel::clusterExport(cl=cl_tasks, varlist=c("ncores_clust", ...), envir=environment())
+ # (see the self-contained clusterExport/parLapply toy after this function)
+ indices = parallel::parLapply(cl_tasks, indices_tasks, clusteringStep12)
parallel::stopCluster(cl_tasks)
- #3) Run step1+2 step on resulting ranks
- ranks = oneIteration(.........)
+##TODO: pass data?!
+
+ # Run the step-1 + step-2 clustering on the resulting ranks
+ # (see the clusteringStep12_sketch after this function for the intended two-stage idea)
+ ranks = clusteringStep12()
return (list("ranks"=ranks, "medoids"=getSeries(data, ranks)))
}
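
# --- Hedged sketch (not part of epclust): chunked wavelet-coefficient serialization ---
# The loop marked TODO inside epclust() needs two building blocks: turning a chunk of
# series into one coefficient row per curve, and appending those rows (with their IDs)
# to the flat binary file coeffs_file. The helper names below (curvesToCoeffs,
# appendCoeffs), the use of the 'wavelets' package and the binary layout are assumptions
# for illustration only; series are assumed to be rows of dyadic length.
curvesToCoeffs = function(chunk, wf="haar")
{
	# one DWT per curve; concatenate the detail coefficients of all levels
	t( apply(chunk, 1, function(series)
		unlist( wavelets::dwt(as.numeric(series), filter=wf)@W ) ) )
}
appendCoeffs = function(ids, coeffs, coeffs_file)
{
	# store rows contiguously as doubles: id followed by its coefficients
	con = file(coeffs_file, open="ab")
	writeBin(as.vector(t(cbind(ids, coeffs))), con, size=8)
	close(con)
}
# usage sketch: appendCoeffs(seq_len(nrow(chunk)), curvesToCoeffs(chunk, wf), ".coeffs")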
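
# --- Hedged, self-contained toy (not part of epclust): the clusterExport/parLapply pattern ---
# This illustrates the dispatch pattern sketched above: export the objects each worker
# needs from the current frame, then map a function over the per-task index lists.
# All names and values here are illustrative only.
library(parallel)
cl_toy = makeCluster(2)
nb_series_per_chunk_toy = 3
indices_tasks_toy = list(1:5, 6:10)
clusterExport(cl_toy, varlist=c("nb_series_per_chunk_toy"), envir=environment())
res_toy = parLapply(cl_toy, indices_tasks_toy, function(indices)
	# each worker splits its share of indices into chunks of the exported size
	split(indices, ceiling(seq_along(indices) / nb_series_per_chunk_toy))
)
stopCluster(cl_toy)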
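
# --- Hedged sketch (not part of epclust): a possible two-stage clusteringStep12 ---
# Inferred from the K1/K2 parameters: step 1 groups the requested series into K1 clusters
# and keeps their medoids, step 2 re-clusters those medoids into K2 clusters. It works
# on an in-memory coefficient matrix and uses cluster::pam (requires the 'cluster'
# package); the real function presumably reads the serialized coefficients, parallelizes
# over ncores_clust and handles the WER="mix" case.
clusteringStep12_sketch = function(indices, coeffs, K1, K2)
{
	# step 1: K1 medoids among the requested series (rows of 'coeffs')
	step1 = cluster::pam(coeffs[indices, , drop=FALSE], K1, diss=FALSE)
	medoids1 = indices[ step1$id.med ]
	# step 2: K2 medoids among the step-1 medoids
	step2 = cluster::pam(coeffs[medoids1, , drop=FALSE], K2, diss=FALSE)
	medoids1[ step2$id.med ] #ranks (row indices) of the final medoids
}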