From 7b13d0c28da62d91684a29ced50c740120e2b7a9 Mon Sep 17 00:00:00 2001
From: Benjamin Auder <benjamin.auder@somewhere>
Date: Mon, 20 Feb 2017 18:31:45 +0100
Subject: [PATCH] renaming, refactoring

---
 epclust/R/clustering.R | 82 ++++++++++++++++++++----------------------
 epclust/R/main.R       | 26 ++++++--------
 epclust/R/utils.R      |  1 +
 3 files changed, 50 insertions(+), 59 deletions(-)

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index e27ea35..077becf 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -1,71 +1,65 @@
-oneIteration = function(..........)
+# Cluster one full task (nb_curves / ntasks series)
+clusteringTask = function(indices_task, K1, K2, WER, nb_series_per_chunk, ncores_clust)
 {
-	cl_clust = parallel::makeCluster(ncores_clust)
-	parallel::clusterExport(cl_clust, .............., envir=........)
-	indices_clust = indices_task[[i]]
-	repeat
+	cl_clust = parallel::makeCluster(ncores_clust)
+	#parallel::clusterExport(cl=cl_clust, varlist=c("fonctions_du_package"), envir=environment())
+	indices_clust = indices_task
+	repeat
+	{
+		nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
+		indices_workers = list()
+		for (i in 1:nb_workers)
 		{
-			nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
-			indices_workers = list()
-			#indices[[i]] == (start_index,number_of_elements)
-			for (i in 1:nb_workers)
-			{
-				upper_bound = ifelse( i<nb_workers,
-					min(nb_series_per_chunk*i,length(indices_clust)), length(indices_clust) )
-				indices_workers[[i]] = indices_clust[(nb_series_per_chunk*(i-1)+1):upper_bound]
-			}
-			indices_clust = parallel::parSapply(cl, indices_workers, processChunk, K1, K2*(WER=="mix"))
-			if ( (WER=="end" && length(indices_clust) == K1) ||
-				(WER=="mix" && length(indices_clust) == K2) )
-			{
-				break
-			}
+			upper_bound = ifelse( i<nb_workers,
+				min(nb_series_per_chunk*i,length(indices_clust)), length(indices_clust) )
+			indices_workers[[i]] = indices_clust[(nb_series_per_chunk*(i-1)+1):upper_bound]
 		}
-		parallel::stopCluster(cl_clust)
-		res_clust
+		indices_clust = parallel::parLapply(cl_clust, indices_workers, clusterChunk, K1, K2*(WER=="mix"))
+		if ((WER=="end" && length(indices_clust)==K1) || (WER=="mix" && length(indices_clust)==K2))
+			break
+	}
+	parallel::stopCluster(cl_clust)
+	unlist(indices_clust)
 }
 
-processChunk = function(indices, K1, K2)
+# Cluster a chunk of series inside one task (~max nb_series_per_chunk)
+clusterChunk = function(indices, K1, K2)
 {
-	#1) retrieve data (coeffs)
 	coeffs = getCoeffs(indices)
-	#2) cluster
-	cl = computeClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K1)
-	#3) WER (optional)
+	cl = computeClusters(as.matrix(coeffs[,2:ncol(coeffs)]), K1, diss=FALSE)
 	if (K2 > 0)
 	{
 		curves = computeSynchrones(cl)
 		dists = computeWerDists(curves)
-		cl = computeClusters(dists, K2)
+		cl = computeClusters(dists, K2, diss=TRUE)
 	}
-	cl
+	indices[cl]
 }
 
-computeClusters = function(data, K)
+# Apply the clustering algorithm (PAM) on a coeffs or distances matrix
+computeClusters = function(md, K, diss)
 {
-	library(cluster)
-	pam_output = cluster::pam(data, K)
-	return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
-		ranks=pam_output$id.med ) )
+	if (!require(cluster, quietly=TRUE))
+		stop("Unable to load cluster library")
+	cluster::pam(md, K, diss=diss)$id.med
 }
 
-#TODO: appendCoeffs() in C --> serialize and append to file
-
-computeSynchrones = function(...)
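+# Usage sketch (toy data, assuming the 'cluster' package is installed):
+#   coeffs = matrix(rnorm(60), nrow=12)      # 12 series, 5 coefficients each
+#   computeClusters(coeffs, K=3, diss=FALSE) # -> 3 medoid row indices
+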
+# Compute the synchrone curves (sum of cluster elements) from a clustering result
+computeSynchrones = function(indices)
 {
-	
+	colSums( getData(indices) )
 }
 
-#Input: synchrone curves, either after iterated step 1, or after each step 1
-computeWerDist = function(conso)
+# Compute the WER distances between the synchrone curves
+computeWerDists = function(curves)
 {
 	if (!require("Rwave", quietly=TRUE))
 		stop("Unable to load Rwave library")
-	n <- nrow(conso)
-	delta <- ncol(conso)
+	n <- nrow(curves)
+	delta <- ncol(curves)
 	#TODO: automatic tune of all these parameters ? (for other users)
 	nvoice <- 4
-	# noctave = 2^13 = 8192 half hours ~ 180 days ; ~log2(ncol(conso))
+	# noctave = 2^13 = 8192 half-hours ~ 170 days ; ~log2(ncol(curves))
 	noctave = 13
 	# 4 here represent 2^5 = 32 half-hours ~ 1 day
 	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
@@ -79,7 +73,7 @@ computeWerDist = function(conso)
 	# (normalized) observations node with CWT
 	Xcwt4 <- lapply(seq_len(n), function(i)
 	{
-		ts <- scale(ts(conso[i,]), center=TRUE, scale=scaled)
+		ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
 		totts.cwt = Rwave::cwt(ts,totnoct,nvoice,w0,plot=0)
 		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
 		#Normalization
diff --git a/epclust/R/main.R b/epclust/R/main.R
index e794351..f45c945 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -40,7 +40,7 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 	per_chunk=5*K1, wf="haar", WER="end", ncores_tasks=1, ncores_clust=4, random=TRUE)
 {
-	#0) check arguments
+	# Check arguments
 	if (!is.data.frame(data) && !is.function(data))
 	{
 		tryCatch(
@@ -66,7 +66,7 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 	if (WER!="end" && WER!="mix")
 		stop("WER takes values in {'end','mix'}")
-	#1) Serialize all wavelets coefficients (+ IDs) onto a file
+	# Serialize all wavelets coefficients (+ IDs) onto a file
 	coeffs_file = ".coeffs"
 	index = 1
 	nb_curves = 0
@@ -84,20 +84,15 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 		nb_coeffs = ncol(coeffs_chunk)-1
 	}
 
-#	finalizeSerialization(coeffs_file)	........, nb_curves, )
-#TODO: is it really useful ?! we will always have this information (nb_curves, nb_coeffs)
-
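+	# The on-disk coeffs format is not fixed yet (see the appendBinary stub in
+	# utils.R); one possible sketch, assuming raw doubles appended in binary mode:
+	#   con = file(coeffs_file, "ab")
+	#   writeBin(as.double(t(as.matrix(coeffs_chunk))), con, size=8)
+	#   close(con)
+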
 	if (nb_curves < min_series_per_chunk)
 		stop("Not enough data: less rows than min_series_per_chunk!")
 	nb_series_per_task = round(nb_curves / ntasks)
 	if (nb_series_per_task < min_series_per_chunk)
 		stop("Too many tasks: less series in one task than min_series_per_chunk!")
-	#2) Cluster coefficients in parallel (by nb_series_per_chunk)
-	# All indices, relative to complete dataset
-	indices = if (random) sample(nb_curves) else seq_len(nb_curves)
-	# Indices to be processed in each task
-	indices_tasks = list()
+	# Cluster coefficients in parallel (by nb_series_per_chunk)
+	indices = if (random) sample(nb_curves) else seq_len(nb_curves) #all indices
+	indices_tasks = list() #indices to be processed in each task
 	for (i in seq_len(ntasks))
 	{
 		upper_bound = ifelse( i<ntasks, min(nb_series_per_task*i,nb_curves), nb_curves )
@@ -105,12 +100,13 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 	}
 	library(parallel, quietly=TRUE)
 	cl_tasks = parallel::makeCluster(ncores_tasks)
-	parallel::clusterExport(cl_tasks, ..........ncores_clust, indices_tasks, nb_series_per_chunk, processChunk, K1,
-		K2, WER, )
-	ranks = parallel::parSapply(cl_tasks, seq_along(indices_tasks), oneIteration)
+	#parallel::clusterExport(cl=cl_tasks, varlist=c("ncores_clust", ...), envir=environment())
+	indices = parallel::parLapply(cl_tasks, indices_tasks, clusteringTask, K1, K2, WER, nb_series_per_chunk, ncores_clust)
 	parallel::stopCluster(cl_tasks)
-	#3) Run step1+2 step on resulting ranks
-	ranks = oneIteration(.........)
+##TODO: pass data ?!
+
+	# Run steps 1+2 on the resulting ranks
+	ranks = clusteringTask(unlist(indices), K1, K2, WER, nb_series_per_chunk, ncores_clust)
 	return (list("ranks"=ranks, "medoids"=getSeries(data, ranks)))
 }
diff --git a/epclust/R/utils.R b/epclust/R/utils.R
index 8f7da38..6dcc2cd 100644
--- a/epclust/R/utils.R
+++ b/epclust/R/utils.R
@@ -20,6 +20,7 @@ serialize = function(coeffs)
 appendBinary = function(.......)
 {
 	#take raw vector, append it (binary mode) to a file
+#TODO: appendCoeffs() in C --> serialize and append to file
 }
 
 #finalizeSerialization = function(...)
-- 
2.44.0
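
For reference, the chunk-splitting loop at the heart of clusteringTask() can be
run standalone; a minimal sketch with toy values (no epclust code required):

	# 7 series, chunks of 3 -> round(7/3) = 2 workers; the last worker takes
	# the remainder, so no index is dropped.
	indices_clust = 1:7
	nb_series_per_chunk = 3
	nb_workers = max( 1, round( length(indices_clust) / nb_series_per_chunk ) )
	indices_workers = list()
	for (i in 1:nb_workers)
	{
		upper_bound = ifelse( i<nb_workers,
			min(nb_series_per_chunk*i,length(indices_clust)), length(indices_clust) )
		indices_workers[[i]] = indices_clust[(nb_series_per_chunk*(i-1)+1):upper_bound]
	}
	print(indices_workers)   # [[1]] 1 2 3 ; [[2]] 4 5 6 7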