X-Git-Url: https://git.auder.net/?p=epclust.git;a=blobdiff_plain;f=epclust%2FR%2Fmain.R;h=75041a4c6a5cea2bebaccf847c01a41d283d3222;hp=f45c9450dab724258047c4161439f3adafc83718;hb=48108c3999d28d973443fa5e78f73a0a9f2bfc07;hpb=7b13d0c28da62d91684a29ced50c740120e2b7a9

diff --git a/epclust/R/main.R b/epclust/R/main.R
index f45c945..75041a4 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -7,8 +7,8 @@
 #' \itemize{
 #'   \item data.frame: each line contains its ID in the first cell, and all values after
 #'   \item connection: any R connection object (e.g. a file) providing lines as described above
-#'   \item function: a custom way to retrieve the curves; it has two arguments: the start index
-#'     (start) and number of curves (n); see example in package vignette.
+#'   \item function: a custom way to retrieve the curves; it has two arguments: the ranks to be
+#'     retrieved, and the IDs - at least one of them must be present (priority: ranks).
 #' }
 #' @param K1 Number of super-consumers to be found after stage 1 (K1 << N)
 #' @param K2 Number of clusters to be found after stage 2 (K2 << K1)
@@ -58,7 +58,7 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 	}
 	K1 = toInteger(K1, function(x) x>=2)
 	K2 = toInteger(K2, function(x) x>=2)
-	ntasks = toInteger(ntasks)
+	ntasks = toInteger(ntasks, function(x) x>=1)
 	nb_series_per_chunk = toInteger(nb_series_per_chunk, function(x) x>=K1)
 	min_series_per_chunk = toInteger(K1, function(x) x>=K1 && x<=nb_series_per_chunk)
 	ncores_tasks = toInteger(ncores_tasks, function(x) x>=1)
@@ -67,7 +67,7 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 		stop("WER takes values in {'end','mix'}")
 
 	# Serialize all wavelets coefficients (+ IDs) onto a file
-	coeffs_file = ".coeffs"
+	unlink(".coeffs")
 	index = 1
 	nb_curves = 0
 	nb_coeffs = NA
@@ -76,8 +76,7 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 		coeffs_chunk = computeCoeffs(data, index, nb_series_per_chunk, wf)
 		if (is.null(coeffs_chunk))
 			break
-		serialized_coeffs = serialize(coeffs_chunk)
-		appendBinary(coeffs_file, serialized_coeffs)
+		writeCoeffs(coeffs_chunk)
 		index = index + nb_series_per_chunk
 		nb_curves = nb_curves + nrow(coeffs_chunk)
 		if (is.na(nb_coeffs))
@@ -91,22 +90,21 @@ epclust = function(data, K1, K2, ntasks=1, nb_series_per_chunk=50*K1, min_series
 		stop("Too many tasks: less series in one task than min_series_per_chunk!")
 
 	# Cluster coefficients in parallel (by nb_series_per_chunk)
-	indices = if (random) sample(nb_curves) else seq_len(nb_curves) #all indices
-	indices_tasks = list() #indices to be processed in each task
-	for (i in seq_len(ntasks))
-	{
+	indices = if (random) sample(nb_curves) else seq_len(nb_curves)
+	indices_tasks = lapply(seq_len(ntasks), function(i) {
 		upper_bound = ifelse( i
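
The updated roxygen block in the first hunk changes the contract for a `data` argument supplied as a function: it now receives the ranks to retrieve and the IDs, at least one of which must be given, with ranks taking priority. A minimal sketch of a conforming retriever, assuming the series live in a data.frame `series_df` (first column = ID, remaining columns = values); both the data layout and the name `getSeries` are illustrative, not part of this commit:

	# Hypothetical retriever matching the documented contract: two arguments,
	# 'ranks' and 'ids'; when both are supplied, 'ranks' wins.
	series_df = data.frame(ID=1:5, matrix(rnorm(5*10), nrow=5))  # toy data (assumption)
	getSeries = function(ranks, ids)
	{
		if (!missing(ranks) && !is.null(ranks))
			return (series_df[ranks, , drop=FALSE])
		if (!missing(ids) && !is.null(ids))
			return (series_df[series_df$ID %in% ids, , drop=FALSE])
		NULL  # neither argument given: nothing to retrieve
	}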
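
In the third and fourth hunks, the explicit serialize()/appendBinary() pair and the coeffs_file variable are replaced by unlink(".coeffs") plus a writeCoeffs() helper defined elsewhere in the package. For readers without the rest of the source at hand, appending a serialized chunk to a binary file in base R alone could look like the sketch below; the file name and the length-prefix framing are assumptions, not the package's actual on-disk format.

	# Illustrative base-R append of a serialized object to a binary file;
	# epclust's real writeCoeffs() may use a different layout.
	appendSerialized = function(obj, path=".coeffs")
	{
		con = file(path, open="ab")                  # append in binary mode
		on.exit(close(con))
		raw_bytes = serialize(obj, connection=NULL)  # raw vector
		writeBin(length(raw_bytes), con)             # simple length prefix (assumption)
		writeBin(raw_bytes, con)
	}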
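
The last hunk rewrites the per-task index assignment as an lapply() call, but the diff is cut off in the middle of the upper_bound expression. The general pattern of splitting the (optionally shuffled) index vector into one chunk per task is illustrated below; the chunk-size arithmetic and the example values are assumptions, not the commit's exact code.

	# Illustrative splitting of curve indices into ntasks consecutive chunks.
	nb_curves = 100; ntasks = 3          # example values (assumption)
	indices = sample(nb_curves)          # shuffled, as when random == TRUE
	chunk_size = ceiling(nb_curves / ntasks)
	indices_tasks = lapply(seq_len(ntasks), function(i) {
		lower_bound = (i-1) * chunk_size + 1
		upper_bound = min(i * chunk_size, nb_curves)
		indices[lower_bound:upper_bound]
	})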