- nb_workers = max( 1, round( length(indices) / nb_series_per_chunk ) )
- indices_workers = lapply(seq_len(nb_workers), function(i) {
- upper_bound = ifelse( i<nb_workers,
- min(nb_series_per_chunk*i,length(indices)), length(indices) )
- indices[(nb_series_per_chunk*(i-1)+1):upper_bound]
- })
- indices = unlist( parallel::parLapply(cl, indices_workers, function(inds)
- computeClusters1(inds, getCoefs, K1)) )
- if (length(indices_clust) == K1)
+ nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_series_per_chunk*(i-1)+1):(nb_series_per_chunk*i)] )
+ # Spread the remaining load among the workers
+ rem = length(indices) %% nb_series_per_chunk
+ while (rem > 0)
+ {
+ index = rem%%nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem)[1])
+ rem = rem - 1
+ }
+ indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
+ require("epclust", quietly=TRUE)
+ inds[ computeClusters1(getCoefs(inds), K1) ]
+ } ) )
+ if (length(indices) == K1)