+ if (parll)
+ synchronicity::unlock(m)
+ }
+ Xwer_dist[i,i] = 0.
+ }
+
+ parll = (requireNamespace("synchronicity",quietly=TRUE)
+ && parll && Sys.info()['sysname'] != "Windows")
+ if (parll)
+ m <- synchronicity::boost.mutex()
+
+ ignored <-
+ if (parll)
+ {
+ parallel::mclapply(seq_len(n-1), computeDistancesLineI,
+ mc.cores=ncores_clust, mc.allow.recursive=FALSE)
+ }
+ else
+ lapply(seq_len(n-1), computeDistancesLineI)
+ Xwer_dist[n,n] = 0.
+
+ mat_dists = matrix(nrow=n, ncol=n)
+ #TODO: avoid this loop?
+ for (i in 1:n)
+ mat_dists[i,] = Xwer_dist[i,]
+ mat_dists
+}
+
+# Helper function to divide indices into balanced sets
+.spreadIndices = function(indices, nb_per_chunk)
+{
+ L = length(indices)
+ nb_workers = floor( L / nb_per_chunk )
+ if (nb_workers == 0)
+ {
+ # L < nb_per_chunk, simple case
+ indices_workers = list(indices)
+ }
+ else
+ {
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
+ # Spread the remaining load among the workers
+ rem = L %% nb_per_chunk
+ while (rem > 0)
+ {
+ index = rem%%nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
+ rem = rem - 1