# Read the stored CWT coefficients for series 'index' from cwt_file.
# NOTE(review): two records (2*index-1, 2*index) are fetched per index —
# presumably the real and imaginary parts of the transform; confirm against
# the writer side. The reshape into complex matrix form looks unfinished
# (TODO below), and 'res' is only returned invisibly as the value of the
# final assignment.
res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
###############TODO:
}
+
# Compute the WER-based distance between synchrones i and j and store it
# (symmetrically) into the shared Xwer_dist matrix; also zeroes the (i,i)
# diagonal entry. Called for its side effect — the return value is unused.
computeDistancesIJ = function(pair)
{
  if (parll)
  {
    # In a worker process: load packages and re-attach the shared
    # big.matrix objects from their descriptors.
    require("bigmemory", quietly=TRUE)
    require("epclust", quietly=TRUE)
    synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
    Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
  }

  i <- pair[1]
  j <- pair[2]
  if (verbose && j == i+1)
    cat(paste0(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n"))

  cwt_first <- getCWT(i)
  cwt_second <- getCWT(j)

  # Smoothed cross-spectrum and the two smoothed auto-spectra
  cross_power <- epclustFilter(Mod(cwt_first * Conj(cwt_second)))
  auto_first <- epclustFilter(Mod(cwt_first * Conj(cwt_first)))
  auto_second <- epclustFilter(Mod(cwt_second * Conj(cwt_second)))

  # Squared wavelet coherence ratio, aggregated over scales
  wer2 <- sum(colSums(cross_power)^2) /
    sum(colSums(auto_first) * colSums(auto_second))

  dist_ij <- sqrt(delta * ncol(cwt_first) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
  Xwer_dist[i,j] <- dist_ij
  Xwer_dist[j,i] <- dist_ij
  Xwer_dist[i,i] <- 0.
}
+
# Evaluate all index pairs; results are written into the shared Xwer_dist
# big.matrix as a side effect inside computeDistancesIJ, so the list returned
# by (par)Lapply is deliberately discarded.
ignored <-
if (parll)
parallel::parLapply(cl, pairs, computeDistancesIJ)
else
lapply(pairs, computeDistancesIJ)

if (parll)
parallel::stopCluster(cl)

# Fill the last diagonal entry — presumably (n,n) is never the first index
# of any element of 'pairs'; verify against how 'pairs' is built.
Xwer_dist[n,n] = 0.
# Materialize the big.matrix as an ordinary in-memory matrix, then release
# the shared object before returning.
distances <- Xwer_dist[,]
rm(Xwer_dist) ; gc()
distances #~small matrix K1 x K1
}
+
# Helper function to divide indices into balanced sets
# NOTE(review): this function appears to use several undefined or mismatched
# names — 'nb_workers' (only 'min_nb_workers' is assigned), 'max_nb_per_set'
# (the parameter is named 'max_per_set') and 'nb_per_chunk' (not defined in
# the visible code). Confirm against the complete file: the function body is
# cut off below ('while' loop has no visible body).
.spreadIndices = function(indices, max_per_set, min_nb_per_set = 1)
{
L = length(indices)
# Number of full sets of size max_per_set, and the leftover index count
min_nb_workers = floor( L / max_per_set )
rem = L %% max_per_set
if (nb_workers == 0 || (nb_workers==1 && rem==0)) # NOTE(review): 'nb_workers' undefined — min_nb_workers?
{
# L <= max_nb_per_set, simple case
indices_workers = list(indices)
}
else
{
# Partition into consecutive chunks of max_per_set indices
indices_workers = lapply( seq_len(nb_workers), function(i) # NOTE(review): 'nb_workers' undefined
indices[(max_nb_per_set*(i-1)+1):(max_per_set*i)] ) # NOTE(review): 'max_nb_per_set' vs 'max_per_set'
# Two cases: remainder is >= min_per_set (easy)...
if (rem >= min_nb_per_set)
indices_workers = c( indices_workers, list(tail(indices,rem)) )
#...or < min_per_set: harder, need to remove indices from current sets to feed
# the too-small remainder. It may fail: then fallback to "slightly bigger sets"
else
{
save_indices_workers = indices_workers
small_set = tail(indices,rem)
# Try feeding small_set until it reaches min_per_set, while keeping the others big enough
# Spread the remaining load among the workers
rem = L %% nb_per_chunk # NOTE(review): 'nb_per_chunk' undefined here; overwrites earlier 'rem'
while (rem > 0)