+ #from cwt_file ...
+ res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
+ ###############TODO:
+ }
+
+ # Distance between rows i and j
+ # Computes the WER (wavelet coherence) distance between synchrones i and j
+ # from their CWT coefficients, and writes it symmetrically into the shared
+ # distance matrix Xwer_dist. Intended to be mapped over index pairs, either
+ # sequentially or via parallel::parLapply.
+ # pair: integer vector c(i, j) of row indices.
+ # All other objects (parll, verbose, getCWT, epclustFilter, L,
+ # synchrones_desc, Xwer_dist_desc, Xwer_dist) are free variables captured
+ # from the enclosing function's environment.
+ computeDistancesIJ = function(pair)
+ {
+ if (parll)
+ {
+ # On a cluster worker, loaded packages and big.matrix handles are not
+ # inherited from the master process: re-load the packages and re-attach
+ # the shared matrices from their descriptors.
+ # NOTE(review): 'synchrones' is re-attached but not referenced below --
+ # presumably needed indirectly (e.g. by getCWT); verify and drop if not.
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
+ }
+
+ i = pair[1] ; j = pair[2]
+ # Progress trace: printed only once per row (when j == i+1) to limit output
+ if (verbose && j==i+1)
+ cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
+ # CWT coefficient matrices of series i and j; getCWT presumably reads them
+ # back from cwt_file (see the fragment above) -- TODO confirm
+ cwt_i <- getCWT(i)
+ cwt_j <- getCWT(j)
+
+ # Smoothed cross-spectrum (num) and auto-spectra (WX, WY); epclustFilter is
+ # assumed to be a smoothing filter over the time/scale plane -- confirm
+ num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+ WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+ WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
+ # wer2: coherence-based similarity ratio; max(1 - wer2, 0.) clamps tiny
+ # negative values caused by floating-point rounding before the sqrt
+ wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
+ Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * max(1 - wer2, 0.))
+ # Distance matrix is symmetric with a zero diagonal
+ Xwer_dist[j,i] <- Xwer_dist[i,j]
+ Xwer_dist[i,i] = 0.
+ }
+
+ if (verbose)
+ {
+ cat(paste("--- Compute WER dists\n", sep=""))
+ }
+ ignored <-
+ if (parll)
+ parallel::parLapply(cl, pairs, computeDistancesIJ)
+ else
+ lapply(pairs, computeDistancesIJ)
+
+ if (parll)
+ parallel::stopCluster(cl)
+
+ Xwer_dist[n,n] = 0.
+ distances <- Xwer_dist[,]
+ rm(Xwer_dist) ; gc()
+ distances #~small matrix K1 x K1
+}
+
+ # Helper function to divide indices into balanced sets
+ # If max == TRUE, set sizes cannot exceed nb_per_set
+.spreadIndices = function(indices, nb_per_set, max=FALSE)
+{
+ L = length(indices)
+ nb_workers = floor( L / nb_per_set )
+ rem = L %% nb_per_set
+ if (nb_workers == 0 || (nb_workers==1 && rem==0))
+ {
+ # L <= nb_per_set, simple case
+ indices_workers = list(indices)
+ }
+ else
+ {
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+ indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
+
+ if (max)
+ {
+ # Sets are not so well balanced, but size is supposed to be critical
+ return ( c( indices_workers, (L-rem+1):L ) )
+ }
+
+ # Spread the remaining load among the workers
+ rem = L %% nb_per_set
+ while (rem > 0)