- #TODO: later, compute CWT here (because not enough storage space for 200k series)
- # 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
- num <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- WX <- filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
- WY <- filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
- wer2 <- sum(colSums(num)^2) / sum( sum(colSums(WX) * colSums(WY)) )
- Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
- Xwer_dist[j,i] <- Xwer_dist[i,j]
+ require("bigmemory", quietly=TRUE)
+ require("epclust", quietly=TRUE)
+ synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+ Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
+ }
+
+ i = pair[1] ; j = pair[2]
+ if (verbose && j==i+1)
+ cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
+ cwt_i <- getCWT(i)
+ cwt_j <- getCWT(j)
+
+ num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+ WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+ WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
+ wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
+ Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
+ Xwer_dist[j,i] <- Xwer_dist[i,j]
+ Xwer_dist[i,i] = 0.
+ }
+
+ if (verbose)
+ {
+ cat(paste("--- Compute WER dists\n", sep=""))
+ }
+ ignored <-
+ if (parll)
+ parallel::parLapply(cl, pairs, computeDistancesIJ)
+ else
+ lapply(pairs, computeDistancesIJ)
+
+ if (parll)
+ parallel::stopCluster(cl)
+
+ Xwer_dist[n,n] = 0.
+ distances <- Xwer_dist[,]
+ rm(Xwer_dist) ; gc()
+ distances #~small matrix K1 x K1
+}
+
+# Helper function to divide indices into balanced sets
+.spreadIndices = function(indices, nb_per_set)
+{
+ L = length(indices)
+ nb_workers = floor( L / nb_per_set )
+	rem = L %% nb_per_set
+ if (nb_workers == 0 || (nb_workers==1 && rem==0))
+ {
+		# L <= nb_per_set, simple case
+ indices_workers = list(indices)
+ }
+ else
+ {
+ indices_workers = lapply( seq_len(nb_workers), function(i)
+			indices[(nb_per_set*(i-1)+1):(nb_per_set*i)] )
+ # Spread the remaining load among the workers
+ rem = L %% nb_per_set
+ while (rem > 0)
+ {
+ index = rem%%nb_workers + 1
+ indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
+ rem = rem - 1