- cat(paste("--- Compute WER dists\n", sep=""))
-
- n <- nrow(synchrones)
- delta <- ncol(synchrones)
- #TODO: automatic tuning of all these parameters? (for other users)
- nvoice <- 4
- # noctave = 13: largest scale 2^13 = 8192 half-hours ~ 170 days ; ~log2(ncol(synchrones))
- noctave = 13
- # the 4 below represents 2^5 = 32 half-hours ~ 1 day
- #NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
- scalevector <- 2^(4:(noctave * nvoice) / nvoice + 1)
- #condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
- s0 = 2
- w0 = 2*pi
- scaled=FALSE
- s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
- totnoct = noctave + as.integer(s0log/nvoice) + 1
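- # Worked check with the values above: log2(s0*w0/(2*pi)) = log2(2) = 1, so the
- # condition holds ((1-1)*nvoice + 1.5 = 1.5 >= 1), s0log = as.integer(1.5) = 1
- # and totnoct = 13 + as.integer(1/4) + 1 = 14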
-
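- # The n x n distance matrix is held in a bigmemory::big.matrix so that parallel
- # workers can attach it (through its descriptor, see below) and fill entries in
- # place instead of each returning a copy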
- Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")
-
- cwt_file = ".epclust_bin/cwt"
- #TODO: nb_per_chunk, nbytes and endian should become arguments of this function
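- # Until then, provisional values are assumed here (8-byte doubles, platform
- # endianness) so that the de/serialization calls below can run
- nbytes = 8
- endian = .Platform$endian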
-
- # Generate the n(n-1)/2 pairs (i,j), i < j, for the WER distance computations
- pairs = list()
- V = seq_len(n)
- for (i in 1:n)
- {
- V = V[-1]
- pairs = c(pairs, lapply(V, function(v) c(i,v)))
- }
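- # (For n >= 2 the same list, in the same order, could be built in one line with
- # pairs <- combn(n, 2, simplify=FALSE))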
-
- computeSaveCWT = function(index)
- {
- if (parll)
- {
- require("bigmemory", quietly=TRUE)
- # On a worker the big.matrix external pointer is invalid: re-attach from the descriptor
- synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
- }
- ts <- scale(ts(synchrones[index,]), center=TRUE, scale=scaled)
- totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
- ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
- # Normalization
- sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
- sqres <- sweep(ts.cwt,2,sqs,'*')
- res <- sqres / max(Mod(sqres))
- # Serialize the CWT so that getCWT() can read it back via getDataInFile():
- # two plain series of size delta*ncol (53*17519) per synchrone, real part then
- # imaginary part (one column per series, as expected by getCWT below)
- binarize(cbind(as.double(Re(res)),as.double(Im(res))), cwt_file, ncol(res), ",", nbytes, endian)
- }
-
- if (parll)
- {
- cl = parallel::makeCluster(ncores_clust)
- synchrones_desc <- bigmemory::describe(synchrones)
- Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
- parallel::clusterExport(cl, varlist=c("synchrones_desc","Xwer_dist_desc","totnoct",
- "nvoice","w0","s0log","noctave","s0","scaled","delta","cwt_file","nbytes",
- "endian","verbose"), envir=environment())
- # getCWT (defined below) is not exported: it is captured in the environment of
- # computeDistancesIJ and thus shipped to the workers along with that closure
- }
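- # The big.matrix objects themselves are not exported: their external pointers
- # would not survive serialization, so only their descriptors are shared and each
- # worker re-attaches them with bigmemory::attach.big.matrix()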
-
- # Precompute and serialize all CWTs
- ignored <-
- if (parll)
- parallel::parLapply(cl, 1:n, computeSaveCWT)
- else
- lapply(1:n, computeSaveCWT)
-
- getCWT = function(index)
- {
- # Read back from cwt_file the two series written by computeSaveCWT (real part
- # at position 2*index-1, imaginary part at 2*index), assuming getDataInFile()
- # returns one column per requested series, and rebuild the complex CWT matrix
- res <- getDataInFile(c(2*index-1,2*index), cwt_file, nbytes, endian)
- matrix(complex(real=res[,1], imaginary=res[,2]), nrow=delta)
- }
-
- # Distance between rows i and j
- computeDistancesIJ = function(pair)
- {
- if (parll)
- {
- require("bigmemory", quietly=TRUE)
- require("epclust", quietly=TRUE)
- synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
- Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
- }
-
- i = pair[1] ; j = pair[2]
- if (verbose && j==i+1)
- cat(paste(" Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
- cwt_i <- getCWT(i)
- cwt_j <- getCWT(j)
-
- num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
- WX <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
- WY <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
- wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
- Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * max(1 - wer2, 0.)) #FIXME: wer2 should be < 1
- Xwer_dist[j,i] <- Xwer_dist[i,j]
- Xwer_dist[i,i] = 0.
- }
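- # Note on the formula above: for identical series num, WX and WY coincide, hence
- # wer2 == 1 and the distance is 0; wer2 is expected to lie in [0,1], the
- # max(1 - wer2, 0) guard only absorbs small numerical overshoots (cf. FIXME)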
-
- ignored <-
- if (parll)
- parallel::parLapply(cl, pairs, computeDistancesIJ)
- else
- lapply(pairs, computeDistancesIJ)
-
- if (parll)
- parallel::stopCluster(cl)
-
- Xwer_dist[n,n] = 0.
- distances <- Xwer_dist[,]
- rm(Xwer_dist) ; gc()
- distances #~small matrix K1 x K1
-}
-
-# Helper function to divide indices into balanced sets
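-# Example of the balancing performed below:
-# .spreadIndices(1:11, 3) --> list(1:3, c(4,5,6,11), c(7,8,9,10))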
-.spreadIndices = function(indices, nb_per_chunk)
-{
- L = length(indices)
- nb_workers = floor( L / nb_per_chunk )
- if (nb_workers == 0)
- {
- # L < nb_per_chunk, simple case
- indices_workers = list(indices)
- }
- else
- {
- indices_workers = lapply( seq_len(nb_workers), function(i)
- indices[(nb_per_chunk*(i-1)+1):(nb_per_chunk*i)] )
- # Spread the remaining load among the workers
- rem = L %% nb_per_chunk
- while (rem > 0)
- {
- index = rem%%nb_workers + 1
- indices_workers[[index]] = c(indices_workers[[index]], indices[L-rem+1])
- rem = rem - 1
- }
- }
- indices_workers