#' computeWerDists
#'
#' Compute the WER distances between the series at specified indices, which are
#' obtained by \code{getSeries(indices)}
#'
#' @param indices Range of series indices to cluster
#' @param getSeries Function to retrieve series (argument: 'indices', integer vector),
#'   as columns of a matrix
#' @param ncores Number of cores for parallel runs
#' @inheritParams claws
#'
#' @return A distances matrix of size K x K where K == length(indices)
#'
#' @export
computeWerDists <- function(indices, getSeries, nb_series_per_chunk, smooth_lvl=3, nvoice=4,
	nbytes=4, endian=.Platform$endian, ncores=3, verbose=FALSE)
{
	n <- length(indices)
	L <- length(getSeries(1)) #TODO: not very neat way to get L
	noctave <- ceiling(log2(L)) #min power of 2 to cover serie range
	# Since a CWT contains noctave*nvoice complex series, we deduce the number of CWT to
	# retrieve/put in one chunk.
	nb_cwt_per_chunk <- max(1, floor(nb_series_per_chunk / (nvoice*noctave*2)))

	# Initialize result as a square big.matrix of size 'number of medoids'
	Xwer_dist <- bigmemory::big.matrix(nrow=n, ncol=n, type="double")

	# Temporary binary file holding the serialized CWTs; removed before returning
	cwt_file <- tempfile(pattern="epclust_cwt.bin_")

	# Compute the getSeries(indices) CWT, and store the results in the binary file
	computeSaveCWT <- function(inds)
	{
		if (verbose)
			cat("   Compute save CWT on ",length(inds)," indices\n", sep="")

		# Obtain CWT as big vectors of real part + imaginary part (concatenate)
		ts_cwt <- sapply(inds, function(i) {
			# Center the series; scale is left untouched (WER is scale-aware)
			ts <- scale(ts(getSeries(i)), center=TRUE, scale=FALSE)
			ts_cwt <- Rwave::cwt(ts, noctave, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
			c( as.double(Re(ts_cwt)),as.double(Im(ts_cwt)) )
		})

		# Serialization: append this chunk of flattened CWTs to the binary file
		binarize(ts_cwt, cwt_file, nb_cwt_per_chunk, ",", nbytes, endian)
	}

	# Function to retrieve a synchrone CWT from (binary) file
	getCWT <- function(index, L)
	{
		flat_cwt <- getDataInFile(index, cwt_file, nbytes, endian)
		# First half of the flat vector is the real part, second half the imaginary part
		cwt_length <- length(flat_cwt) / 2
		re_part <- as.matrix(flat_cwt[1:cwt_length], nrow=L)
		im_part <- as.matrix(flat_cwt[(cwt_length+1):(2*cwt_length)], nrow=L)
		re_part + 1i * im_part
	}

	# Compute distances between columns i and j for j>i
	computeDistances <- function(i)
	{
		if (parll)
		{
			# parallel workers start with an empty environment
			require("epclust", quietly=TRUE)
			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
		}

		if (verbose)
			cat(paste("   Distances from ",i," to ",i+1,"...",n,"\n", sep=""))

		# Get CWT of column i, and run computations for columns j>i
		cwt_i <- getCWT(i, L)
		WX <- filterMA(Mod(cwt_i * Conj(cwt_i)), smooth_lvl)

		for (j in (i+1):n)
		{
			cwt_j <- getCWT(j, L)

			# Compute the ratio of integrals formula 5.6 for WER^2
			# in https://arxiv.org/abs/1101.4744v2 paragraph 5.3
			num <- filterMA(Mod(cwt_i * Conj(cwt_j)), smooth_lvl)
			WY <- filterMA(Mod(cwt_j * Conj(cwt_j)), smooth_lvl)
			wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))

			# Distance is symmetric: fill both triangles of the matrix
			Xwer_dist[i,j] <- sqrt(L * ncol(cwt_i) * (1 - wer2))
			Xwer_dist[j,i] <- Xwer_dist[i,j]
		}
		Xwer_dist[i,i] <- 0.
	}

	if (verbose)
		cat(paste("--- Precompute and serialize synchrones CWT\n", sep=""))

	# Split indices by packets of length at most nb_cwt_per_chunk
	indices_cwt <- .splitIndices(indices, nb_cwt_per_chunk)
	# NOTE: next loop could potentially be run in //. Indices would be permuted (by
	# serialization order), and synchronicity would be required because of concurrent
	# writes. Probably not worth the effort - but possible.
	for (inds in indices_cwt)
		computeSaveCWT(inds)

	parll <- (ncores > 1)
	if (parll)
	{
		# outfile=="" to see stderr/stdout on terminal
		cl <-
			if (verbose)
				parallel::makeCluster(ncores, outfile="")
			else
				parallel::makeCluster(ncores)
		Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
		parallel::clusterExport(cl, envir=environment(),
			varlist=c("parll","n","L","Xwer_dist_desc","getCWT","verbose"))
	}

	if (verbose)
		cat(paste("--- Compute WER distances\n", sep=""))

	# Each task i fills row/column i of the (shared) distance matrix for all j>i
	ignored <-
		if (parll)
			parallel::parLapply(cl, seq_len(n-1), computeDistances)
		else
			lapply(seq_len(n-1), computeDistances)
	# Last diagonal entry is never visited by computeDistances (loop stops at n-1)
	Xwer_dist[n,n] <- 0.

	if (parll)
		parallel::stopCluster(cl)

	unlink(cwt_file) #remove binary file

	Xwer_dist[,] #~small matrix K x K
}