use Rcpp; ongoing debug for parallel synchrones computation
[epclust.git] / epclust / R / clustering.R
index 0d37c24..4d43b2b 100644
@@ -123,17 +123,46 @@ computeSynchrones = function(medoids, getRefSeries,
        {
                ref_series = getRefSeries(indices)
                nb_series = nrow(ref_series)
-               #get medoids indices for this chunk of series
 
-               #TODO: debug this (address is OK but values are garbage: why?)
-#               mi = .Call("computeMedoidsIndices", medoids@address, ref_series, PACKAGE="epclust")
+               if (parll)
+               {
+                       require("bigmemory", quietly=TRUE)
+                       require("synchronicity", quietly=TRUE)
+                       require("epclust", quietly=TRUE)
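+                       # re-attach the shared objects from their descriptors: external pointers
+                       # created in the master process are not valid inside a cluster worker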
+                       synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+                       medoids <- bigmemory::attach.big.matrix(medoids_desc)
+                       m <- synchronicity::attach.mutex(m_desc)
+               }
 
-               #R-equivalent, requiring a matrix (thus potentially breaking "fit-in-memory" hope)
-               mat_meds = medoids[,]
-               mi = rep(NA,nb_series)
-               for (i in 1:nb_series)
-                       mi[i] <- which.min( rowSums( sweep(mat_meds, 2, ref_series[i,], '-')^2 ) )
-               rm(mat_meds); gc()
+               #TODO: debug the parallel workers with dbs(); see
+               #  https://www.r-bloggers.com/debugging-parallel-code-with-dbs/
+               #  http://gforge.se/2015/02/how-to-go-parallel-in-r-basics-tips/
+
+#OK: the debug writes below (dumped to file "TOTO") return the expected values
+#write(length(indices), file="TOTO")
+#write( computeMedoidsIndices(medoids@address, getRefSeries(indices[1:600])), file="TOTO")
+#stop()
+
+#write(indices, file="TOTO", ncolumns=10, append=TRUE)
+#write("medoids", file="TOTO", ncolumns=1, append=TRUE)
+#write(medoids[1,1:3], file="TOTO", ncolumns=1, append=TRUE)
+#write("synchrones", file="TOTO", ncolumns=1, append=TRUE)
+#write(synchrones[1,1:3], file="TOTO", ncolumns=1, append=TRUE)
+
+#NOT OK: the second argument should simply be ref_series; or does that in fact work,
+#and the garbage values come from a race around the mutex instead?
+               #get medoids indices for this chunk of series
+               mi = computeMedoidsIndices(medoids@address, getRefSeries(indices[1:600]))  #should be: ref_series
+               write("MI ::::", file="TOTO", ncolumns=1, append=TRUE)  #temporary debug trace
+               write(mi[1:3], file="TOTO", ncolumns=1, append=TRUE)
+
+#              #R-equivalent, requiring a matrix (thus potentially breaking "fit-in-memory" hope)
+#              mat_meds = medoids[,]
+#              mi = rep(NA,nb_series)
+#              for (i in 1:nb_series)
+#                      mi[i] <- which.min( rowSums( sweep(mat_meds, 2, ref_series[i,], '-')^2 ) )
+#              rm(mat_meds); gc()
 
                for (i in seq_len(nb_series))
                {
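The for loop above continues past the end of this excerpt; its body is what the mutex m is attached for: presumably each series of the chunk is added into the row of its closest medoid (mi[i]) in the shared synchrones matrix. A minimal sketch of that accumulation, as an illustration only, since the actual loop body is not shown here:

	for (i in seq_len(nb_series))
	{
		if (parll)
			synchronicity::lock(m)       # one writer at a time on the shared big.matrix
		synchrones[ mi[i], ] = synchrones[ mi[i], ] + ref_series[i,]
		if (parll)
			synchronicity::unlock(m)
	}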
@@ -155,18 +184,19 @@ computeSynchrones = function(medoids, getRefSeries,
        parll = (requireNamespace("synchronicity",quietly=TRUE)
                && parll && Sys.info()['sysname'] != "Windows")
        if (parll)
+       {
                m <- synchronicity::boost.mutex()
+               m_desc <- synchronicity::describe(m)
+               synchrones_desc = bigmemory::describe(synchrones)
+               medoids_desc = bigmemory::describe(medoids)
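+               # export the lightweight descriptors rather than the big.matrix / mutex objects:
+               # their external pointers would not survive serialization to the workers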
 
-       if (parll)
-       {
                cl = parallel::makeCluster(ncores_clust)
                parallel::clusterExport(cl,
-                       varlist=c("synchrones","counts","verbose","medoids","getRefSeries"),
+                       varlist=c("synchrones_desc","counts","verbose","m_desc","medoids_desc","getRefSeries"),
                        envir=environment())
        }
 
        indices_workers = .spreadIndices(seq_len(nb_ref_curves), nb_series_per_chunk)
-#browser()
        ignored <-
                if (parll)
                        parallel::parLapply(cl, indices_workers, computeSynchronesChunk)
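The describe / export / attach round-trip used above can be exercised in isolation. The sketch below only illustrates the bigmemory + synchronicity pattern; the object names (shared, mut, ...) are made up and it is not code from the package:

library(parallel)

shared <- bigmemory::big.matrix(nrow=4, ncol=3, type="double", init=0)
shared_desc <- bigmemory::describe(shared)        # small, serializable descriptor
mut <- synchronicity::boost.mutex()
mut_desc <- synchronicity::describe(mut)

cl <- makeCluster(2)
clusterExport(cl, varlist=c("shared_desc","mut_desc"), envir=environment())
ignored <- parLapply(cl, 1:4, function(i) {
	require("bigmemory", quietly=TRUE)
	require("synchronicity", quietly=TRUE)
	shared <- bigmemory::attach.big.matrix(shared_desc)   # re-attach inside the worker
	mut <- synchronicity::attach.mutex(mut_desc)
	synchronicity::lock(mut)    # not strictly needed here (disjoint rows), kept to mirror the pattern
	shared[i,] <- shared[i,] + i
	synchronicity::unlock(mut)
	NULL
})
stopCluster(cl)
shared[,]    # each row i now holds the value i

Exporting the big.matrix or mutex objects themselves (as the removed varlist did with "synchrones" and "medoids") sends only their external pointers, which are meaningless in the worker processes; the descriptors are what make the re-attachment possible.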
@@ -233,28 +263,33 @@ computeWerDists = function(synchrones, ncores_clust=1,verbose=FALSE,parll=TRUE)
                pairs = c(pairs, lapply(V, function(v) c(i,v)))
        }
 
-       computeCWT = function(i)
-       {
-               ts <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
-               totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
-               ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
-               #Normalization
-               sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
-               sqres <- sweep(ts.cwt,2,sqs,'*')
-               sqres / max(Mod(sqres))
-       }
-
        # Distance between rows i and j
        computeDistancesIJ = function(pair)
        {
+               require("bigmemory", quietly=TRUE)
+               require("epclust", quietly=TRUE)
+               synchrones <- bigmemory::attach.big.matrix(synchrones_desc)
+               Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
+
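+               # computeCWT is defined inside the worker function so that it closes over the
+               # 'synchrones' big.matrix attached just above, not over the master's object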
+               computeCWT = function(i)
+               {
+                       ts <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
+                       totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=FALSE)
+                       ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
+                       #Normalization
+                       sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
+                       sqres <- sweep(ts.cwt,2,sqs,'*')
+                       sqres / max(Mod(sqres))
+               }
+
                i = pair[1] ; j = pair[2]
                if (verbose && j==i+1)
                        cat(paste("   Distances (",i,",",j,"), (",i,",",j+1,") ...\n", sep=""))
                cwt_i = computeCWT(i)
                cwt_j = computeCWT(j)
-               num <- .Call("filter", Mod(cwt_i * Conj(cwt_j)), PACKAGE="epclust")
-               WX  <- .Call("filter", Mod(cwt_i * Conj(cwt_i)), PACKAGE="epclust")
-               WY  <- .Call("filter", Mod(cwt_j * Conj(cwt_j)), PACKAGE="epclust")
+               num <- epclustFilter(Mod(cwt_i * Conj(cwt_j)))
+               WX  <- epclustFilter(Mod(cwt_i * Conj(cwt_i)))
+               WY  <- epclustFilter(Mod(cwt_j * Conj(cwt_j)))
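+               # epclustFilter replaces the former .Call("filter", ..., PACKAGE="epclust")
+               # invocations (cf. commit message: "use Rcpp")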
                wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
                Xwer_dist[i,j] <- sqrt(delta * ncol(cwt_i) * (1 - wer2))
                Xwer_dist[j,i] <- Xwer_dist[i,j]
@@ -264,9 +299,11 @@ computeWerDists = function(synchrones, ncores_clust=1,verbose=FALSE,parll=TRUE)
        if (parll)
        {
                cl = parallel::makeCluster(ncores_clust)
-               parallel::clusterExport(cl,
-                       varlist=c("synchrones","totnoct","nvoice","w0","s0log","noctave","s0","verbose"),
-                       envir=environment())
+               synchrones_desc <- bigmemory::describe(synchrones)
+               Xwer_dist_desc <- bigmemory::describe(Xwer_dist)
+
+               parallel::clusterExport(cl, varlist=c("synchrones_desc","Xwer_dist_desc","totnoct",
+                       "nvoice","w0","s0log","noctave","s0","verbose"), envir=environment())
        }
 
        ignored <-
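The TODO in the first hunk points at dbs() for debugging the workers, and the current traces go through write(..., file="TOTO"). Two lighter-weight options, sketched here assuming a PSOCK cluster (the file names are made up):

# 1) Collect everything the workers print into a single log file:
cl = parallel::makeCluster(ncores_clust, outfile="workers.log")

# 2) Or tag the ad-hoc traces with the worker's PID, so that interleaved writes
#    from different workers do not end up mixed in one "TOTO" file:
write(length(indices), file=paste0("TOTO_", Sys.getpid()), ncolumns=1, append=TRUE)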