From e0154a59e55143dac0fbd2a4739a3509bc958e76 Mon Sep 17 00:00:00 2001
From: Benjamin Auder <benjamin.auder@somewhere>
Date: Tue, 14 Mar 2017 13:09:07 +0100
Subject: [PATCH] Fix CWT indices bug, drop reuse_bin, clean up examples; package checks pass

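computeWerDists() computed the CWTs of series 1..n (seq_len(n)) instead
of the series actually requested through 'indices', so WER distances
could be based on the wrong data whenever 'indices' was not 1..n. The
chunking is now done on 'indices' itself, and the inner helper takes
'inds' to avoid shadowing the outer variable. In claws(), the reuse_bin
argument is dropped (existing binary files are always reused), the
roxygen examples pass explicit chunk sizes and may run sequentially,
and the default nb_items_clust goes from 7*K1 to 5*K1.

Illustrative sketch of the indices pitfall (toy stand-in for getSeries,
not epclust code):

    getSeriesToy <- function(i) sin(i * seq(0, 2*pi, length.out=32))
    indices <- c(5, 12, 21)                    # series actually requested
    n <- length(indices)
    wrong <- sapply(seq_len(n), getSeriesToy)  # fetches series 1, 2, 3
    right <- sapply(indices, getSeriesToy)     # fetches series 5, 12, 21
    isTRUE(all.equal(wrong, right))            # FALSE: not the same data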
---
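Note for reviewers: with reuse_bin gone, claws() always reuses
.series.epclust.bin / .contribs.epclust.bin when they already exist in
the working directory. A hypothetical pre-run cleanup, if a previous
run used different data:

    for (f in c(".series.epclust.bin", ".contribs.epclust.bin"))
      if (file.exists(f)) unlink(f)
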
 epclust/R/clustering.R      |  5 ++---
 epclust/R/computeWerDists.R | 13 ++++++++-----
 epclust/R/main.R            | 39 ++++++++++++++-----------------------
 3 files changed, 25 insertions(+), 32 deletions(-)

diff --git a/epclust/R/clustering.R b/epclust/R/clustering.R
index 3e7fd38..886bfbc 100644
--- a/epclust/R/clustering.R
+++ b/epclust/R/clustering.R
@@ -26,7 +26,7 @@ clusteringTask1 <- function(indices, getContribs, K1, algoClust1, nb_items_clust
 	ncores_clust=3, verbose=FALSE, parll=TRUE)
 {
 	if (verbose)
-		cat(paste("*** Clustering task 1 on ",length(indices)," series\n", sep=""))
+		cat(paste("*** Clustering task 1 on ",length(indices)," series [start]\n", sep=""))
 
 	if (length(indices) <= K1)
 		return (indices)
@@ -62,8 +62,7 @@ clusteringTask1 <- function(indices, getContribs, K1, algoClust1, nb_items_clust
 			}
 		if (verbose)
 		{
-			cat(paste("*** [iterated] Clustering task 1: now ",
-				length(indices)," medoids\n", sep=""))
+			cat(paste("*** Clustering task 1 on ",length(indices)," medoids [iter]\n", sep=""))
 		}
 	}
 	if (parll)
diff --git a/epclust/R/computeWerDists.R b/epclust/R/computeWerDists.R
index 8eb755c..4dbcb7b 100644
--- a/epclust/R/computeWerDists.R
+++ b/epclust/R/computeWerDists.R
@@ -27,10 +27,13 @@ computeWerDists <- function(indices, getSeries, nb_series_per_chunk, smooth_lvl,
 
 	cwt_file <- tempfile(pattern="epclust_cwt.bin_")
 	# Compute the getSeries(indices) CWT, and store the results in the binary file
-	computeSaveCWT <- function(indices)
+	computeSaveCWT <- function(inds)
 	{
+		if (verbose)
+			cat("   Compute and save CWT on ",length(inds)," indices\n", sep="")
+
 		# Obtain CWT as big vectors of real part + imaginary part (concatenate)
-		ts_cwt <- sapply(indices, function(i) {
+		ts_cwt <- sapply(inds, function(i) {
 			ts <- scale(ts(getSeries(i)), center=TRUE, scale=FALSE)
 			ts_cwt <- Rwave::cwt(ts, noctave, nvoice, w0=2*pi, twoD=TRUE, plot=FALSE)
 			c( as.double(Re(ts_cwt)),as.double(Im(ts_cwt)) )
@@ -60,7 +63,7 @@ computeWerDists <- function(indices, getSeries, nb_series_per_chunk, smooth_lvl,
 			Xwer_dist <- bigmemory::attach.big.matrix(Xwer_dist_desc)
 		}
 
-		if (verbose && !parll)
+		if (verbose)
 			cat(paste("   Distances from ",i," to ",i+1,"...",n,"\n", sep=""))
 
 		# Get CWT of column i, and run computations for columns j>i
@@ -87,10 +90,10 @@ computeWerDists <- function(indices, getSeries, nb_series_per_chunk, smooth_lvl,
 		cat(paste("--- Precompute and serialize synchrones CWT\n", sep=""))
 
 	# Split indices by packets of length at most nb_cwt_per_chunk
-	indices_cwt <- .splitIndices(seq_len(n), nb_cwt_per_chunk)
+	indices_cwt <- .splitIndices(indices, nb_cwt_per_chunk)
 	# NOTE: next loop could potentially be run in //. Indices would be permuted (by
 	# serialization order), and synchronicity would be required because of concurrent
-	# writes. Probably not worth the effort - but possible to gain some bits of speed.
+	# writes. Probably not worth the effort - but possible.
 	for (inds in indices_cwt)
 		computeSaveCWT(inds)
 
diff --git a/epclust/R/main.R b/epclust/R/main.R
index e3fa807..fe78e63 100644
--- a/epclust/R/main.R
+++ b/epclust/R/main.R
@@ -67,11 +67,10 @@
 #' @param ncores_tasks Number of parallel tasks ('1' == sequential tasks)
 #' @param ncores_clust Number of parallel clusterings in one task
 #' @param sep Separator in CSV input file (if any provided)
-#' @param nbytes Number of bytes to serialize a floating-point number: 4 or 8
+#' @param nbytes 4 or 8 bytes to (de)serialize a floating-point number
 #' @param endian Endianness for (de)serialization: "little" or "big"
 #' @param verbose FALSE: nothing printed; TRUE: some execution traces
 #' @param parll TRUE: run in parallel. FALSE: run sequentially
-#' @param reuse_bin Re-use previously stored binary series and contributions
 #'
 #' @return A list:
 #' \itemize{
@@ -88,6 +87,7 @@
 #' @examples
 #' \dontrun{
 #' # WER distances computations are too long for CRAN (for now)
+#' parll <- FALSE # On this small example, sequential run is faster
 #'
 #' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
 #' x <- seq(0,50,0.05)
@@ -100,12 +100,13 @@
 #' permut <- (0:239)%%6 * 40 + (0:239)%/%6 + 1
 #' series = series[,permut]
 #' #dim(series) #c(240,1001)
-#' res_ascii <- claws(series, K1=30, K2=6, 100, random=FALSE, verbose=TRUE)
+#' res_ascii <- claws(series, K1=30, K2=6, nb_series_per_chunk=500,
+#'   nb_items_clust=100, random=FALSE, verbose=TRUE, parll=parll)
 #'
 #' # Same example, from CSV file
 #' csv_file <- tempfile(pattern="epclust_series.csv_")
 #' write.table(t(series), csv_file, sep=",", row.names=FALSE, col.names=FALSE)
-#' res_csv <- claws(csv_file, K1=30, K2=6, 100, random=FALSE)
+#' res_csv <- claws(csv_file, 30, 6, 500, 100, random=FALSE, parll=parll)
 #'
 #' # Same example, from binary file
 #' bin_file <- tempfile(pattern="epclust_series.bin_")
@@ -113,7 +114,7 @@
 #' endian <- "little"
 #' binarize(csv_file, bin_file, 500, ",", nbytes, endian)
 #' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
-#' res_bin <- claws(getSeries, K1=30, K2=6, 100, random=FALSE)
+#' res_bin <- claws(getSeries, 30, 6, 500, 100, random=FALSE, parll=parll)
 #' unlink(csv_file)
 #' unlink(bin_file)
 #'
@@ -133,6 +134,9 @@
 #' serie_length <- as.integer( dbGetQuery(series_db,
 #'   paste("SELECT COUNT(*) FROM times_values WHERE id == ",indexToID_inDB[1],sep="")) )
 #' getSeries <- function(indices) {
+#'   indices = indices[ indices <= length(indexToID_inDB) ]
+#'   if (length(indices) == 0)
+#'     return (NULL)
 #'   request <- "SELECT id,value FROM times_values WHERE id in ("
 #'   for (i in seq_along(indices)) {
 #'     request <- paste(request, indexToID_inDB[ indices[i] ],  sep="")
@@ -141,13 +145,9 @@
 #'   }
 #'   request <- paste(request, ")", sep="")
 #'   df_series <- dbGetQuery(series_db, request)
-#'   if (nrow(df_series) >= 1)
-#'     matrix(df_series[,"value"], nrow=serie_length)
-#'   else
-#'     NULL
+#'   matrix(df_series[,"value"], nrow=serie_length)
 #' }
-#' # reuse_bin==FALSE: DB do not garantee ordering
-#' res_db <- claws(getSeries, K1=30, K2=6, 100, random=FALSE, reuse_bin=FALSE)
+#' res_db <- claws(getSeries, 30, 6, 500, 100, random=FALSE, parll=parll)
 #' dbDisconnect(series_db)
 #'
 #' # All results should be equal:
@@ -156,18 +156,13 @@
 #'   & res_ascii$ranks == res_db$ranks)
 #' }
 #' @export
-claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=7*K1,
+claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=5*K1,
 	algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE,pamonce=1)$id.med,
 	algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE,pamonce=1)$id.med,
 	wav_filt="d8", contrib_type="absolute", WER="end", smooth_lvl=3, nvoice=4,
 	random=TRUE, ntasks=1, ncores_tasks=1, ncores_clust=3, sep=",", nbytes=4,
-	endian=.Platform$endian, verbose=FALSE, parll=TRUE, reuse_bin=TRUE)
+	endian=.Platform$endian, verbose=FALSE, parll=TRUE)
 {
-
-
-#TODO: comprendre differences.......... debuguer getSeries for DB
-
-
 	# Check/transform arguments
 	if (!is.matrix(series) && !bigmemory::is.big.matrix(series)
 		&& !is.function(series)
@@ -206,11 +201,8 @@ claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=7*K1,
 		if (verbose)
 			cat("...Serialize time-series (or retrieve past binary file)\n")
 		series_file <- ".series.epclust.bin"
-		if (!file.exists(series_file) || !reuse_bin)
-		{
-			unlink(series_file,)
+		if (!file.exists(series_file))
 			binarize(series, series_file, nb_series_per_chunk, sep, nbytes, endian)
-		}
 		getSeries <- function(inds) getDataInFile(inds, series_file, nbytes, endian)
 	}
 	else
@@ -220,9 +212,8 @@ claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=7*K1,
 	contribs_file <- ".contribs.epclust.bin"
 	if (verbose)
 		cat("...Compute contributions and serialize them (or retrieve past binary file)\n")
-	if (!file.exists(contribs_file) || !reuse_bin)
+	if (!file.exists(contribs_file))
 	{
-		unlink(contribs_file,)
 		nb_curves <- binarizeTransform(getSeries,
 			function(curves) curvesToContribs(curves, wav_filt, contrib_type),
 			contribs_file, nb_series_per_chunk, nbytes, endian)
-- 
2.44.0