From cea14f3a36d329311d08b6c723c0102400f9bb6f Mon Sep 17 00:00:00 2001
From: Benjamin Auder <benjamin.auder@somewhere>
Date: Mon, 9 Jan 2017 19:39:39 +0100
Subject: [PATCH] work on main.R
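
Rework the data-acquisition loop in main.R: each chunk of curves is turned
into wavelet coefficients (curvesToCoeffs), written through writeTmp and
counted in nb_curves before the parallel clustering step; getClusters now
also returns the medoid ranks, and the final medoids are mapped back to
curves with coeffsToCurves.

A minimal usage sketch of the reworked entry point (hypothetical call, not
part of this patch; it assumes a data.frame input and the defaults from
defaults.R):

    # 'power_curves': illustrative data.frame, one series per row
    result = epclust(power_curves, K=5L, nb_series_per_chunk=100L)
    result$medoids  # final medoid curves
    result$ids      # identifiers of the medoid series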

---
 code/draft_R_pkg/R/algorithms.R |  11 +++-
 code/draft_R_pkg/R/main.R       | 102 +++++++++++++++++++-------------
 2 files changed, 71 insertions(+), 42 deletions(-)

diff --git a/code/draft_R_pkg/R/algorithms.R b/code/draft_R_pkg/R/algorithms.R
index e27a235..eda05e5 100644
--- a/code/draft_R_pkg/R/algorithms.R
+++ b/code/draft_R_pkg/R/algorithms.R
@@ -1,10 +1,17 @@
-getCoeffs = function(series)
+curvesToCoeffs = function(series)
 {
 	#... return wavelets coeffs : compute in parallel !
+	#TODO: always keep ID in first column
+}
+
+coeffsToCurves = function(coeffs)
+{
+	#re-expand on wavelet basis
 }
 
 getClusters = function(data, K)
 {
 	pam_output = pam(data, K)
-	return ( list(clusts=pam_output$clustering, medoids=pam_output$medoids) )
+	return ( list( clusts=pam_output$clustering, medoids=pam_output$medoids,
+		ranks=pam_output$id.med ) )
 }
diff --git a/code/draft_R_pkg/R/main.R b/code/draft_R_pkg/R/main.R
index 6dca708..bb7355b 100644
--- a/code/draft_R_pkg/R/main.R
+++ b/code/draft_R_pkg/R/main.R
@@ -3,7 +3,7 @@
 #' @title Cluster power curves with PAM in parallel
 #'
 #' @description Groups electricity power curves (or any series of similar nature) by applying PAM
-#' algorithm in parallel to chunks of size \code{nbSeriesPerChunk}
+#' algorithm in parallel to chunks of size \code{nb_series_per_chunk}
 #'
 #' @param data Access to the data, which can be of one of the three following types:
 #' \itemize{
@@ -13,7 +13,8 @@
 #'     (start) and number of curves (n); see example in package vignette.
 #' }
 #' @param K Number of clusters
-#' @param nbSeriesPerChunk Number of series in each group
+#' @param nb_series_per_chunk (Maximum) number of series in each group
+#' @param min_series_per_chunk Minimum number of series in each group
 #' @param writeTmp Function to write temporary wavelets coefficients (+ identifiers);
 #'   see defaults in defaults.R
 #' @param readTmp Function to read temporary wavelets coefficients (see defaults.R)
@@ -22,8 +23,8 @@
 #' @param ncores number of parallel processes; if NULL, use parallel::detectCores()
 #'
 #' @return A data.frame of the final medoids curves (identifiers + values)
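+#'
+#' @examples
+#' \dontrun{
+#' ## Illustrative sketch only (not from this patch): 'data' given as a user
+#' ## function returning the next 'n' series starting at 'index'; here
+#' ## mySeriesProvider is a hypothetical stand-in for such a function.
+#' res = epclust(mySeriesProvider, K=5L, nb_series_per_chunk=100L)
+#' }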
-epclust = function(data, K, nbSeriesPerChunk, writeTmp=ref_writeTmp, readTmp=ref_readTmp,
-	WER="end", ncores=NULL)
+epclust = function(data, K, nb_series_per_chunk, min_series_per_chunk=10*K,
+	writeTmp=defaultWriteTmp, readTmp=defaultReadTmp, WER="end", ncores=NULL)
 {
 	#TODO: setRefClass(...) to avoid copy data:
 	#http://stackoverflow.com/questions/2603184/r-pass-by-reference
@@ -34,18 +35,18 @@ epclust = function(data, K, nbSeriesPerChunk, writeTmp=ref_writeTmp, readTmp=ref
 			{
 				if (is.character(data))
 				{
-					dataCon = file(data, open="r")
+					data_con = file(data, open="r")
 				} else if (!isOpen(data))
 				{
 					open(data)
-					dataCon = data
+					data_con = data
 				}
 			},
 			error="data should be a data.frame, a function or a valid connection")
 	if (!is.integer(K) || K < 2)
 		stop("K should be an integer greater or equal to 2")
-	if (!is.integer(nbSeriesPerChunk) || nbSeriesPerChunk < K)
-		stop("nbSeriesPerChunk should be an integer greater or equal to K")
+	if (!is.integer(nb_series_per_chunk) || nb_series_per_chunk < K)
+		stop("nb_series_per_chunk should be an integer greater or equal to K")
 	if (!is.function(writeTmp) || !is.function(readTmp))
 		stop("read/writeTmp should be functional (see defaults.R)")
 	if (WER!="end" && WER!="mix")
@@ -54,72 +55,86 @@ epclust = function(data, K, nbSeriesPerChunk, writeTmp=ref_writeTmp, readTmp=ref
 
 	#1) acquire data (process curves, get as coeffs)
 	index = 1
-	nbCurves = 0
+	nb_curves = 0
 	repeat
 	{
+		coeffs_chunk = NULL
 		if (is.data.frame(data))
 		{
 			#full data matrix
 			if (index <= nrow(data))
 			{
-				writeTmp( getCoeffs( data[index:(min(index+nbSeriesPerChunk-1,nrow(data))),] ) )
-			} else
-			{
-				break
+				coeffs_chunk = curvesToCoeffs(
+					data[index:(min(index+nb_series_per_chunk-1,nrow(data))),])
 			}
 		} else if (is.function(data))
 		{
 			#custom user function to retrieve next n curves, probably to read from DB
-			coeffs_chunk = getCoeffs( data(index, nbSeriesPerChunk) )
-			if (!is.null(coeffs_chunk))
-			{
-				writeTmp(coeffs_chunk)
-			} else
-			{
-				break
-			}
+			coeffs_chunk = curvesToCoeffs( data(index, nb_series_per_chunk) )
 		} else
 		{
 			#incremental connection
 			#TODO: find a better way to parse than using a temp file
-			ascii_lines = readLines(dataCon, nbSeriesPerChunk)
+			ascii_lines = readLines(data_con, nb_series_per_chunk)
 			if (length(ascii_lines) > 0)
 			{
-				seriesChunkFile = ".tmp/seriesChunk"
-				writeLines(ascii_lines, seriesChunkFile)
-				writeTmp( getCoeffs( read.csv(seriesChunkFile) ) )
-			} else
-			{
-				break
+				series_chunk_file = ".tmp/series_chunk"
+				writeLines(ascii_lines, series_chunk_file)
+				coeffs_chunk = curvesToCoeffs( read.csv(series_chunk_file) )
 			}
 		}
-		index = index + nbSeriesPerChunk
+		if (is.null(coeffs_chunk))
+			break
+		writeTmp(coeffs_chunk)
+		nb_curves = nb_curves + nrow(coeffs_chunk)
+		index = index + nb_series_per_chunk
 	}
-	if (exists(dataCon))
-		close(dataCon)
+	if (exists("data_con"))
+		close(data_con)
+	if (nb_curves < min_series_per_chunk)
+		stop("Not enough data: less rows than min_series_per_chunk!")
 
+	#2) process coeffs (by nb_series_per_chunk) and cluster them in parallel
 	library(parallel)
 	ncores = ifelse(is.integer(ncores), ncores, parallel::detectCores())
 	cl = parallel::makeCluster(ncores)
 	parallel::clusterExport(cl=cl, varlist=c("X", "Y", "K", "p"), envir=environment())
 	library(cluster)
-	li = parallel::parLapply(cl, 1:B, )
-
-	#2) process coeffs (by nbSeriesPerChunk) and cluster them in parallel
 	#TODO: be careful of writing to a new temp file, then flush initial one, then re-use it...
 	repeat
 	{
-		completed = rep(FALSE, ............)
-		#while there is jobs to do (i.e. size of tmp "file" is greater than nbSeriesPerChunk),
-		#A) determine which tasks which processor will do (OK)
-		#B) send each (sets of) tasks in parallel
+		#while there are jobs to do (i.e. the size of the tmp "file" is greater than nb_series_per_chunk)
+		nb_workers = nb_curves %/% nb_series_per_chunk
+		indices = list()
+		#indices[[i]] == c(start_index, number_of_elements)
+		for (i in seq_len(nb_workers))
+			indices[[i]] = c(nb_series_per_chunk*(i-1)+1, nb_series_per_chunk)
+		remainder = nb_curves %% nb_series_per_chunk
+		if (remainder >= min_series_per_chunk)
+		{
+			nb_workers = nb_workers + 1
+			indices[[nb_workers]] = c(nb_curves-remainder+1, remainder)
+		} else if (remainder > 0)
+		{
+			#spread the load among other workers
+			#TODO: spread properly; for now, simply append the leftover curves to the last regular chunk
+			indices[[nb_workers]][2] = indices[[nb_workers]][2] + remainder
+		}
+		li = parallel::parLapply(cl, indices, processChunk, WER=="mix")
 		#C) flush tmp file (current parallel processes will write in it)
-		#always check "complete" flag (array, as I did in MPI) to know if "slaves" finished
 	}
-pam(x, k)
 	parallel::stopCluster(cl)
 
 	#3) readTmp last results, apply PAM on it, and return medoids + identifiers
+	final_coeffs = readTmp(1, nb_series_per_chunk)
+	if (nrow(final_coeffs) == K)
+	{
+		return ( list( medoids=coeffsToCurves(final_coeffs[,2:ncol(final_coeffs)]),
+			ids=final_coeffs[,1] ) )
+	}
+	pam_output = getClusters(as.matrix(final_coeffs[,2:ncol(final_coeffs)]), K)
+	medoids = coeffsToCurves(pam_output$medoids)
+	ids = final_coeffs[,1] [pam_output$ranks]
+	return (list(medoids=medoids, ids=ids))
 
 	#4) apply stage 2 (in parallel? inside task 2?)
 	if (WER == "end")
@@ -127,3 +142,10 @@ pam(x, k)
 		#from center curves, apply stage 2...
 	}
 }
+
+processChunk = function(indice, WER)
+{
+	#1) retrieve data; indice == c(start_index, number_of_elements) as built above
+	#2) cluster
+	#3) WER (optional)
+}
-- 
2.44.0