#' @param ncores_tasks Number of parallel tasks (1 means sequential tasks)
#' @param ncores_clust Number of parallel clusterings in one task
#' @param sep Separator in the CSV input file (if a file is provided)
-#' @param nbytes Number of bytes to serialize a floating-point number: 4 or 8
+#' @param nbytes 4 or 8 bytes to (de)serialize a floating-point number
#' @param endian Endianness for (de)serialization: "little" or "big"
#' @param verbose FALSE: nothing printed; TRUE: some execution traces
-#' @param parll TRUE: run in parallel. FALSE: run sequentially
#'
#' @return A list:
#' \itemize{
#'   \item medoids: the K2 final medoid curves
#'   \item ranks: the indices of the medoids in the dataset
#'   \item synchrones: the sum of the series within each final group
#' }
#' @examples
#' \dontrun{
#' # WER distance computations are too long for CRAN (for now)
+#' # Note: on this small example, a sequential run is faster
#'
#' # Random series around cos(x,2x,3x)/sin(x,2x,3x)
#' x <- seq(0,50,0.05)
#' library(wmtsa)
#' ref_series <- matrix( c(cos(x),cos(2*x),cos(3*x),sin(x),sin(2*x),sin(3*x)), ncol=6 )
#' series <- do.call( cbind, lapply( 1:6, function(i)
#' do.call(cbind, wmtsa::wavBootstrap(ref_series[,i], n.realization=40)) ) )
+#' # Mix series so that all groups are evenly spread
+#' permut <- (0:239)%%6 * 40 + (0:239)%/%6 + 1
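+#' # (indices 1,41,81,...,201,2,42,...: one series from each group in turn)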
+#' series <- series[,permut]
#' #dim(series) #c(1001,240)
-#' res_ascii <- claws(series, K1=30, K2=6, 100, verbose=TRUE)
+#' res_ascii <- claws(series, K1=30, K2=6, nb_series_per_chunk=500,
+#' nb_items_clust=100, random=FALSE, verbose=TRUE, ncores_clust=1)
#'
#' # Same example, from CSV file
#' csv_file <- tempfile(pattern="epclust_series.csv_")
#' write.table(t(series), csv_file, sep=",", row.names=FALSE, col.names=FALSE)
-#' res_csv <- claws(csv_file, K1=30, K2=6, 100)
+#' res_csv <- claws(csv_file, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
#'
#' # Same example, from binary file
#' bin_file <- tempfile(pattern="epclust_series.bin_")
#' nbytes <- 8
#' endian <- "little"
-#' binarize(csv_file, bin_file, 500, nbytes, endian)
+#' binarize(csv_file, bin_file, 500, ",", nbytes, endian)
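+#' # getDataInFile reads the series at the given indices back from the binary file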
#' getSeries <- function(indices) getDataInFile(indices, bin_file, nbytes, endian)
-#' res_bin <- claws(getSeries, K1=30, K2=6, 100)
+#' res_bin <- claws(getSeries, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
#' unlink(csv_file)
#' unlink(bin_file)
#'
#' # Same example, from SQLite database
#' library(DBI)
#' series_db <- dbConnect(RSQLite::SQLite(), "file::memory:")
#' # Prepare data.frame in DB-format
-#' n <- nrow(series)
-#' time_values <- data.frame(
-#' id <- rep(1:n,each=L),
-#' time <- rep( as.POSIXct(1800*(0:n),"GMT",origin="2001-01-01"), L ),
-#' value <- as.double(t(series)) )
+#' n <- ncol(series)
+#' L <- nrow(series)
+#' times_values <- data.frame(
+#' id = rep(1:n,each=L),
+#' time = rep( as.POSIXct(1800*(1:L),"GMT",origin="2001-01-01"), n ),
+#' value = as.double(series) )
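+#' # (long format: n*L rows, one per (id,time) pair; series stacked column by column)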
#' dbWriteTable(series_db, "times_values", times_values)
#' # Fill associative array, map index to identifier
#' indexToID_inDB <- as.character(
-#' dbGetQuery(series_db, 'SELECT DISTINCT id FROM time_values')[,"id"] )
+#' dbGetQuery(series_db, 'SELECT DISTINCT id FROM times_values')[,"id"] )
#' serie_length <- as.integer( dbGetQuery(series_db,
-#' paste("SELECT COUNT * FROM time_values WHERE id == ",indexToID_inDB[1],sep="")) )
+#' paste("SELECT COUNT(*) FROM times_values WHERE id == ",indexToID_inDB[1],sep="")) )
#' getSeries <- function(indices) {
+#' indices <- indices[ indices <= length(indexToID_inDB) ]
+#' if (length(indices) == 0)
+#' return (NULL)
#' request <- "SELECT id,value FROM times_values WHERE id in ("
-#' for (i in indices)
-#' request <- paste(request, indexToID_inDB[i], ",", sep="")
+#' for (i in seq_along(indices)) {
+#' request <- paste(request, indexToID_inDB[ indices[i] ], sep="")
+#' if (i < length(indices))
+#' request <- paste(request, ",", sep="")
+#' }
#' request <- paste(request, ")", sep="")
#' df_series <- dbGetQuery(series_db, request)
-#' if (length(df_series) >= 1)
-#' as.matrix(df_series[,"value"], nrow=serie_length)
-#' else
-#' NULL
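+#' # Reshape the values to one column per series, as claws expects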
+#' matrix(df_series[,"value"], nrow=serie_length)
#' }
-#' res_db <- claws(getSeries, K1=30, K2=6, 100))
+#' res_db <- claws(getSeries, 30, 6, 500, 100, random=FALSE, ncores_clust=1)
#' dbDisconnect(series_db)
#'
-#' # All results should be the same:
-#' library(digest)
-#' digest::sha1(res_ascii)
-#' digest::sha1(res_csv)
-#' digest::sha1(res_bin)
-#' digest::sha1(res_db)
+#' # All results should be equal:
+#' all(res_ascii$ranks == res_csv$ranks
+#' & res_ascii$ranks == res_bin$ranks
+#' & res_ascii$ranks == res_db$ranks)
#' }
#' @export
-claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=7*K1,
+claws <- function(series, K1, K2, nb_series_per_chunk, nb_items_clust=5*K1,
algoClust1=function(data,K) cluster::pam(t(data),K,diss=FALSE,pamonce=1)$id.med,
algoClust2=function(dists,K) cluster::pam(dists,K,diss=TRUE,pamonce=1)$id.med,
wav_filt="d8", contrib_type="absolute", WER="end", smooth_lvl=3, nvoice=4,
random=TRUE, ntasks=1, ncores_tasks=1, ncores_clust=3, sep=",", nbytes=4,
- endian=.Platform$endian, verbose=FALSE, parll=TRUE)
+ endian=.Platform$endian, verbose=FALSE)
{
# Check/transform arguments
if (!is.matrix(series) && !bigmemory::is.big.matrix(series)
stop("'sep': character")
nbytes <- .toInteger(nbytes, function(x) x==4 || x==8)
verbose <- .toLogical(verbose)
- parll <- .toLogical(parll)
# Binarize series if it is not a function; the aim is to always use a function,
# to uniformize treatments. An equally good alternative would be to use a file-backed
# big.matrix.
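+ # (for illustration: a matrix input is serialized with binarize() and then accessed
+ # through a closure like function(indices) getDataInFile(indices, series_file, nbytes,
+ # endian), mirroring the getSeries examples above; 'series_file' is a placeholder name)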
# Serialize all computed wavelet contributions into a file
contribs_file <- ".contribs.epclust.bin"
- index <- 1
- nb_curves <- 0
if (verbose)
cat("...Compute contributions and serialize them (or retrieve past binary file)\n")
if (!file.exists(contribs_file))
indices_all[((i-1)*nb_series_per_task+1):upper_bound]
})
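+ # Tasks run in parallel only when more than one core is requested for them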
+ parll <- (ncores_tasks > 1)
if (parll && ntasks>1)
{
# Initialize parallel runs: outfile="" allows verbose traces to be printed in the console
cl <-
if (verbose)
parallel::makeCluster(ncores_tasks, outfile="")
else
parallel::makeCluster(ncores_tasks)
- varlist <- c("ncores_clust","verbose","parll", #task 1 & 2
+ varlist <- c("ncores_clust","verbose", #task 1 & 2
"K1","getContribs","algoClust1","nb_items_clust") #task 1
if (WER=="mix")
{
if (parll && ntasks>1)
require("epclust", quietly=TRUE)
indices_medoids <- clusteringTask1(inds, getContribs, K1, algoClust1,
- nb_items_clust, ncores_clust, verbose, parll)
+ nb_items_clust, ncores_clust, verbose)
if (WER=="mix")
{
indices_medoids <- clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
- nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_clust,verbose,parll)
+ nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_clust,verbose)
}
indices_medoids
}
}
# As explained above, after all runs we obtain ntasks*[K1 or K2] medoid indices,
- # depending wether WER=="end" or "mix", respectively.
+ # depending on whether WER=="end" or "mix", respectively.
indices_medoids_all <-
if (parll && ntasks>1)
unlist( parallel::parLapply(cl, indices_tasks, runTwoStepClustering) )
# it's better to just re-use ncores_clust
ncores_last_stage <- ncores_clust
-
-
-#TODO: here, save all inputs to clusteringTask2 and compare :: must have differences...
-
-
-
# Run last clustering tasks to obtain only K2 medoids indices
if (verbose)
cat("...Run final // stage 1 + stage 2\n")
indices_medoids <- clusteringTask1(indices_medoids_all, getContribs, K1, algoClust1,
- nb_items_clust, ncores_tasks*ncores_clust, verbose, parll)
- indices_medoids <- clusteringTask2(indices_medoids, getContribs, K2, algoClust2,
- nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_last_stage,verbose,parll)
+ nb_items_clust, ncores_tasks*ncores_clust, verbose)
+
+ indices_medoids <- clusteringTask2(indices_medoids, getSeries, K2, algoClust2,
+ nb_series_per_chunk,smooth_lvl,nvoice,nbytes,endian,ncores_last_stage,verbose)
# Compute synchrones, that is to say the cumulated power consumptions for each of the K2
# final groups.
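+ # (each synchrone is the pointwise sum of the series assigned to one final medoid)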
medoids <- getSeries(indices_medoids)
synchrones <- computeSynchrones(medoids, getSeries, nb_curves, nb_series_per_chunk,
- ncores_last_stage, verbose, parll)
+ ncores_last_stage, verbose)
# NOTE: no need to use big.matrix here, since there are only K2 << K1 << N remaining curves
list("medoids"=medoids, "ranks"=indices_medoids, "synchrones"=synchrones)