| 1 | #' @name clustering |
| 2 | #' @rdname clustering |
| 3 | #' @aliases clusteringTask computeClusters1 computeClusters2 |
| 4 | #' |
#' @title Two-stage clustering within one task (see \code{claws()})
| 6 | #' |
#' @description \code{clusteringTask()} runs one full task, which consists of iterated
#' stage-1 clustering on nb_curves / ntasks energy contributions, computed through
#' discrete wavelet coefficients. \code{computeClusters1()} and \code{computeClusters2()}
#' are the atomic clustering procedures for stage 1 and stage 2, respectively.
#' The former applies the clustering algorithm (PAM) to a contributions matrix, while
#' the latter clusters the stage-1 medoids using WER distances between their synchrone
#' curves (processing at most nb_series_per_chunk series at a time)
| 13 | #' |
| 14 | #' @param indices Range of series indices to cluster in parallel (initial data) |
| 15 | #' @param getContribs Function to retrieve contributions from initial series indices: |
#' \code{getContribs(indices)} outputs a contributions matrix
#' @param contribs Matrix of contributions (e.g. output of \code{curvesToContribs()})
| 18 | #' @inheritParams computeSynchrones |
| 19 | #' @inheritParams claws |
| 20 | #' |
#' @return For \code{clusteringTask()} and \code{computeClusters1()}, the indices of the
#' computed (K1) medoids. Indices are irrelevant for stage-2 clustering, so
#' \code{computeClusters2()} outputs a matrix of medoids
#' (whose size is limited by nb_series_per_chunk)
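#'
#' @examples
#' \dontrun{
#' # Illustrative sketch on hypothetical toy data: the "contributions" of
#' # 60 series are plain random numbers here; real ones would come from
#' # curvesToContribs()
#' contribs <- matrix(rnorm(60*10), nrow=60)
#' getContribs <- function(inds) contribs[inds,,drop=FALSE]
#' # One full stage-1 task: reduce the 60 series to K1 = 8 medoid indices
#' clusteringTask(1:60, getContribs, K1=8, nb_series_per_chunk=20, ncores_clust=1)
#' # Atomic stage-1 clustering on the whole contributions matrix
#' computeClusters1(contribs, K1=8)
#' }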
| 25 | NULL |
| 26 | |
| 27 | #' @rdname clustering |
| 28 | #' @export |
| 29 | clusteringTask = function(indices, getContribs, K1, nb_series_per_chunk, ncores_clust) |
| 30 | { |
| 31 | |
	#NOTE: comment out parallel sections for debugging
	#TODO: propagate the verbose argument?
| 34 | |
| 35 | # cl = parallel::makeCluster(ncores_clust) |
| 36 | # parallel::clusterExport(cl, varlist=c("getContribs","K1"), envir=environment()) |
| 37 | repeat |
| 38 | { |
| 39 | |
		nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_series_per_chunk*(i-1)+1):min(nb_series_per_chunk*i, length(indices))] )
		# Spread the remaining load among the workers, one index at a time
		rem = length(indices) - nb_series_per_chunk*nb_workers
		while (rem > 0)
		{
			index = rem%%nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem)[1])
			rem = rem - 1
		}
#		indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
		indices = unlist( lapply( indices_workers, function(inds) {
#			require("epclust", quietly=TRUE)
			inds[ computeClusters1(getContribs(inds), K1) ]
		} ) )
| 61 | if (length(indices) == K1) |
| 62 | break |
| 63 | } |
| 64 | # parallel::stopCluster(cl) |
	indices # medoid indices
| 66 | } |
| 67 | |
| 68 | #' @rdname clustering |
| 69 | #' @export |
| 70 | computeClusters1 = function(contribs, K1) |
| 71 | cluster::pam(contribs, K1, diss=FALSE)$id.med |
| 72 | |
| 73 | #' @rdname clustering |
| 74 | #' @export |
| 75 | computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk) |
| 76 | { |
| 77 | synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk) |
	medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$id.med , ]
| 79 | } |
| 80 | |
| 81 | #' computeSynchrones |
| 82 | #' |
#' Compute the synchrone curves (normalized sums of the series in each cluster)
#' from a matrix of medoids, using L2 distances for cluster assignment.
| 85 | #' |
#' @param medoids Matrix of medoids (curves of same length as the initial series)
| 87 | #' @param getRefSeries Function to retrieve initial series (e.g. in stage 2 after series |
| 88 | #' have been replaced by stage-1 medoids) |
| 89 | #' @inheritParams claws |
| 90 | #' |
| 91 | #' @export |
| 92 | computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk) |
| 93 | { |
| 94 | K = nrow(medoids) |
| 95 | synchrones = matrix(0, nrow=K, ncol=ncol(medoids)) |
| 96 | counts = rep(0,K) |
| 97 | index = 1 |
| 98 | repeat |
| 99 | { |
		chunk_indices = (index-1) + seq_len(nb_series_per_chunk)
		ref_series = getRefSeries(chunk_indices)
| 102 | if (is.null(ref_series)) |
| 103 | break |
		# Assign each series of this chunk to its closest medoid (in L2 distance)
| 105 | for (i in seq_len(nrow(ref_series))) |
| 106 | { |
| 107 | j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) ) |
| 108 | synchrones[j,] = synchrones[j,] + ref_series[i,] |
| 109 | counts[j] = counts[j] + 1 |
| 110 | } |
| 111 | index = index + nb_series_per_chunk |
| 112 | } |
	#NOTE: some clusters could be empty, e.g. when the input series are already
	# stage-1 medoids; hopefully the number K1' of non-empty clusters stays well above K2
	synchrones = sweep(synchrones, 1, counts, '/')
	# Drop empty clusters: their rows are NaN after division by a zero count
	synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
| 117 | } |
| 118 | |
| 119 | #' computeWerDists |
| 120 | #' |
#' Compute the WER distances between the synchrone curves (in rows), as
#' returned (e.g.) by \code{computeSynchrones()}
| 123 | #' |
#' @param synchrones A matrix of synchrones, in rows. The series have the same
#' length as those in the initial dataset
| 126 | #' |
| 127 | #' @export |
| 128 | computeWerDists = function(synchrones) |
| 129 | { |
| 130 | n <- nrow(synchrones) |
| 131 | delta <- ncol(synchrones) |
	#TODO: automatic tuning of all these parameters? (for other users)
	nvoice <- 4
	# noctave = 13 <=> 2^13 = 8192 half-hours ~ 180 days; ~log2(ncol(synchrones))
	noctave <- 13
	# The 4 in 'scalevector' below represents 2^5 = 32 half-hours ~ 1 day
	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
	scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
	# Condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
	s0 <- 2
	w0 <- 2*pi
	scaled <- FALSE
	s0log <- as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
	totnoct <- noctave + as.integer(s0log/nvoice) + 1
| 145 | |
	# Compute the (normalized) CWT of each observation
	Xcwt4 <- lapply(seq_len(n), function(i) {
		series <- scale(ts(synchrones[i,]), center=TRUE, scale=scaled)
		totts.cwt <- Rwave::cwt(series, totnoct, nvoice, w0, plot=0)
		ts.cwt <- totts.cwt[,s0log:(s0log+noctave*nvoice)]
| 151 | #Normalization |
| 152 | sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0) |
| 153 | sqres <- sweep(ts.cwt,MARGIN=2,sqs,'*') |
| 154 | sqres / max(Mod(sqres)) |
| 155 | }) |
| 156 | |
| 157 | Xwer_dist <- matrix(0., n, n) |
	fcoefs <- rep(1/3, 3) # moving average over 3 values (TODO: very slow; correct?)
| 159 | for (i in 1:(n-1)) |
| 160 | { |
| 161 | for (j in (i+1):n) |
| 162 | { |
			#TODO: later, compute the CWT here (not enough storage space for 200k series)
			# 'circular=TRUE' is wrong, should just take values on the edges; to rewrite in C
			# Smoothed cross- and auto-spectra of the two CWTs (moving average per column)
			num <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			WX <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
			WY <- stats::filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			# wer2: coherence-type ratio between the two CWTs, aggregated over time and scales
			wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
| 169 | Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2)) |
| 170 | Xwer_dist[j,i] <- Xwer_dist[i,j] |
| 171 | } |
| 172 | } |
| 173 | diag(Xwer_dist) <- numeric(n) |
| 174 | Xwer_dist |
| 175 | } |