# Cluster one full task (nb_curves / ntasks series); stage 1 only
clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
{
	cl = parallel::makeCluster(ncores)
	parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
	repeat
	{
		nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_series_per_chunk*(i-1)+1):min(nb_series_per_chunk*i, length(indices))] )
		# Spread the remaining load among the workers: one leftover index per iteration
		rem = max(0, length(indices) - nb_workers*nb_series_per_chunk)
		while (rem > 0)
		{
			index = rem%%nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], tail(indices,rem)[1])
			rem = rem - 1
		}
		indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
			require("epclust", quietly=TRUE)
			inds[ computeClusters1(getCoefs(inds), K1) ]
		} ) )
		if (length(indices) == K1)
			break
	}
	parallel::stopCluster(cl)
	indices #medoids
}
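
# Example usage (a sketch; 'makeGetCoefs' and the random data below are hypothetical,
# not part of the package; the epclust package must be installed for the workers).
# Building the accessor as a closure over its data matrix ensures the data travels
# with it when clusterExport() sends it to the workers.
#   makeGetCoefs = function(coefs) function(inds) coefs[inds,,drop=FALSE]
#   getCoefsDemo = makeGetCoefs( matrix(rnorm(200*10), nrow=200) ) #200 series, 10 coefs each
#   medoids_indices = clusteringTask(seq_len(200), getCoefsDemo, K1=15,
#     nb_series_per_chunk=50, ncores=2)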

# Apply the clustering algorithm (PAM) on a matrix of coefficients (one series per row)
computeClusters1 = function(coefs, K1)
	cluster::pam(coefs, K1, diss=FALSE)$id.med
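
# Example (a sketch): medoid row indices of 100 random 5-dimensional coefficient vectors
#   computeClusters1( matrix(rnorm(500), nrow=100), K1=8 ) #integer vector of length 8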

# Stage-2 clustering: group the stage-1 medoids into K2 clusters, using WER distances
# between the synchrones built from chunks of ~nb_series_per_chunk reference series
computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
{
	synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
	medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
}
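
# Example (a sketch; 'series_matrix' and 'getRefSeriesDemo' are hypothetical): reduce 20
# stage-1 medoids to K2=5 final medoids. A getRefSeries accessor must return the requested
# rows, and NULL once the indices fall past the last series; note also that the CWT
# parameters hard-coded in computeWerDists() assume rather long series (~2^13 points).
#   series_matrix = matrix(rnorm(300*200), nrow=300) #300 series of length 200
#   getRefSeriesDemo = function(inds) {
#     inds = inds[inds <= nrow(series_matrix)]
#     if (length(inds) == 0) NULL else series_matrix[inds,,drop=FALSE]
#   }
#   medoids = series_matrix[sample(nrow(series_matrix), 20),]
#   final_medoids = computeClusters2(medoids, K2=5, getRefSeriesDemo, nb_series_per_chunk=100)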

# Compute the synchrone curves (per-cluster sums of the series, normalized by cluster
# size) from a clustering result
computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
{
	K = nrow(medoids)
	synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
	counts = rep(0,K)
	index = 1
	repeat
	{
		range = (index-1) + seq_len(nb_series_per_chunk)
		ref_series = getRefSeries(range)
		if (is.null(ref_series))
			break
		# Assign each series of this chunk to its closest medoid (squared L2 distance)
		for (i in seq_len(nrow(ref_series)))
		{
			j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
			synchrones[j,] = synchrones[j,] + ref_series[i,]
			counts[j] = counts[j] + 1
		}
		index = index + nb_series_per_chunk
	}
	#NOTE: some clusters may be empty, e.g. when the series already come from stage 2;
	#      their rows are dropped below, relying on the resulting K1' staying well above K2
	synchrones = sweep(synchrones, 1, counts, '/')
	synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , , drop=FALSE ]
}
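
# Example (a sketch, reusing the hypothetical 'series_matrix' and 'getRefSeriesDemo'
# from the computeClusters2() example above): per-cluster average curves for 20 medoids
#   synchrones = computeSynchrones( series_matrix[sample(nrow(series_matrix), 20),],
#     getRefSeriesDemo, nb_series_per_chunk=100 ) #at most 20 rows of length 200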

# Compute the WER distances between the synchrone curves (one curve per row)
computeWerDists = function(curves)
{
	if (!require("Rwave", quietly=TRUE))
		stop("Unable to load Rwave library")
	n <- nrow(curves)
	delta <- ncol(curves)
	#TODO: automatic tuning of all these parameters? (for other users)
	nvoice <- 4
	# noctave = 13: 2^13 = 8192 half-hours ~ 170 days; roughly log2(ncol(curves))
	noctave = 13
	# the 4 here represents 2^5 = 32 half-hours ~ 1 day
	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
	scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
	s0 = 2
	w0 = 2*pi
	scaled = FALSE
	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
	totnoct = noctave + as.integer(s0log/nvoice) + 1

	# Compute the CWT of each (normalized) series
	Xcwt4 <- lapply(seq_len(n), function(i) {
		ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=0)
		ts.cwt = totts.cwt[,s0log:(s0log+noctave*nvoice)]
		#Normalization
		sqs <- sqrt(2^(0:(noctave*nvoice)/nvoice)*s0)
		sqres <- sweep(ts.cwt, MARGIN=2, sqs, '*')
		sqres / max(Mod(sqres))
	})

	Xwer_dist <- matrix(0., n, n)
	fcoefs = rep(1/3, 3) #moving average over 3 values (TODO: quite slow; is this correct?)
	for (i in 1:(n-1))
	{
		for (j in (i+1):n)
		{
			#TODO: compute the CWT here later (not enough storage space for 200k series)
			# 'circular=TRUE' is not right; boundary values should be handled specially (rewrite in C)
			num <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			WX <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
			WY <- stats::filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			wer2 <- sum(colSums(num)^2) / sum( colSums(WX) * colSums(WY) )
			Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
			Xwer_dist[j,i] <- Xwer_dist[i,j]
		}
	}
	diag(Xwer_dist) <- numeric(n)
	Xwer_dist
}
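
# Example (a sketch): WER distance matrix of a few random curves, then PAM on it.
# Real series should be much longer (the hard-coded scales go up to 2^noctave points).
#   curves = matrix(rnorm(10*400), nrow=10) #10 curves of length 400
#   dists = computeWerDists(curves) #10 x 10 symmetric matrix with zero diagonal
#   cluster::pam(dists, 3, diss=TRUE)$id.med #indices of the 3 medoid curves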