# Cluster one full task (nb_curves / ntasks series); only stage 1
clusteringTask = function(indices, getCoefs, K1, nb_series_per_chunk, ncores)
{
	cl = parallel::makeCluster(ncores)
	parallel::clusterExport(cl, varlist=c("getCoefs","K1"), envir=environment())
	repeat
	{
		nb_workers = max( 1, floor( length(indices) / nb_series_per_chunk ) )
		indices_workers = lapply( seq_len(nb_workers), function(i)
			indices[(nb_series_per_chunk*(i-1)+1):min(nb_series_per_chunk*i, length(indices))] )
		# Spread the remaining load among the workers, one index at a time
		rem = length(indices) - nb_workers * nb_series_per_chunk
		while (rem > 0)
		{
			index = rem %% nb_workers + 1
			indices_workers[[index]] = c(indices_workers[[index]], indices[length(indices)-rem+1])
			rem = rem - 1
		}
		indices = unlist( parallel::parLapply( cl, indices_workers, function(inds) {
			require("epclust", quietly=TRUE)
			inds[ computeClusters1(getCoefs(inds), K1) ]
		} ) )
		if (length(indices) == K1)
			break
	}
	parallel::stopCluster(cl)
	indices # the final medoid indices
}
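
# Illustrative sketch (not from the original sources): a possible call to
# clusteringTask on synthetic data. 'coefs_demo', 'getCoefs_demo' and all
# parameter values are assumptions; the block is wrapped in if (FALSE) so
# that sourcing this file does not execute it. Note that getCoefs is built
# with local() so the data travels with the closure to the workers
# (clusterExport does not serialize bindings of the global environment).
if (FALSE)
{
	getCoefs_demo = local({
		coefs_demo = matrix(rnorm(300*10), nrow=300) # 300 series, 10 coefficients each
		function(inds) coefs_demo[inds,,drop=FALSE]
	})
	# Shrinks the 300 indices chunk by chunk until exactly K1=15 medoids remain
	medoids_ids = clusteringTask(1:300, getCoefs_demo, K1=15,
		nb_series_per_chunk=100, ncores=2)
}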

# Stage-1 clustering: apply PAM on a matrix of coefficients (one series per
# row) and return the row indices of the K1 medoids
computeClusters1 = function(coefs, K1)
	cluster::pam(coefs, K1, diss=FALSE)$id.med
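
# Usage sketch (assumption, not in the original): PAM's 'id.med' component
# holds the row indices of the medoids, so the medoid coefficient vectors can
# be recovered by indexing the input matrix.
if (FALSE)
{
	coefs = matrix(rnorm(40*6), nrow=40) # 40 series, 6 coefficients each
	med_ids = computeClusters1(coefs, 4) # 4 row indices into 'coefs'
	coefs[med_ids,]                      # the medoid coefficient vectors
}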

# Stage-2 clustering: cluster the synchrones derived from the stage-1 medoids,
# and return the rows of 'medoids' selected as the K2 final medoids
computeClusters2 = function(medoids, K2, getRefSeries, nb_series_per_chunk)
{
	synchrones = computeSynchrones(medoids, getRefSeries, nb_series_per_chunk)
	medoids[ cluster::pam(computeWerDists(synchrones), K2, diss=TRUE)$medoids , ]
}
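
# Stage-2 usage sketch (assumption): 'series_demo' stands in for the reference
# series; getRefSeries must return NULL once the requested indices are out of
# range, since computeSynchrones relies on that to stop. Series should be at
# least ~2^13 points long because computeWerDists hard-codes noctave = 13.
if (FALSE)
{
	series_demo = matrix(rnorm(30*16384), nrow=30)
	getRefSeries_demo = function(inds) {
		inds = inds[inds <= nrow(series_demo)]
		if (length(inds) == 0) NULL else series_demo[inds,,drop=FALSE]
	}
	medoids_demo = series_demo[1:8,] # as if stage 1 had selected these 8 series
	final_medoids = computeClusters2(medoids_demo, K2=3, getRefSeries_demo, 10)
}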

# Compute the synchrone curves (normalized sum of the series in each cluster)
# from a clustering result, streaming the reference series with getRefSeries
computeSynchrones = function(medoids, getRefSeries, nb_series_per_chunk)
{
	K = nrow(medoids)
	synchrones = matrix(0, nrow=K, ncol=ncol(medoids))
	counts = rep(0, K)
	index = 1
	repeat
	{
		range = (index-1) + seq_len(nb_series_per_chunk)
		ref_series = getRefSeries(range)
		if (is.null(ref_series))
			break
		# Assign each series of this chunk to its closest medoid (squared L2 distance)
		for (i in seq_len(nrow(ref_series)))
		{
			j = which.min( rowSums( sweep(medoids, 2, ref_series[i,], '-')^2 ) )
			synchrones[j,] = synchrones[j,] + ref_series[i,]
			counts[j] = counts[j] + 1
		}
		index = index + nb_series_per_chunk
	}
	#NOTE: some clusters may be empty (when the series already come from stage 2)
	# ...maybe; but let's hope the resulting K1' stays noticeably larger than K2
	synchrones = sweep(synchrones, 1, counts, '/')
	# Drop empty clusters: their rows are NaN after the division by a zero count
	synchrones[ sapply(seq_len(K), function(i) all(!is.nan(synchrones[i,]))) , ]
}
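
# Sketch (assumption): computeSynchrones on toy data. Each reference series is
# assigned to its closest medoid; the rows of the result are the per-cluster
# normalized sums, with empty clusters dropped.
if (FALSE)
{
	refs = matrix(rnorm(50*24), nrow=50) # 50 series of length 24
	getRefSeries_demo = function(inds) {
		inds = inds[inds <= nrow(refs)]
		if (length(inds) == 0) NULL else refs[inds,,drop=FALSE]
	}
	computeSynchrones(refs[1:5,], getRefSeries_demo, 10) # at most 5 synchrones
}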

# Compute the WER distances between the synchrone curves (in rows)
computeWerDists = function(curves)
{
	if (!require("Rwave", quietly=TRUE))
		stop("Unable to load Rwave library")
	n <- nrow(curves)
	delta <- ncol(curves)
	#TODO: automatic tuning of all these parameters? (for other users)
	nvoice <- 4
	# noctave = 13: largest scale 2^13 = 8192 half-hours ~ 180 days; ~log2(ncol(curves))
	noctave = 13
	# The 4 below skips the smallest scales (first scale 2^(4/nvoice)*s0 = 4 half-hours)
	#NOTE: default scalevector == 2^(0:(noctave * nvoice) / nvoice) * s0 (?)
	# (currently unused: Rwave::cwt derives its scales from noctave and nvoice)
	scalevector <- 2^(4:(noctave * nvoice) / nvoice) * 2
	#condition: ( log2(s0*w0/(2*pi)) - 1 ) * nvoice + 1.5 >= 1
	s0 = 2
	w0 = 2*pi
	scaled = FALSE
	s0log = as.integer( (log2( s0*w0/(2*pi) ) - 1) * nvoice + 1.5 )
	totnoct = noctave + as.integer(s0log/nvoice) + 1

	# CWT of each (normalized) observation
	Xcwt4 <- lapply(seq_len(n), function(i) {
		ts <- scale(ts(curves[i,]), center=TRUE, scale=scaled)
		totts.cwt = Rwave::cwt(ts, totnoct, nvoice, w0, plot=0)
		ts.cwt = totts.cwt[ , s0log:(s0log+noctave*nvoice) ]
		# Normalize by the square root of the scales, then by the maximum modulus
		sqs <- sqrt( 2^(0:(noctave*nvoice)/nvoice) * s0 )
		sqres <- sweep(ts.cwt, MARGIN=2, sqs, '*')
		sqres / max(Mod(sqres))
	})

	Xwer_dist <- matrix(0., n, n)
	fcoefs = rep(1/3, 3) # moving average over 3 values (TODO: very slow! correct?!)
	for (i in 1:(n-1))
	{
		for (j in (i+1):n)
		{
			#TODO: later, compute the CWT here (not enough storage space for 200k series)
			# 'circular=TRUE' is wrong, should just take values on the sides; to rewrite in C
			num <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			WX <- stats::filter(Mod(Xcwt4[[i]] * Conj(Xcwt4[[i]])), fcoefs, circular=TRUE)
			WY <- stats::filter(Mod(Xcwt4[[j]] * Conj(Xcwt4[[j]])), fcoefs, circular=TRUE)
			# Aggregated squared wavelet coherence, turned into a distance
			wer2 <- sum(colSums(num)^2) / sum(colSums(WX) * colSums(WY))
			Xwer_dist[i,j] <- sqrt(delta * ncol(Xcwt4[[1]]) * (1 - wer2))
			Xwer_dist[j,i] <- Xwer_dist[i,j]
		}
	}
	diag(Xwer_dist) <- numeric(n)
	Xwer_dist
}
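
# Sketch (assumption): computeWerDists returns a symmetric n x n matrix with a
# zero diagonal, directly usable by PAM in 'diss' mode. Curves should be at
# least ~2^13 points long since noctave is hard-coded to 13.
if (FALSE)
{
	curves_demo = matrix(rnorm(5*16384), nrow=5)
	D = computeWerDists(curves_demo)
	cluster::pam(D, 2, diss=TRUE)$medoids # 2 medoid row numbers
}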