X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=epclust%2Ftests%2Ftestthat%2Ftest.clustering.R;h=e22835aaf8bb65cdbbdd395a3dbb7cc81da90826;hb=9f05a4a0b703deffd7bdb9cd99b0aaa2246a5c83;hp=eeed5761f82a3087ada4c3c16817762b0dcd11b6;hpb=d300b49cd63d0539d29bbee120fa8237f7acee9b;p=epclust.git

diff --git a/epclust/tests/testthat/test.clustering.R b/epclust/tests/testthat/test.clustering.R
index eeed576..e22835a 100644
--- a/epclust/tests/testthat/test.clustering.R
+++ b/epclust/tests/testthat/test.clustering.R
@@ -1,2 +1,116 @@
-computeClusters
-computeSynchrones
+context("clustering")
+
+test_that("computeSynchrones behaves as expected",
+{
+  n = 300
+  x = seq(0,9.5,0.1)
+  L = length(x) #96 1/4h
+  K = 3
+  s1 = cos(x)
+  s2 = sin(x)
+  s3 = c( s1[1:(L%/%2)] , s2[(L%/%2+1):L] )
+  #sum((s1-s2)^2) == 96
+  #sum((s1-s3)^2) == 58
+  #sum((s2-s3)^2) == 38
+  s = list(s1, s2, s3)
+  series = matrix(nrow=L, ncol=n)
+  for (i in seq_len(n))
+    series[,i] = s[[I(i,K)]] + rnorm(L,sd=0.01)
+  getRefSeries = function(indices) {
+    indices = indices[indices <= n]
+    if (length(indices)>0) series[,indices] else NULL
+  }
+  synchrones = computeSynchrones(bigmemory::as.big.matrix(cbind(s1,s2,s3)), getRefSeries,
+    n, 100, sync_mean=TRUE, verbose=TRUE, parll=FALSE)
+
+  expect_equal(dim(synchrones), c(L,K))
+  for (i in 1:K)
+    expect_equal(synchrones[,i], s[[i]], tolerance=0.01)
+})
+
+# Helper function to divide indices into balanced sets
+test_that("Helper function to spread indices works properly",
+{
+  indices <- 1:400
+
+  # bigger nb_per_set than length(indices)
+  expect_equal(epclust:::.spreadIndices(indices,500), list(indices))
+
+  # nb_per_set == length(indices)
+  expect_equal(epclust:::.spreadIndices(indices,400), list(indices))
+
+  # length(indices) %% nb_per_set == 0
+  expect_equal(epclust:::.spreadIndices(indices,200),
+    c( list(indices[1:200]), list(indices[201:400]) ))
+  expect_equal(epclust:::.spreadIndices(indices,100),
+    c( list(indices[1:100]), list(indices[101:200]),
+      list(indices[201:300]), list(indices[301:400]) ))
+
+  # length(indices) %/% nb_per_set == 1, length(indices) %% nb_per_set == 100
+  expect_equal(epclust:::.spreadIndices(indices,300), list(indices))
+  # length(indices) %/% nb_per_set == 2, length(indices) %% nb_per_set == 42
+  repartition <- epclust:::.spreadIndices(indices,179)
+  expect_equal(length(repartition), 2)
+  expect_equal(length(repartition[[1]]), 179 + 21)
+  expect_equal(length(repartition[[2]]), 179 + 21)
+})
+
+test_that("clusteringTask1 behaves as expected",
+{
+  n = 900
+  x = seq(0,9.5,0.1)
+  L = length(x) #96 1/4h
+  K1 = 60
+  s = lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
+  series = matrix(nrow=L, ncol=n)
+  for (i in seq_len(n))
+    series[,i] = s[[I(i,K1)]] + rnorm(L,sd=0.01)
+  getSeries = function(indices) {
+    indices = indices[indices <= n]
+    if (length(indices)>0) series[,indices] else NULL
+  }
+  wf = "haar"
+  ctype = "absolute"
+  getContribs = function(indices) curvesToContribs(series[,indices],wf,ctype)
+  require("cluster", quietly=TRUE)
+  algoClust1 = function(contribs,K) cluster::pam(t(contribs),K,diss=FALSE)$id.med
+  indices1 = clusteringTask1(1:n, getContribs, K1, algoClust1, 75, verbose=TRUE, parll=FALSE)
+  medoids_K1 = getSeries(indices1)
+
+  expect_equal(dim(medoids_K1), c(L,K1))
+  # Not easy to evaluate result: at least we expect it to be better than random selection of
+  # medoids within initial series
+  distorGood = computeDistortion(series, medoids_K1)
+  for (i in 1:3)
+    expect_lte( distorGood, computeDistortion(series,series[,sample(1:n, K1)]) )
+})
+
+test_that("clusteringTask2 behave as expected", +{ + n = 900 + x = seq(0,9.5,0.1) + L = length(x) #96 1/4h + K1 = 60 + K2 = 3 + #for (i in 1:60) {plot(x^(1+i/30)*cos(x+i),type="l",col=i,ylim=c(-50,50)); par(new=TRUE)} + s = lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) ) + series = matrix(nrow=L, ncol=n) + for (i in seq_len(n)) + series[,i] = s[[I(i,K1)]] + rnorm(L,sd=0.01) + getRefSeries = function(indices) { + indices = indices[indices <= n] + if (length(indices)>0) series[,indices] else NULL + } + # Artificially simulate 60 medoids - perfect situation, all equal to one of the refs + medoids_K1 = bigmemory::as.big.matrix( sapply( 1:K1, function(i) s[[I(i,K1)]] ) ) + algoClust2 = function(dists,K) cluster::pam(dists,K,diss=TRUE)$id.med + medoids_K2 = clusteringTask2(medoids_K1, K2, algoClust2, getRefSeries, + n, 75, sync_mean=TRUE, verbose=TRUE, parll=FALSE) + + expect_equal(dim(medoids_K2), c(L,K2)) + # Not easy to evaluate result: at least we expect it to be better than random selection of + # medoids within 1...K1 (among references) + distorGood = computeDistortion(series, medoids_K2) + for (i in 1:3) + expect_lte( distorGood, computeDistortion(series,medoids_K1[,sample(1:K1, K2)]) ) +})