context("clustering")

# NOTE(review): these tests rely on package-level helpers assumed to be in
# scope when run via testthat: I(i, K) (cyclic index helper, presumably
# 1 + (i-1) %% K — confirm against the package's test helpers),
# computeSynchrones, clusteringTask1, clusteringTask2, curvesToContribs
# and computeDistortion from epclust.

test_that("computeSynchrones behave as expected",
{
  # Build n=300 noisy series of length L=96 (a day of 1/4h steps),
  # cycling over K=3 reference shapes s1, s2, s3.
  n = 300
  x = seq(0,9.5,0.1)
  L = length(x) #96 1/4h
  K = 3
  s1 = cos(x)
  s2 = sin(x)
  s3 = c( s1[1:(L%/%2)] , s2[(L%/%2+1):L] )
  # Pairwise squared distances between the references, for the record:
  #sum((s1-s2)^2) == 96
  #sum((s1-s3)^2) == 58
  #sum((s2-s3)^2) == 38
  s = list(s1, s2, s3)
  series = matrix(nrow=L, ncol=n)
  for (i in seq_len(n))
    series[,i] = s[[I(i,K)]] + rnorm(L,sd=0.01)
  # Accessor restricted to the n generated series; NULL when out of range.
  getRefSeries = function(indices) {
    indices = indices[indices <= n]
    if (length(indices)>0) series[,indices] else NULL
  }
  # Medoids are the exact reference shapes: each synchrone should be
  # (close to) 100 times its reference, since each group has n/K = 100 members.
  synchrones = computeSynchrones(bigmemory::as.big.matrix(cbind(s1,s2,s3)),
    getRefSeries, n, 100, verbose=TRUE, parll=FALSE)
  expect_equal(dim(synchrones), c(L,K))
  for (i in seq_len(K))
    expect_equal(synchrones[,i]/100, s[[i]], tolerance=0.01)
})

# Helper function to divide indices into balanced sets
test_that("Helper function to spread indices work properly",
{
  indices <- 1:400

  # bigger nb_per_set than length(indices): everything in one set
  expect_equal(epclust:::.spreadIndices(indices,500), list(indices))

  # nb_per_set == length(indices)
  expect_equal(epclust:::.spreadIndices(indices,400), list(indices))

  # length(indices) %% nb_per_set == 0: exact partition into equal sets
  expect_equal(epclust:::.spreadIndices(indices,200),
    c( list(indices[1:200]), list(indices[201:400]) ))
  expect_equal(epclust:::.spreadIndices(indices,100),
    c( list(indices[1:100]), list(indices[101:200]),
      list(indices[201:300]), list(indices[301:400]) ))

  # length(indices) %/% nb_per_set == 1, remainder 100: single (oversized) set
  expect_equal(epclust:::.spreadIndices(indices,300), list(indices))

  # length(indices) %/% nb_per_set == 2, remainder 42: the 42 leftover indices
  # are spread over the 2 sets (21 each)
  repartition <- epclust:::.spreadIndices(indices,179)
  expect_equal(length(repartition), 2)
  expect_equal(length(repartition[[1]]), 179 + 21)
  # FIX: original asserted repartition[[1]] twice; the second set's size
  # was never checked.
  expect_equal(length(repartition[[2]]), 179 + 21)
})

test_that("clusteringTask1 behave as expected",
{
  # n=900 noisy series cycling over K1=60 reference curves.
  n = 900
  x = seq(0,9.5,0.1)
  L = length(x) #96 1/4h
  K1 = 60
  s = lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
  series = matrix(nrow=L, ncol=n)
  for (i in seq_len(n))
    series[,i] = s[[I(i,K1)]] + rnorm(L,sd=0.01)
  getSeries = function(indices) {
    indices = indices[indices <= n]
    if (length(indices)>0) series[,indices] else NULL
  }
  # Wavelet contributions (haar / absolute) feed the stage-1 clustering;
  # PAM on transposed contributions returns medoid indices.
  wf = "haar"
  ctype = "absolute"
  getContribs = function(indices) curvesToContribs(series[,indices],wf,ctype)
  require("cluster", quietly=TRUE)
  algoClust1 = function(contribs,K) cluster::pam(t(contribs),K,diss=FALSE)$id.med
  indices1 = clusteringTask1(1:n, getContribs, K1, algoClust1, 75,
    verbose=TRUE, parll=FALSE)
  medoids_K1 = getSeries(indices1)
  expect_equal(dim(medoids_K1), c(L,K1))
  # Not easy to evaluate result: at least we expect it to be better than
  # random selection of medoids within initial series
  distorGood = computeDistortion(series, medoids_K1)
  for (i in 1:3)
    expect_lte( distorGood, computeDistortion(series,series[,sample(1:n, K1)]) )
})

test_that("clusteringTask2 behave as expected",
{
  # Same synthetic curves as for clusteringTask1; now reduce K1=60 medoids
  # down to K2=3 final medoids.
  n = 900
  x = seq(0,9.5,0.1)
  L = length(x) #96 1/4h
  K1 = 60
  K2 = 3
  # Visual check of the reference curves (interactive only):
  #for (i in 1:60)
  #  {plot(x^(1+i/30)*cos(x+i),type="l",col=i,ylim=c(-50,50)); par(new=TRUE)}
  s = lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
  series = matrix(nrow=L, ncol=n)
  for (i in seq_len(n))
    series[,i] = s[[I(i,K1)]] + rnorm(L,sd=0.01)
  getRefSeries = function(indices) {
    indices = indices[indices <= n]
    if (length(indices)>0) series[,indices] else NULL
  }
  # Artificially simulate 60 medoids - perfect situation, all equal to one
  # of the refs
  medoids_K1 = bigmemory::as.big.matrix( sapply( 1:K1, function(i) s[[I(i,K1)]] ) )
  algoClust2 = function(dists,K) cluster::pam(dists,K,diss=TRUE)$id.med
  medoids_K2 = clusteringTask2(medoids_K1, K2, algoClust2, getRefSeries,
    n, 75, verbose=TRUE, parll=FALSE)
  expect_equal(dim(medoids_K2), c(L,K2))
  # Not easy to evaluate result: at least we expect it to be better than
  # random selection of medoids within 1...K1 (among references)
  distorGood = computeDistortion(series, medoids_K2)
  for (i in 1:3)
    expect_lte( distorGood, computeDistortion(series,medoids_K1[,sample(1:K1, K2)]) )
})