Add some folders; more complete package structure
author     Benjamin Auder <benjamin.auder@somewhere>
           Sat, 18 Mar 2017 01:24:13 +0000 (02:24 +0100)
committer  Benjamin Auder <benjamin.auder@somewhere>
           Sat, 18 Mar 2017 01:24:13 +0000 (02:24 +0100)
21 files changed:
README.md
pkg/DESCRIPTION
pkg/LICENSE
pkg/R/plot.R [new file with mode: 0644]
pkg/data/TODO [deleted file]
pkg/inst/testdata/TODO.csv [new file with mode: 0644]
pkg/tests/testthat.R [new file with mode: 0644]
pkg/tests/testthat/helper-clustering.R [new file with mode: 0644]
pkg/tests/testthat/test-clustering.R [new file with mode: 0644]
pkg/vignettes/valse.Rmd [new file with mode: 0644]
src/adapters/a.EMGLLF.o [deleted file]
src/adapters/a.EMGrank.o [deleted file]
src/adapters/a.constructionModelesLassoMLE.o [deleted file]
src/adapters/a.constructionModelesLassoRank.o [deleted file]
src/adapters/a.selectiontotale.o [deleted file]
src/sources/EMGLLF.o [deleted file]
src/sources/EMGrank.o [deleted file]
src/sources/constructionModelesLassoMLE.o [deleted file]
src/sources/constructionModelesLassoRank.o [deleted file]
src/sources/selectiontotale.o [deleted file]
src/valse.so [deleted file]

index ada56d8..30d87ce 100644 (file)
--- a/README.md
+++ b/README.md
@@ -5,3 +5,8 @@ This code is the applied part of the PhD thesis of [Benjamin Goehry](http://www
 ## Description
 
 TODO : see R package
+
+Find a (more) interesting dataset (than the others)?
+Add some toy datasets for the tests (or pick some from MASS?)
+
+ED: I simulated something basic via 'generateXYdefault(10,5,6,2)'
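The note above points at 'generateXYdefault(10,5,6,2)' as a quick way to produce test data. Below is a minimal sketch (not part of the commit) of how such a toy dataset could be generated and stored under pkg/inst/testdata/; it assumes generateXYdefault(n, p, q, k) returns a list with components X and Y, and reuses the data_test.RData name mentioned later in this commit.

    # Hypothetical sketch: build a small toy dataset for the tests, assuming
    # generateXYdefault(n, p, q, k) returns a list with components X and Y.
    library(valse)
    set.seed(1)
    data_test <- generateXYdefault(10, 5, 6, 2)
    str(data_test$X)  # covariates
    str(data_test$Y)  # responses
    save(data_test, file = "pkg/inst/testdata/data_test.RData")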
index 9d8a677..5a8bc18 100644 (file)
@@ -1,17 +1,19 @@
 Package: valse
-Title: VAriabLe SElection with mixture of models
+Title: Variable Selection with Mixture of Models
 Date: 2016-12-01
 Version: 0.1-0
 Description: Two methods are implemented to cluster data with finite mixture regression models.
-              Those procedures deal with high-dimensional covariates and responses through a variable selection
-              procedure based on the Lasso estimator. A low-rank constraint could be added, computed for the Lasso-Rank procedure.
-              A collection of models is constructed, varying the level of sparsity and the number of clusters, and a model is selected
-              using a model selection criterion (slope heuristic, BIC or AIC).
-              Details of the procedure are provided in 'Model-based clustering for high-dimensional data. Application to functional data'
-              by Emilie Devijver, published in Advances in Data Analysis and Clustering (2016)
+    These procedures deal with high-dimensional covariates and responses through a variable
+    selection procedure based on the Lasso estimator. A low-rank constraint can be added;
+    it is computed by the Lasso-Rank procedure.
+    A collection of models is constructed, varying the level of sparsity and the number of
+    clusters, and a model is selected using a model selection criterion (slope heuristic,
+    BIC or AIC). Details of the procedure are provided in 'Model-based clustering for
+    high-dimensional data. Application to functional data' by Emilie Devijver, published in
+    Advances in Data Analysis and Classification (2016).
 Author: Benjamin Auder <Benjamin.Auder@math.u-psud.fr> [aut,cre],
+    Emilie Devijver <Emilie.Devijver@kuleuven.be> [aut],
     Benjamin Goehry <Benjamin.Goehry@math.u-psud.fr> [aut]
-    Emilie Devijver <Emilie.Devijver@kuleuven.be> [aut]
 Maintainer: Benjamin Auder <Benjamin.Auder@math.u-psud.fr>
 Depends:
     R (>= 3.0.0)
@@ -20,8 +22,9 @@ Imports:
     methods
 Suggests:
     parallel,
-    testthat,
-    knitr
+    testthat,
+    devtools,
+    rmarkdown
 URL: http://git.auder.net/?p=valse.git
 License: MIT + file LICENSE
 VignetteBuilder: knitr
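The Description above mentions selecting one model from the constructed collection via a model selection criterion (slope heuristic, BIC or AIC). As a purely generic illustration (not valse's actual API), BIC-based selection reduces to picking the minimizer of -2*logLik + df*log(n) over the collection:

    # Generic, package-agnostic sketch of BIC-based model selection.
    # Each element of 'models' is assumed to carry a log-likelihood, a number of
    # free parameters (df) and the sample size n; this is NOT valse's internal structure.
    selectByBIC <- function(models)
    {
      bic <- sapply(models, function(m) -2 * m$loglik + m$df * log(m$n))
      models[[ which.min(bic) ]]
    }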
index 727af26..a212458 100644 (file)
@@ -1,6 +1,6 @@
 Copyright (c)
-       2014-2016, Emilie Devijver
   2014-2017, Benjamin Auder
+       2014-2017, Emilie Devijver
        2016-2017, Benjamin Goehry
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/pkg/R/plot.R b/pkg/R/plot.R
new file mode 100644 (file)
index 0000000..a8da583
--- /dev/null
@@ -0,0 +1 @@
+#TODO: adapt Emilie's plots from reports/...
diff --git a/pkg/data/TODO b/pkg/data/TODO
deleted file mode 100644 (file)
index 7e3c7ec..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-Find a (more) interesting dataset (than the others)?
-Add some toy datasets for the tests (or pick some from MASS?)
-
-ED: I simulated something basic via 'generateXYdefault(10,5,6,2)'
diff --git a/pkg/inst/testdata/TODO.csv b/pkg/inst/testdata/TODO.csv
new file mode 100644 (file)
index 0000000..d679966
--- /dev/null
@@ -0,0 +1 @@
+or alternatively data_test.RData, also possible
diff --git a/pkg/tests/testthat.R b/pkg/tests/testthat.R
new file mode 100644 (file)
index 0000000..d2761ea
--- /dev/null
@@ -0,0 +1,4 @@
+library(testthat)
+library(valse) # or load_all()
+
+test_check("valse")
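With this bootstrap file in place (and devtools now listed in Suggests), the test suite can be run either through the standard package check or interactively; a possible workflow, assuming the package sources stay under pkg/:

    # From the repository root (the pkg/ path is an assumption of this sketch):
    #   R CMD build pkg && R CMD check valse_0.1-0.tar.gz
    # or interactively during development:
    devtools::load_all("pkg")
    devtools::test("pkg")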
diff --git a/pkg/tests/testthat/helper-clustering.R b/pkg/tests/testthat/helper-clustering.R
new file mode 100644 (file)
index 0000000..785b7f0
--- /dev/null
@@ -0,0 +1,11 @@
+# Compute the square root of the mean (over all series) normalized squared distance to the closest medoid.
+computeDistortion <- function(series, medoids)
+{
+       n <- ncol(series)
+       L <- nrow(series)
+       distortion <- 0.
+       for (i in seq_len(n))
+               distortion <- distortion + min( colSums( sweep(medoids,1,series[,i],'-')^2 ) / L )
+
+       sqrt( distortion / n )
+}
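For reference, a small usage sketch of computeDistortion as defined above (both arguments store one series per column, as in the tests below); the random data here is only for illustration:

    # Illustration only: series and medoids are stored column-wise.
    L <- 50; n <- 200; K <- 6
    series <- matrix(rnorm(L * n), nrow = L, ncol = n)
    medoids <- series[, sample(n, K)]
    computeDistortion(series, medoids)  # sqrt of the mean minimum normalized squared distance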
diff --git a/pkg/tests/testthat/test-clustering.R b/pkg/tests/testthat/test-clustering.R
new file mode 100644 (file)
index 0000000..2e3a431
--- /dev/null
@@ -0,0 +1,72 @@
+context("clustering")
+
+test_that("clusteringTask1 behave as expected",
+{
+       # Generate 60 reference sinusoidal series (medoids to be found),
+       # and sample 900 series around them (adding a small amount of noise)
+       n <- 900
+       x <- seq(0,9.5,0.1)
+       L <- length(x) # 96 points (one per quarter-hour)
+       K1 <- 60
+       s <- lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
+       series <- matrix(nrow=L, ncol=n)
+       for (i in seq_len(n))
+               series[,i] <- s[[ ((i-1) %% K1) + 1 ]] + rnorm(L,sd=0.01) # cycle through the K1 reference series
+
+       getSeries <- function(indices) {
+               indices <- indices[indices <= n]
+               if (length(indices)>0) as.matrix(series[,indices]) else NULL
+       }
+
+       wf <- "haar"
+       ctype <- "absolute"
+       getContribs <- function(indices) curvesToContribs(as.matrix(series[,indices]),wf,ctype)
+
+       require("cluster", quietly=TRUE)
+       algoClust1 <- function(contribs,K) cluster::pam(t(contribs),K,diss=FALSE)$id.med
+       indices1 <- clusteringTask1(1:n, getContribs, K1, algoClust1, 140, verbose=TRUE, parll=FALSE)
+       medoids_K1 <- getSeries(indices1)
+
+       expect_equal(dim(medoids_K1), c(L,K1))
+       # Not easy to evaluate the result: at least we expect it to be better than a random
+       # selection of medoids from the initial series
+       distor_good <- computeDistortion(series, medoids_K1)
+       for (i in 1:3)
+               expect_lte( distor_good, computeDistortion(series,series[,sample(1:n, K1)]) )
+})
+
+test_that("clusteringTask2 behave as expected",
+{
+       # Same 60 reference sinusoidal series as in the clusteringTask1 test,
+       # but this time we consider them as medoids - skipping stage 1.
+       # Here too we sample 900 series around the 60 "medoids"
+       n <- 900
+       x <- seq(0,9.5,0.1)
+       L <- length(x) # 96 points (one per quarter-hour)
+       K1 <- 60
+       K2 <- 3
+       #for (i in 1:60) {plot(x^(1+i/30)*cos(x+i),type="l",col=i,ylim=c(-50,50)); par(new=TRUE)}
+       s <- lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
+       series <- matrix(nrow=L, ncol=n)
+       for (i in seq_len(n))
+               series[,i] <- s[[ ((i-1) %% K1) + 1 ]] + rnorm(L,sd=0.01) # cycle through the K1 reference series
+
+       getSeries <- function(indices) {
+               indices <- indices[indices <= n]
+               if (length(indices)>0) as.matrix(series[,indices]) else NULL
+       }
+
+       # Perfect situation: all medoids "after stage 1" are ~good
+       algoClust2 <- function(dists,K) cluster::pam(dists,K,diss=TRUE)$id.med
+       indices2 <- clusteringTask2(1:K1, getSeries, K2, algoClust2, 210, 3, 4, 8, "little",
+               verbose=TRUE, parll=FALSE)
+       medoids_K2 <- getSeries(indices2)
+
+       expect_equal(dim(medoids_K2), c(L,K2))
+       # Not easy to evaluate the result: at least we expect it to be better than a random
+       # selection of synchrones within 1...K1 (on which distance computations + clustering were run)
+       distor_good <- computeDistortion(series, medoids_K2)
+#TODO: This fails; why?
+#      for (i in 1:3)
+#              expect_lte( distor_good, computeDistortion(series, series[,sample(1:K1,3)]) )
+})
diff --git a/pkg/vignettes/valse.Rmd b/pkg/vignettes/valse.Rmd
new file mode 100644 (file)
index 0000000..e8164a1
--- /dev/null
@@ -0,0 +1,23 @@
+---
+title: "valse package vignette"
+output: html_document
+---
+
+```{r include = FALSE}
+library(valse)
+```
+
+The code below demonstrates blabla... in the [valse](https://github.com/blabla/valse) package.
+Each plot displays blabla...
+
+## Some nice plots 1
+
+```{r}
+#plotBla1(...)
+```
+
+## Some nice colors 2
+
+```{r}
+#plotBla2(...)
+```
diff --git a/src/adapters/a.EMGLLF.o b/src/adapters/a.EMGLLF.o
deleted file mode 100644 (file)
index 70d7499..0000000
Binary files a/src/adapters/a.EMGLLF.o and /dev/null differ
diff --git a/src/adapters/a.EMGrank.o b/src/adapters/a.EMGrank.o
deleted file mode 100644 (file)
index d9f1d09..0000000
Binary files a/src/adapters/a.EMGrank.o and /dev/null differ
diff --git a/src/adapters/a.constructionModelesLassoMLE.o b/src/adapters/a.constructionModelesLassoMLE.o
deleted file mode 100644 (file)
index 5c98e9f..0000000
Binary files a/src/adapters/a.constructionModelesLassoMLE.o and /dev/null differ
diff --git a/src/adapters/a.constructionModelesLassoRank.o b/src/adapters/a.constructionModelesLassoRank.o
deleted file mode 100644 (file)
index 539977a..0000000
Binary files a/src/adapters/a.constructionModelesLassoRank.o and /dev/null differ
diff --git a/src/adapters/a.selectiontotale.o b/src/adapters/a.selectiontotale.o
deleted file mode 100644 (file)
index 13adb60..0000000
Binary files a/src/adapters/a.selectiontotale.o and /dev/null differ
diff --git a/src/sources/EMGLLF.o b/src/sources/EMGLLF.o
deleted file mode 100644 (file)
index e50edc6..0000000
Binary files a/src/sources/EMGLLF.o and /dev/null differ
diff --git a/src/sources/EMGrank.o b/src/sources/EMGrank.o
deleted file mode 100644 (file)
index 97a0d30..0000000
Binary files a/src/sources/EMGrank.o and /dev/null differ
diff --git a/src/sources/constructionModelesLassoMLE.o b/src/sources/constructionModelesLassoMLE.o
deleted file mode 100644 (file)
index 38ebfdd..0000000
Binary files a/src/sources/constructionModelesLassoMLE.o and /dev/null differ
diff --git a/src/sources/constructionModelesLassoRank.o b/src/sources/constructionModelesLassoRank.o
deleted file mode 100644 (file)
index dce02cf..0000000
Binary files a/src/sources/constructionModelesLassoRank.o and /dev/null differ
diff --git a/src/sources/selectiontotale.o b/src/sources/selectiontotale.o
deleted file mode 100644 (file)
index 547475d..0000000
Binary files a/src/sources/selectiontotale.o and /dev/null differ
diff --git a/src/valse.so b/src/valse.so
deleted file mode 100755 (executable)
index ee29721..0000000
Binary files a/src/valse.so and /dev/null differ