-#' @useDynLib valse
-
-Valse = setRefClass(
- Class = "Valse",
-
- fields = c(
- # User defined
-
- # regression data (size n*p, where n is the number of observations,
- # and p is the number of regressors)
- X = "matrix",
- # response data (size n*m, where n is the number of observations,
- # and m is the number of responses)
- Y = "matrix",
-
- # Optionally user defined (some default values)
-
- # power in the penalty
- gamma = "numeric",
- # minimum number of iterations for EM algorithm
- mini = "integer",
- # maximum number of iterations for EM algorithm
- maxi = "integer",
- # threshold for stopping EM algorithm
- eps = "numeric",
- # minimum number of components in the mixture
- kmin = "integer",
- # maximum number of components in the mixture
- kmax = "integer",
- # ranks for the Lasso-Rank procedure
- rank.min = "integer",
- rank.max = "integer",
-
- # Computed through the workflow
-
- # initialisation for the reparametrized conditional mean parameter
- phiInit = "numeric",
- # initialisation for the reparametrized variance parameter
- rhoInit = "numeric",
- # initialisation for the proportions
- piInit = "numeric",
- # initialisation for the allocations probabilities in each component
- tauInit = "numeric",
- # values for the regularization parameter grid
- gridLambda = "numeric",
- # je ne crois pas vraiment qu'il faille les mettre en sortie, d'autant plus qu'on construit
- # une matrice A1 et A2 pour chaque k, et elles sont grandes, donc ca coute un peu cher ...
- A1 = "integer",
- A2 = "integer",
- # collection of estimations for the reparametrized conditional mean parameters
- Phi = "numeric",
- # collection of estimations for the reparametrized variance parameters
- Rho = "numeric",
- # collection of estimations for the proportions parameters
- Pi = "numeric",
-
- #immutable (TODO:?)
- thresh = "numeric"
- ),
-
- methods = list(
- #######################
- #initialize main object
- #######################
- initialize = function(X,Y,...)
- {
- "Initialize Valse object"
-
- callSuper(...)
-
- X <<- X
- Y <<- Y
- gamma <<- ifelse (hasArg("gamma"), gamma, 1.)
- mini <<- ifelse (hasArg("mini"), mini, as.integer(5))
- maxi <<- ifelse (hasArg("maxi"), maxi, as.integer(10))
- eps <<- ifelse (hasArg("eps"), eps, 1e-6)
- kmin <<- ifelse (hasArg("kmin"), kmin, as.integer(2))
- kmax <<- ifelse (hasArg("kmax"), kmax, as.integer(3))
- rank.min <<- ifelse (hasArg("rank.min"), rank.min, as.integer(2))
- rank.max <<- ifelse (hasArg("rank.max"), rank.max, as.integer(3))
- thresh <<- 1e-15 #immutable (TODO:?)
- },
-
- ##################################
- #core workflow: compute all models
- ##################################
-
- initParameters = function(k)
+#' valse
+#'
+#' Main function
+#'
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param procedure among 'LassoMLE' or 'LassoRank'
+#' @param selecMod method to select a model among 'DDSE', 'DJump', 'BIC' or 'AIC'
+#' @param gamma integer for the power in the penalty, by default = 1
+#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
+#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 50
+#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
+#' @param kmin integer, minimum number of clusters, by default = 2
+#' @param kmax integer, maximum number of clusters, by default = 4
+#' @param rang.min integer, minimum rank in the low rank procedure, by default = 1
+#' @param rang.max integer, maximum rank in the low rank procedure, by default = 10
+#' @param ncores_outer Number of cores for the outer loop on k
+#' @param ncores_inner Number of cores for the inner loop on lambda
+#' @param size_coll_mod (Maximum) size of a collection of models
+#' @param fast TRUE to use compiled C code, FALSE for R code only
+#' @param verbose TRUE to show some execution traces
+#'
+#' @return a list with estimators of parameters
+#'
+#' @examples
+#' #TODO: a few examples
+#' @export
+valse = function(X, Y, procedure='LassoMLE', selecMod='DDSE', gamma=1, mini=10, maxi=50,
+ eps=1e-4, kmin=2, kmax=4, rang.min=1, rang.max=10, ncores_outer=1, ncores_inner=1,
+ size_coll_mod=50, fast=TRUE, verbose=FALSE)
+{
+ p = dim(X)[2]
+ m = dim(Y)[2]
+ n = dim(X)[1]
+
+ if (verbose)
+ print("main loop: over all k and all lambda")
+
+ if (ncores_outer > 1)
+ {
+ cl = parallel::makeCluster(ncores_outer, outfile='')
+ parallel::clusterExport( cl=cl, envir=environment(), varlist=c("X","Y","procedure",
+ "selecMod","gamma","mini","maxi","eps","kmin","kmax","rang.min","rang.max",
+ "ncores_outer","ncores_inner","verbose","p","m") )
+ }
+
+ # Compute models with k components
+ computeModels <- function(k)
+ {
+ if (ncores_outer > 1)
+ require("valse") #nodes start with an empty environment
+
+ if (verbose)
+ print(paste("Parameters initialization for k =",k))
+ #smallEM initializes parameters by k-means and regression model in each component,
+ #doing this 20 times, and keeping the values maximizing the likelihood after 10
+ #iterations of the EM algorithm.
+ P = initSmallEM(k, X, Y)
+ grid_lambda <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit, X, Y,
+ gamma, mini, maxi, eps, fast)
+ if (length(grid_lambda)>size_coll_mod)
+ grid_lambda = grid_lambda[seq(1, length(grid_lambda), length.out = size_coll_mod)]
+
+ if (verbose)
+ print("Compute relevant parameters")
+ #select variables according to each regularization parameter
+ #from the grid: S$selected corresponding to selected variables
+ S = selectVariables(P$phiInit, P$rhoInit, P$piInit, P$gamInit, mini, maxi, gamma,
+ grid_lambda, X, Y, 1e-8, eps, ncores_inner, fast) #TODO: 1e-8 as arg?! eps?
+
+ if (procedure == 'LassoMLE')