--- /dev/null
+#' constructionModelesLassoMLE
+#'
+#' For each lambda of the grid, construct a model by re-estimating the parameters
+#' with the EM algorithm (EMGLLF with no penalty) restricted to the variables
+#' selected by the Lasso, and compute its log-likelihood and dimension.
+#'
+#' @param phiInit,rhoInit,piInit,gamInit initial parameters (phi is p x m x k)
+#' @param mini,maxi minimum and maximum number of EM iterations
+#' @param gamma power of the penalty weights, passed to EMGLLF
+#' @param X,Y matrices of covariates (n x p) and responses (n x m)
+#' @param seuil threshold used for variable selection
+#' @param tau convergence threshold for EMGLLF
+#' @param selected list, indexed by lambda, of the selected variables
+#' @param ncores number of cores for parallel execution
+#' @param verbose TRUE to print progress information
+#'
+#' @return a list with one element per lambda, each containing phi, rho, pi and
+#'   llh (log-likelihood and model dimension), or NULL when no variable is selected
+#' @export
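+#' @examples
+#' \dontrun{
+#' # Illustrative sketch only: X (n x p) and Y (n x m) are data matrices, 'selected'
+#' # is assumed to be the per-lambda list of selected variables returned by
+#' # selectVariables(), and the tuning values below are arbitrary.
+#' P <- initSmallEM(k = 2, X, Y)
+#' models <- constructionModelesLassoMLE(P$phiInit, P$rhoInit, P$piInit, P$gamInit,
+#'   mini = 10, maxi = 100, gamma = 1, X, Y, seuil = 1e-4, tau = 1e-6,
+#'   selected = selected, ncores = 1, verbose = TRUE)
+#' }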
+constructionModelesLassoMLE = function(phiInit, rhoInit, piInit, gamInit, mini, maxi,
+ gamma, X, Y, seuil, tau, selected, ncores=3, verbose=FALSE)
+{
+ if (ncores > 1)
+ {
+ cl = parallel::makeCluster(ncores)
+ parallel::clusterExport( cl, envir=environment(),
+ varlist=c("phiInit","rhoInit","gamInit","mini","maxi","gamma","X","Y","seuil",
+ "tau","selected","ncores","verbose") )
+ }
+
+ # Individual model computation
+ computeAtLambda <- function(lambda)
+ {
+ if (ncores > 1)
+ require("valse") #// nodes start with an ampty environment
+
+ if (verbose)
+ print(paste("Computations for lambda=",lambda))
+
+ n = dim(X)[1]
+ p = dim(phiInit)[1]
+ m = dim(phiInit)[2]
+ k = dim(phiInit)[3]
+
+ sel.lambda = selected[[lambda]]
+# col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
+ col.sel <- which( sapply(sel.lambda,length) > 0 ) #if list of selected vars
+
+ if (length(col.sel) == 0)
+ return (NULL)
+
+ # lambda == 0 because we compute the MLE: no penalization here
+ res_EM = EMGLLF(phiInit[col.sel,,],rhoInit,piInit,gamInit,mini,maxi,gamma,0,
+ X[,col.sel],Y,tau)
+
+ # Eval dimension from the result + selected
+ phiLambda2 = res_EM$phi
+ rhoLambda = res_EM$rho
+ piLambda = res_EM$pi
+ phiLambda = array(0, dim = c(p,m,k))
+ for (j in seq_along(col.sel))
+ phiLambda[col.sel[j],,] = phiLambda2[j,,]
+
+ dimension = 0
+ for (j in 1:p)
+ {
+ b = setdiff(1:m, sel.lambda[[j]])
+ if (length(b) > 0)
+ phiLambda[j,b,] = 0.0
+ dimension = dimension + length(sel.lambda[[j]])
+ }
+
+ # Compute the likelihood with all the estimated parameters
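+ # Mixture density of observation i: sum over r of
+ #   pi_r * det(rho_r) / (2*pi)^(m/2) * exp(-||Y_i rho_r - X_i phi_r||^2 / 2)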
+ densite = vector("double",n)
+ for (r in 1:k)
+ {
+ delta = Y%*%rhoLambda[,,r] - (X[, col.sel]%*%phiLambda[col.sel,,r])
+ densite = densite + piLambda[r] *
+ det(rhoLambda[,,r])/(sqrt(2*base::pi))^m * exp(-rowSums(delta^2)/2.0)
+ }
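+ # Keep the log-likelihood together with the model dimension; both are reused in
+ # main.R to build the summary table passed to capushe for model selection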
+ llhLambda = c( sum(log(densite)), (dimension+m+1)*k-1 )
+ list("phi"= phiLambda, "rho"= rhoLambda, "pi"= piLambda, "llh" = llhLambda)
+ }
+
+ # For each lambda of the grid, compute the coefficients
+ out =
+ if (ncores > 1)
+ parallel::parLapply(cl, seq_along(selected), computeAtLambda)
+ else
+ lapply(seq_along(selected), computeAtLambda)
+
+ if (ncores > 1)
+ parallel::stopCluster(cl)
+
+ out
+}
'plot.R'
'main.R'
'selectVariables.R'
- 'filterModels.R'
'constructionModelesLassoRank.R'
'constructionModelesLassoMLE.R'
'computeGridLambda.R'
#' @include computeGridLambda.R
#' @include constructionModelesLassoMLE.R
#' @include constructionModelesLassoRank.R
-#' @include filterModels.R
#' @include selectVariables.R
#' @include main.R
#' @include plot.R
#'
#' @export
constructionModelesLassoMLE = function(phiInit, rhoInit, piInit, gamInit, mini, maxi,
- gamma, X, Y, thresh, tau, S, ncores=3, artefact = 1e3, verbose=FALSE)
+ gamma, X, Y, thresh, tau, S, ncores=3, artefact = 1e3, verbose=FALSE)
{
- if (ncores > 1)
- {
- cl = parallel::makeCluster(ncores)
- parallel::clusterExport( cl, envir=environment(),
- varlist=c("phiInit","rhoInit","gamInit","mini","maxi","gamma","X","Y","thresh",
- "tau","S","ncores","verbose") )
- }
-
- # Individual model computation
- computeAtLambda <- function(lambda)
- {
- if (ncores > 1)
- require("valse") #// nodes start with an empty environment
-
- if (verbose)
- print(paste("Computations for lambda=",lambda))
-
- n = dim(X)[1]
- p = dim(phiInit)[1]
- m = dim(phiInit)[2]
- k = dim(phiInit)[3]
-
- sel.lambda = S[[lambda]]$selected
- # col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
- col.sel <- which( sapply(sel.lambda,length) > 0 ) #if list of selected vars
-
- if (length(col.sel) == 0)
- {return (NULL)} else {
-
- # lambda == 0 because we compute the EMV: no penalization here
- res_EM = EMGLLF(phiInit[col.sel,,],rhoInit,piInit,gamInit,mini,maxi,gamma,0,
- X[,col.sel],Y,tau)
-
- # Eval dimension from the result + selected
- phiLambda2 = res_EM$phi
- rhoLambda = res_EM$rho
- piLambda = res_EM$pi
- phiLambda = array(0, dim = c(p,m,k))
- for (j in seq_along(col.sel))
- phiLambda[col.sel[j],,] = phiLambda2[j,,]
-
- dimension = 0
- for (j in 1:p)
- {
- b = setdiff(1:m, sel.lambda[[j]])## I get rows and columns slightly mixed up: is this the right way round?
- ## for the dimension I would simply have used length(unlist(sel.lambda)), but I don't know whether it is fast
- if (length(b) > 0)
- phiLambda[j,b,] = 0.0
- dimension = dimension + sum(sel.lambda[[j]]!=0)
- }
-
- # Computation of the loglikelihood
- densite = vector("double",n)
- for (r in 1:k)
- {
- delta = (Y%*%rhoLambda[,,r] - (X[, col.sel]%*%phiLambda[col.sel,,r]))/artefact
- print(max(delta))
- densite = densite + piLambda[r] *
- det(rhoLambda[,,r])/(sqrt(2*base::pi))^m * exp(-tcrossprod(delta)/2.0)
- }
- llhLambda = c( sum(artefact^2 * log(densite)), (dimension+m+1)*k-1 )
- list("phi"= phiLambda, "rho"= rhoLambda, "pi"= piLambda, "llh" = llhLambda)
- }
- }
-
- # For each lambda, computation of the parameters
- out =
- if (ncores > 1)
- parLapply(cl, 1:length(S), computeAtLambda)
- else
- lapply(1:length(S), computeAtLambda)
-
- if (ncores > 1)
- parallel::stopCluster(cl)
-
- out
+ if (ncores > 1)
+ {
+ cl = parallel::makeCluster(ncores)
+ parallel::clusterExport( cl, envir=environment(),
+ varlist=c("phiInit","rhoInit","gamInit","mini","maxi","gamma","X","Y","thresh",
+ "tau","S","ncores","verbose") )
+ }
+
+ # Individual model computation
+ computeAtLambda <- function(lambda)
+ {
+ if (ncores > 1)
+ require("valse") #nodes start with an empty environment
+
+ if (verbose)
+ print(paste("Computations for lambda=",lambda))
+
+ n = dim(X)[1]
+ p = dim(phiInit)[1]
+ m = dim(phiInit)[2]
+ k = dim(phiInit)[3]
+
+ sel.lambda = S[[lambda]]$selected
+# col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
+ col.sel <- which( sapply(sel.lambda,length) > 0 ) #if list of selected vars
+
+ if (length(col.sel) == 0)
+ return (NULL)
+
+ # lambda == 0 because we compute the MLE: no penalization here
+ res = EMGLLF(phiInit[col.sel,,],rhoInit,piInit,gamInit,mini,maxi,gamma,0,
+ X[,col.sel],Y,tau)
+
+ # Eval dimension from the result + selected
+ phiLambda2 = res$phi
+ rhoLambda = res$rho
+ piLambda = res$pi
+ phiLambda = array(0, dim = c(p,m,k))
+ for (j in seq_along(col.sel))
+ phiLambda[col.sel[j],,] = phiLambda2[j,,]
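+ # Model dimension: total number of (covariate, response) coefficients selected by the Lasso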
+ dimension = length(unlist(sel.lambda))
+
+ # Computation of the loglikelihood
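+ # Residuals are divided by 'artefact' (presumably to keep exp() away from numerical
+ # underflow); the resulting log-density sum is rescaled by artefact^2 below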
+ densite = vector("double",n)
+ for (r in 1:k)
+ {
+ delta = (Y%*%rhoLambda[,,r] - (X[, col.sel]%*%phiLambda[col.sel,,r]))/artefact
+ if (verbose)
+ print(max(delta))
+ densite = densite + piLambda[r] *
+ det(rhoLambda[,,r])/(sqrt(2*base::pi))^m * exp(-rowSums(delta^2)/2.0)
+ }
+ llhLambda = c( sum(artefact^2 * log(densite)), (dimension+m+1)*k-1 )
+ list("phi"= phiLambda, "rho"= rhoLambda, "pi"= piLambda, "llh" = llhLambda)
+ }
+
+ # For each lambda, computation of the parameters
+ out =
+ if (ncores > 1)
+ parallel::parLapply(cl, 1:length(S), computeAtLambda)
+ else
+ lapply(1:length(S), computeAtLambda)
+
+ if (ncores > 1)
+ parallel::stopCluster(cl)
+
+ out
}
+++ /dev/null
-#' Among a collection of models, this function constructs a subcollection of models with
-#' models having strictly different dimensions, keeping the model which minimizes
-#' the likelihood if there were several with the same dimension
-#'
-#' @param LLF a matrix, the first column corresponds to likelihoods for several models
-#' the second column corresponds to the dimensions of the corresponding models.
-#'
-#' @return a list with indices, a vector of indices selected models,
-#' and D1, a vector of corresponding dimensions
-#'
-#' @export
-filterModels = function(LLF)
-{
- D = LLF[,2]
- D1 = unique(D)
-
- indices = rep(1, length(D1))
- #select argmax MLE
- if (length(D1)>2)
- {
- for (i in 1:length(D1))
- {
- A = c()
- for (j in 1:length(D))
- {
- if(D[[j]]==D1[[i]])
- a = c(a, LLF[j,1])
- }
- b = max(a)
- #indices[i] : first indices of the binary vector where u_i ==1
- indices[i] = which.max(LLF == b)
- }
- }
-
- return (list(indices=indices,D1=D1))
-}
if (ncores_outer > 1)
{
- cl = parallel::makeCluster(ncores_outer)
+ cl = parallel::makeCluster(ncores_outer, outfile='')
parallel::clusterExport( cl=cl, envir=environment(), varlist=c("X","Y","procedure",
"selecMod","gamma","mini","maxi","eps","kmin","kmax","rang.min","rang.max",
- "ncores_outer","ncores_inner","verbose","p","m","k","tableauRecap") )
+ "ncores_outer","ncores_inner","verbose","p","m") )
}
# Compute models with k components
P = initSmallEM(k, X, Y)
grid_lambda <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit, X, Y,
gamma, mini, maxi, eps)
- # TODO: 100 = magic number
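+ # Keep at most size_coll_mod values of lambda, evenly spaced along the grid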
if (length(grid_lambda)>size_coll_mod)
grid_lambda = grid_lambda[seq(1, length(grid_lambda), length.out = size_coll_mod)]
print('run the procedure Lasso-MLE')
#compute parameter estimations, with the Maximum Likelihood
#Estimator, restricted on selected variables.
- models <- constructionModelesLassoMLE(P$phiInit, P$rhoInit, P$piInit, P$gamInit, mini,
- maxi, gamma, X, Y, thresh, eps, S, ncores_inner, artefact = 1e3, verbose)
+ models <- constructionModelesLassoMLE(P$phiInit, P$rhoInit, P$piInit, P$gamInit,
+ mini, maxi, gamma, X, Y, thresh, eps, S, ncores_inner, artefact = 1e3, verbose)
}
else
{
models <- constructionModelesLassoRank(S$Pi, S$Rho, mini, maxi, X, Y, eps, A1,
rank.min, rank.max, ncores_inner, verbose)
}
+ # Some models can be NULL after selectVariables(); drop them
+ models = models[!sapply(models, is.null)]
models
}
return (models_list)
}
- # Get summary "tableauRecap" from models ; TODO: up to line 114, in my opinion this is wrong :/
- tableauRecap = sapply( models_list, function(models) {
- llh = do.call(rbind, lapply(models, function(model) model$llh))
- LLH = llh[-1,1]
- D = llh[-1,2]
- c(LLH, D, rep(k, length(LLH)), 1:length(LLH))
- })
- tableauRecap
- if (verbose)
- print('Model selection')
- tableauRecap = tableauRecap[rowSums(tableauRecap[, 2:4])!=0,]
- tableauRecap = tableauRecap[!is.infinite(tableauRecap[,1]),]
+ # Get summary "tableauRecap" from models
+ tableauRecap = do.call( rbind, lapply( models_list, function(models) {
+ # For one group of models (same k, several values of lambda):
+ llh = do.call(rbind, lapply(models, function(model) model$llh))
+ LLH = llh[,1]
+ D = llh[,2]
+ k = length(models[[1]]$pi)
+ cbind(LLH, D, rep(k, length(models)), 1:length(models))
+ } ) )
+ tableauRecap = tableauRecap[rowSums(tableauRecap[, 2:4])!=0,]
+ tableauRecap = tableauRecap[is.finite(tableauRecap[,1]),]
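+ # capushe::capushe() expects a 4-column matrix (model, penalty shape, complexity,
+ # contrast); the model dimension is used here both as penalty shape and complexity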
data = cbind(1:dim(tableauRecap)[1], tableauRecap[,2], tableauRecap[,2], tableauRecap[,1])
modSel = capushe::capushe(data, n)