#' constructionModelesLassoMLE
#'
#' Construct a collection of models with the Lasso-MLE procedure.
#'
#' @param phiInit an initialization for phi, obtained from initSmallEM.R
#' @param rhoInit an initialization for rho, obtained from initSmallEM.R
#' @param piInit an initialization for pi, obtained from initSmallEM.R
#' @param gamInit an initialization for gam, obtained from initSmallEM.R
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param gamma integer for the power in the penalty, by default = 1
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, convergence threshold for the EM algorithm, by default = 1e-4
#' @param S output of selectVariables.R
#' @param ncores number of cores for parallel execution, by default = 3
#' @param fast TRUE to use compiled C code, FALSE for R code only
#' @param verbose TRUE to show some execution traces
#'
#' @return a list of models, each defined by phi (the reparametrized regression parameter),
#' rho (the reparametrized covariance parameter), pi (the proportion parameter of the mixture
#' model), llh (the loglikelihood of this estimator on the training dataset, paired with the
#' model dimension), affec (the cluster assignments) and proba (the posterior probabilities
#' of the clusters). One model is returned per level of sparsity, given by several
#' regularization parameters computed automatically.
#'
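#' @examples
#' \dontrun{
#' # Illustrative sketch only: X, Y, S and init are assumed to already exist
#' # (initializations from initSmallEM.R and S from selectVariables.R, as
#' # documented above); the calls producing them are omitted here.
#' models <- constructionModelesLassoMLE(init$phiInit, init$rhoInit,
#'   init$piInit, init$gamInit, mini = 10, maxi = 100, gamma = 1,
#'   X = X, Y = Y, eps = 1e-4, S = S, ncores = 1, fast = FALSE, verbose = TRUE)
#' # Each non-NULL element holds phi, rho, pi, llh = c(loglikelihood, dimension),
#' # affec and proba; a BIC-style score per model could then be computed as
#' sapply(Filter(Negate(is.null), models),
#'   function(mod) -2 * mod$llh[1] + log(nrow(X)) * mod$llh[2])
#' }
#'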
#' @export
constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
  maxi, gamma, X, Y, eps, S, ncores, fast, verbose)
{
  if (ncores > 1)
  {
    cl <- parallel::makeCluster(ncores, outfile = "")
    parallel::clusterExport(cl, envir = environment(), varlist = c("phiInit",
      "rhoInit", "piInit", "gamInit", "mini", "maxi", "gamma", "X", "Y", "eps",
      "S", "ncores", "fast", "verbose"))
  }

  # Individual model computation
  computeAtLambda <- function(lambda)
  {
    if (ncores > 1)
      require("valse") # nodes start with an empty environment

    if (verbose)
      print(paste("Computations for lambda=", lambda))

    n <- nrow(X)        # number of observations
    p <- ncol(X)        # number of covariates
    m <- ncol(Y)        # dimension of the response
    k <- length(piInit) # number of mixture components
    sel.lambda <- S[[lambda]]$selected
    # col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
    col.sel <- which(sapply(sel.lambda, length) > 0) # if list of selected vars
    if (length(col.sel) == 0)
      return(NULL)

    # lambda == 0 because we compute the MLE: no penalization here
    res <- EMGLLF(array(phiInit[col.sel, , ], dim = c(length(col.sel), m, k)),
      rhoInit, piInit, gamInit, mini, maxi, gamma, 0,
      as.matrix(X[, col.sel]), Y, eps, fast)

    # Re-embed the estimates into full-size arrays and evaluate the model
    # dimension from the selected coefficients
    phiLambda2 <- res$phi
    rhoLambda <- res$rho
    piLambda <- res$pi
    phiLambda <- array(0, dim = c(p, m, k))
    for (j in seq_along(col.sel))
      phiLambda[col.sel[j], sel.lambda[[j]], ] <- phiLambda2[j, sel.lambda[[j]], ]
    dimension <- length(unlist(sel.lambda))

    ## Cluster assignments
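    # Gam[i, r] is the unnormalized posterior weight of component r for
    # observation i under the reparametrized Gaussian mixture:
    # pi_r * det(rho_r) * exp(-0.5 * ||Y_i rho_r - X_i phi_r||^2)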
    Gam <- matrix(0, ncol = length(piLambda), nrow = n)
    for (i in 1:n)
    {
      for (r in 1:length(piLambda))
      {
        sqNorm2 <- sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
        Gam[i, r] <- piLambda[r] * exp(-0.5 * sqNorm2) * det(rhoLambda[, , r])
      }
    }
    Gam2 <- Gam / rowSums(Gam)
    affec <- apply(Gam2, 1, which.max)
    proba <- Gam2
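    # LLH pairs the training loglikelihood (up to the additive constant
    # -n*m/2*log(2*pi), cf. the commented-out computation below) with the
    # number of free parameters of the model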
    LLH <- c(sum(log(rowSums(Gam))), (dimension + m + 1) * k - 1)
    # ## Computation of the loglikelihood
    # # Precompute det(rhoLambda[,,r]) for r in 1...k
    # detRho <- sapply(1:k, function(r) gdet(rhoLambda[, , r]))
    # sumLogLLH <- 0
    # for (i in 1:n)
    # {
    #   # Update gam[,]; use log to avoid numerical problems
    #   logGam <- sapply(1:k, function(r) {
    #     log(piLambda[r]) + log(detRho[r]) - 0.5 *
    #       sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
    #   })
    #
    #   # logGam <- logGam - max(logGam) # adjust without changing proportions -> changes the LLH
    #   gam <- exp(logGam)
    #   norm_fact <- sum(gam)
    #   sumLogLLH <- sumLogLLH + log(norm_fact) - m/2 * log(2 * base::pi)
    # }
    # llhLambda <- c(-sumLogLLH/n, (dimension + m + 1) * k - 1)
    list(phi = phiLambda, rho = rhoLambda, pi = piLambda, llh = LLH,
      affec = affec, proba = proba)
  }

  # For each lambda, computation of the parameters
  out <-
    if (ncores > 1) {
      parallel::parLapply(cl, seq_along(S), computeAtLambda)
    } else {
      lapply(seq_along(S), computeAtLambda)
    }

  if (ncores > 1)
    parallel::stopCluster(cl)

  out
}