#' constructionModelesLassoMLE
#'
#' Construct a collection of models with the Lasso-MLE procedure.
#'
#' @param phiInit an initialization for phi, obtained by initSmallEM.R
#' @param rhoInit an initialization for rho, obtained by initSmallEM.R
#' @param piInit an initialization for pi, obtained by initSmallEM.R
#' @param gamInit an initialization for gam, obtained by initSmallEM.R
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param gamma integer for the power in the penalty, by default = 1
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param S output of selectVariables.R
#' @param ncores Number of cores, by default = 3
#' @param fast TRUE to use compiled C code, FALSE for R code only
#' @param verbose TRUE to show some execution traces
#'
#' @return a list with several models, defined by phi, rho, pi, llh
#'
#' @export
constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
  maxi, gamma, X, Y, eps, S, ncores, fast, verbose)
{
  if (ncores > 1)
  {
    cl <- parallel::makeCluster(ncores, outfile = "")
    # NOTE: "piInit" was previously missing from this list even though
    # computeAtLambda reads it (k <- length(piInit), and the EMGLLF call);
    # it is now exported along with every other argument.
    parallel::clusterExport(cl, envir = environment(), varlist = c("phiInit",
      "rhoInit", "piInit", "gamInit", "mini", "maxi", "gamma", "X", "Y", "eps",
      "S", "ncores", "fast", "verbose"))
  }

  # Individual model computation: fit the (unpenalized) MLE restricted to the
  # variables selected for the lambda-th penalty level in S.
  computeAtLambda <- function(lambda)
  {
    if (ncores > 1)
      require("valse") #nodes start with an empty environment

    if (verbose)
      print(paste("Computations for lambda=", lambda))

    n <- nrow(X)
    p <- ncol(X)
    m <- ncol(Y)
    k <- length(piInit)
    sel.lambda <- S[[lambda]]$selected
    # col.sel = which(colSums(sel.lambda)!=0) #if boolean matrix
    col.sel <- which(sapply(sel.lambda, length) > 0) #if list of selected vars
    if (length(col.sel) == 0)
      return(NULL)

    # lambda == 0 because we compute the MLE: no penalization here
    res <- EMGLLF(array(phiInit[col.sel, , ], dim = c(length(col.sel), m, k)),
      rhoInit, piInit, gamInit, mini, maxi, gamma, 0,
      as.matrix(X[, col.sel]), Y, eps, fast)

    # Eval dimension from the result + selected: re-embed the estimated phi
    # (computed on the selected columns only) into a full p x m x k array.
    phiLambda2 <- res$phi
    rhoLambda <- res$rho
    piLambda <- res$pi
    phiLambda <- array(0, dim = c(p, m, k))
    for (j in seq_along(col.sel))
      phiLambda[col.sel[j], sel.lambda[[j]], ] <- phiLambda2[j, sel.lambda[[j]], ]
    dimension <- length(unlist(sel.lambda))

    ## Affectations: posterior cluster probabilities for each observation
    Gam <- matrix(0, ncol = length(piLambda), nrow = n)
    for (i in seq_len(n))
    {
      for (r in seq_along(piLambda))
      {
        sqNorm2 <- sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
        Gam[i, r] <- piLambda[r] * exp(-0.5 * sqNorm2) * det(rhoLambda[, , r])
      }
    }
    Gam2 <- Gam / rowSums(Gam)
    affec <- apply(Gam2, 1, which.max)
    proba <- Gam2
    # llh = c(log-likelihood proxy, model dimension) used for model selection
    LLH <- c(sum(log(apply(Gam, 1, sum))), (dimension + m + 1) * k - 1)
    # ## Computation of the loglikelihood (alternative, log-scale version kept
    # ## for reference; avoids numerical underflow in exp())
    # # Precompute det(rhoLambda[,,r]) for r in 1...k
    # detRho <- sapply(1:k, function(r) gdet(rhoLambda[, , r]))
    # sumLogLLH <- 0
    # for (i in 1:n)
    # {
    #   # Update gam[,]; use log to avoid numerical problems
    #   logGam <- sapply(1:k, function(r) {
    #     log(piLambda[r]) + log(detRho[r]) - 0.5 *
    #       sum((Y[i, ] %*% rhoLambda[, , r] - X[i, ] %*% phiLambda[, , r])^2)
    #   })
    #
    #   #logGam <- logGam - max(logGam) #adjust without changing proportions -> change the LLH
    #   gam <- exp(logGam)
    #   norm_fact <- sum(gam)
    #   sumLogLLH <- sumLogLLH + log(norm_fact) - m/2 * log(2 * base::pi)
    # }
    # llhLambda <- c(-sumLogLLH/n, (dimension + m + 1) * k - 1)
    list(phi = phiLambda, rho = rhoLambda, pi = piLambda, llh = LLH,
      affec = affec, proba = proba)
  }

  # For each lambda, computation of the parameters
  out <-
    if (ncores > 1) {
      parallel::parLapply(cl, seq_along(S), computeAtLambda)
    } else {
      lapply(seq_along(S), computeAtLambda)
    }

  if (ncores > 1)
    parallel::stopCluster(cl)

  out
}