#'
#' @export
EMGLLF <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
- X, Y, eps, fast = TRUE)
+ X, Y, eps, fast)
{
if (!fast)
{
# t(m) is the largest value in the grid 0.1^k such that the penalized criterion is nonincreasing
while (kk < 1000 && -a/n + lambda * sum(pi^gamma * b) <
- -sum(gam2 * log(pi2))/n + lambda * sum(pi2^gamma * b))
+ # na.rm=TRUE to handle 0*log(0)
+ -sum(gam2 * log(pi2), na.rm=TRUE)/n + lambda * sum(pi2^gamma * b))
{
pi2 <- pi + 0.1^kk * (1/n * gam2 - pi)
kk <- kk + 1
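+ # shrink the step by a factor of 10 and retry; the loop stops once the
+ # proposed pi2 no longer increases the penalized criterion (or after 1000 tries)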
{
for (mm in 1:m)
{
- S[j, mm, r] <- -rho[mm, mm, r] * ps2[j, mm, r]
- + sum(phi[-j, mm, r] * Gram2[j, -j, r])
+ S[j, mm, r] <- -rho[mm, mm, r] * ps2[j, mm, r] +
+ sum(phi[-j, mm, r] * Gram2[j, -j, r])
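+ # soft-thresholding step of the coordinate descent: coordinates with
+ # |S| <= n*lambda*pi[r]^gamma are set exactly to zero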
if (abs(S[j, mm, r]) <= n * lambda * (pi[r]^gamma)) {
phi[j, mm, r] <- 0
} else if (S[j, mm, r] > n * lambda * (pi[r]^gamma)) {
# Precompute det(rho[,,r]) for r in 1...k
detRho <- sapply(1:k, function(r) det(rho[, , r]))
+ sumLogLLH <- 0
for (i in 1:n)
{
- # Update gam[,]
- for (r in 1:k)
- {
- gam[i, r] <- pi[r] * exp(-0.5
- * sum((Y[i, ] %*% rho[, , r] - X[i, ] %*% phi[, , r])^2)) * detRho[r]
- }
+ # Update gam[,]; use log to avoid numerical problems
+ logGam <- sapply(1:k, function(r) {
+ log(pi[r]) + log(detRho[r]) - 0.5 *
+ sum((Y[i, ] %*% rho[, , r] - X[i, ] %*% phi[, , r])^2)
+ })
+
+ # log-sum-exp trick: subtracting the max cancels in the proportions below,
+ # but the shift must be added back into the log-likelihood
+ shift <- max(logGam)
+ logGam <- logGam - shift
+ gam[i, ] <- exp(logGam)
+ norm_fact <- sum(gam[i, ])
+ gam[i, ] <- gam[i, ] / norm_fact
+ sumLogLLH <- sumLogLLH + shift + log(norm_fact) - log((2 * base::pi)^(m/2))
}
- norm_fact <- rowSums(gam)
- gam <- gam / norm_fact
- sumLogLLH <- sum(log(norm_fact) - log((2 * base::pi)^(m/2)))
+
sumPen <- sum(pi^gamma * b)
last_llh <- llh
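+ # penalized objective: negated mean log-likelihood plus penalty; compared
+ # with last_llh in the stopping test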
llh <- -sumLogLLH/n + lambda * sumPen
#'
#' @export
computeGridLambda <- function(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini,
- maxi, tau, fast = TRUE)
+ maxi, tau, fast)
{
n <- nrow(X)
p <- dim(phiInit)[1]
#'
#' @export
constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
- maxi, gamma, X, Y, eps, S, ncores = 3, fast = TRUE, verbose = FALSE)
+ maxi, gamma, X, Y, eps, S, ncores = 3, fast, verbose)
{
if (ncores > 1)
{
return(NULL)
# lambda == 0 because we compute the MLE: no penalization here
- res <- EMGLLF(phiInit[col.sel, , ], rhoInit, piInit, gamInit, mini, maxi,
- gamma, 0, X[, col.sel], Y, eps, fast)
+ res <- EMGLLF(array(phiInit[col.sel, , ],dim=c(length(col.sel),m,k)), rhoInit,
+ piInit, gamInit, mini, maxi, gamma, 0, as.matrix(X[, col.sel]), Y, eps, fast)
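+ # array() and as.matrix() prevent R from dropping dimensions when length(col.sel) == 1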
# Evaluate the model dimension from the result and the selected variables
phiLambda2 <- res$phi
#'
#' @export
constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min, rank.max,
- ncores, fast = TRUE, verbose = FALSE)
+ ncores, fast, verbose)
{
n <- dim(X)[1]
p <- dim(X)[2]
#' @export
#' @importFrom methods new
#' @importFrom stats cutree dist hclust runif
-initSmallEM <- function(k, X, Y, fast = TRUE)
+initSmallEM <- function(k, X, Y, fast)
{
n <- nrow(Y)
m <- ncol(Y)
init_EMG <- EMGLLF(phiInit1[, , , repet], rhoInit1[, , , repet], piInit1[repet, ],
gamInit1[, , repet], miniInit, maxiInit, gamma = 1, lambda = 0, X, Y,
eps = 1e-04, fast)
- LLFEessai <- init_EMG$LLF
- LLFinit1[repet] <- LLFEessai[length(LLFEessai)]
+ LLFinit1[[repet]] <- init_EMG$llh
}
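+ # keep the restart whose final criterion is smallest, i.e. the largest
+ # likelihood, since lambda == 0 here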
b <- which.min(LLFinit1)
phiInit <- phiInit1[, , , b]
# smallEM initializes the parameters by k-means and a regression model in each
# component, repeating this 20 times and keeping the values that maximize the
# likelihood after 10 iterations of the EM algorithm.
- P <- initSmallEM(k, X, Y)
+ P <- initSmallEM(k, X, Y, fast)
grid_lambda <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit,
X, Y, gamma, mini, maxi, eps, fast)
if (length(grid_lambda) > size_coll_mod)
#' @export
#'
selectVariables <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma,
- glambda, X, Y, thresh = 1e-08, eps, ncores = 3, fast = TRUE)
+ glambda, X, Y, thresh = 1e-08, eps, ncores = 3, fast)
{
if (ncores > 1) {
cl <- parallel::makeCluster(ncores, outfile = "")
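+ # outfile = "" means the workers' output is not redirected, so it appears in the master console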
}
# For each lambda in the grid, we compute the coefficients
- out <- if (ncores > 1)
- parLapply(cl, glambda, computeCoefs) else lapply(glambda, computeCoefs)
+ out <-
+ if (ncores > 1) {
+ parallel::parLapply(cl, glambda, computeCoefs)
+ } else {
+ lapply(glambda, computeCoefs)
+ }
if (ncores > 1)
parallel::stopCluster(cl)
- # Suppress models which are computed twice En fait, ca ca fait la comparaison de
- # tous les parametres On veut juste supprimer ceux qui ont les memes variables
- # sélectionnées sha1_array <- lapply(out, digest::sha1) out[
- # duplicated(sha1_array) ]
+ # Remove models which are computed twice. (In fact this would compare all the
+ # parameters; we only want to drop those sharing the same selected variables.)
+ # sha1_array <- lapply(out, digest::sha1) out[ duplicated(sha1_array) ]
selec <- lapply(out, function(model) model$selected)
ind_dup <- duplicated(selec)
ind_uniq <- which(!ind_dup)
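+ # keep only the first occurrence of each distinct set of selected variables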