From a3cbbaea1cc3c107e5ca62ed1ffe7b9499de0a91 Mon Sep 17 00:00:00 2001
From: Benjamin Auder
Date: Thu, 20 Apr 2017 23:38:26 +0200
Subject: [PATCH 1/1] Fix numerical problems in EMGLLF (R version)

---
 pkg/R/EMGLLF.R                       | 31 ++++++++++++++++------------
 pkg/R/computeGridLambda.R            |  2 +-
 pkg/R/constructionModelesLassoMLE.R  |  6 +++---
 pkg/R/constructionModelesLassoRank.R |  2 +-
 pkg/R/initSmallEM.R                  |  5 ++---
 pkg/R/main.R                         |  2 +-
 pkg/R/selectVariables.R              | 14 +++++++++-----
 7 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/pkg/R/EMGLLF.R b/pkg/R/EMGLLF.R
index b71a128..6ee7ba7 100644
--- a/pkg/R/EMGLLF.R
+++ b/pkg/R/EMGLLF.R
@@ -23,7 +23,7 @@
 #'
 #' @export
 EMGLLF <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
-  X, Y, eps, fast = TRUE)
+  X, Y, eps, fast)
 {
   if (!fast)
   {
@@ -111,7 +111,8 @@ EMGLLF <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, lambda,
     # t(m) is the largest value in the grid 0.1^k such that it is nonincreasing
     while (kk < 1000 && -a/n + lambda * sum(pi^gamma * b) <
-      -sum(gam2 * log(pi2))/n + lambda * sum(pi2^gamma * b))
+      # na.rm=TRUE to handle 0*log(0)
+      -sum(gam2 * log(pi2), na.rm=TRUE)/n + lambda * sum(pi2^gamma * b))
     {
       pi2 <- pi + 0.1^kk * (1/n * gam2 - pi)
       kk <- kk + 1
     }
@@ -138,8 +139,8 @@
       {
         for (mm in 1:m)
         {
-          S[j, mm, r] <- -rho[mm, mm, r] * ps2[j, mm, r]
-            + sum(phi[-j, mm, r] * Gram2[j, -j, r])
+          S[j, mm, r] <- -rho[mm, mm, r] * ps2[j, mm, r] +
+            sum(phi[-j, mm, r] * Gram2[j, -j, r])
           if (abs(S[j, mm, r]) <= n * lambda * (pi[r]^gamma)) {
             phi[j, mm, r] <- 0
           } else if (S[j, mm, r] > n * lambda * (pi[r]^gamma)) {
@@ -155,18 +156,22 @@
     # Precompute det(rho[,,r]) for r in 1...k
     detRho <- sapply(1:k, function(r) det(rho[, , r]))
+    sumLogLLH <- 0

     for (i in 1:n)
     {
-      # Update gam[,]
-      for (r in 1:k)
-      {
-        gam[i, r] <- pi[r] * exp(-0.5
-          * sum((Y[i, ] %*% rho[, , r] - X[i, ] %*% phi[, , r])^2)) * detRho[r]
-      }
+      # Update gam[,]; use log to avoid numerical problems
+      logGam <- sapply(1:k, function(r) {
+        log(pi[r]) + log(detRho[r]) - 0.5 *
+          sum((Y[i, ] %*% rho[, , r] - X[i, ] %*% phi[, , r])^2)
+      })
+
+      logGam <- logGam - max(logGam) #adjust without changing proportions
+      gam[i, ] <- exp(logGam)
+      norm_fact <- sum(gam[i, ])
+      gam[i, ] <- gam[i, ] / norm_fact
+      sumLogLLH <- sumLogLLH + log(norm_fact) - log((2 * base::pi)^(m/2))
     }
-    norm_fact <- rowSums(gam)
-    gam <- gam / norm_fact
-    sumLogLLH <- sum(log(norm_fact) - log((2 * base::pi)^(m/2)))
+
     sumPen <- sum(pi^gamma * b)
     last_llh <- llh
     llh <- -sumLogLLH/n + lambda * sumPen
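
The gam[, ] rewrite above is the central numerical fix. The old code multiplied pi[r] * exp(-0.5 * ...) * detRho[r] directly; once the squared residual is large, every exp(...) underflows to zero and the row normalization divides 0 by 0. Computing in the log domain and subtracting the row maximum keeps the largest term at exp(0) = 1, and the shift cancels during normalization. Below is a minimal standalone sketch of the same log-sum-exp idea; the vector logw is made up and stands in for the per-component terms log(pi[r]) + log(detRho[r]) - 0.5 * sum((Y[i, ] %*% rho - X[i, ] %*% phi)^2):

# Toy per-component log-weights, far below log(.Machine$double.xmin) ~ -708
logw <- c(-1200, -1210, -1195)

# Naive computation: every exp() underflows to 0, so normalizing gives 0/0
naive <- exp(logw) / sum(exp(logw))
print(naive)    # NaN NaN NaN

# Log-sum-exp: shifting by the max changes nothing after normalization
w <- exp(logw - max(logw))
gam_i <- w / sum(w)
print(gam_i)    # valid responsibilities, summing to 1

# The log-normalizer feeds the log-likelihood, as in the patched sumLogLLH:
# log(sum(exp(logw))) = max(logw) + log(sum(w))
print(max(logw) + log(sum(w)))
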
diff --git a/pkg/R/computeGridLambda.R b/pkg/R/computeGridLambda.R
index c34c707..c2e9c8c 100644
--- a/pkg/R/computeGridLambda.R
+++ b/pkg/R/computeGridLambda.R
@@ -17,7 +17,7 @@
 #'
 #' @export
 computeGridLambda <- function(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini,
-  maxi, tau, fast = TRUE)
+  maxi, tau, fast)
 {
   n <- nrow(X)
   p <- dim(phiInit)[1]
diff --git a/pkg/R/constructionModelesLassoMLE.R b/pkg/R/constructionModelesLassoMLE.R
index 8f93fb8..90d0a2a 100644
--- a/pkg/R/constructionModelesLassoMLE.R
+++ b/pkg/R/constructionModelesLassoMLE.R
@@ -21,7 +21,7 @@
 #'
 #' @export
 constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
-  maxi, gamma, X, Y, eps, S, ncores = 3, fast = TRUE, verbose = FALSE)
+  maxi, gamma, X, Y, eps, S, ncores = 3, fast, verbose)
 {
   if (ncores > 1)
   {
@@ -51,8 +51,8 @@ constructionModelesLassoMLE <- function(phiInit, rhoInit, piInit, gamInit, mini,
       return(NULL)

     # lambda == 0 because we compute the MLE: no penalization here
-    res <- EMGLLF(phiInit[col.sel, , ], rhoInit, piInit, gamInit, mini, maxi,
-      gamma, 0, X[, col.sel], Y, eps, fast)
+    res <- EMGLLF(array(phiInit[col.sel, , ],dim=c(length(col.sel),m,k)), rhoInit,
+      piInit, gamInit, mini, maxi, gamma, 0, as.matrix(X[, col.sel]), Y, eps, fast)

     # Eval dimension from the result + selected
     phiLambda2 <- res$phi
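
The array() and as.matrix() wrappers in the hunk above guard against R dropping dimensions: when col.sel has length 1, phiInit[col.sel, , ] silently degrades from a p x m x k array to an m x k matrix, and X[, col.sel] from a matrix to a bare vector, which then breaks dim()-based code inside EMGLLF. A small illustration of the pitfall with toy dimensions (the names mirror the call above; the values are invented):

p <- 5; m <- 3; k <- 2; n <- 4
phiInit <- array(rnorm(p * m * k), dim = c(p, m, k))
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
col.sel <- 2                     # a single selected variable

dim(phiInit[col.sel, , ])        # 3 2: the first dimension was dropped
dim(X[, col.sel])                # NULL: now a plain vector

# The patch forces the intended shapes back explicitly
dim(array(phiInit[col.sel, , ], dim = c(length(col.sel), m, k)))  # 1 3 2
dim(as.matrix(X[, col.sel]))                                      # 4 1

# The equivalent built-in idiom avoids the dropping upfront
dim(phiInit[col.sel, , , drop = FALSE])   # 1 3 2
dim(X[, col.sel, drop = FALSE])           # 4 1
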
diff --git a/pkg/R/constructionModelesLassoRank.R b/pkg/R/constructionModelesLassoRank.R
index 5857a42..85685e9 100644
--- a/pkg/R/constructionModelesLassoRank.R
+++ b/pkg/R/constructionModelesLassoRank.R
@@ -19,7 +19,7 @@
 #'
 #' @export
 constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min, rank.max,
-  ncores, fast = TRUE, verbose = FALSE)
+  ncores, fast, verbose)
 {
   n <- dim(X)[1]
   p <- dim(X)[2]
diff --git a/pkg/R/initSmallEM.R b/pkg/R/initSmallEM.R
index ba95586..056d7e7 100644
--- a/pkg/R/initSmallEM.R
+++ b/pkg/R/initSmallEM.R
@@ -8,7 +8,7 @@
 #' @export
 #' @importFrom methods new
 #' @importFrom stats cutree dist hclust runif
-initSmallEM <- function(k, X, Y, fast = TRUE)
+initSmallEM <- function(k, X, Y, fast)
 {
   n <- nrow(Y)
   m <- ncol(Y)
@@ -67,8 +67,7 @@ initSmallEM <- function(k, X, Y, fast = TRUE)
     init_EMG <- EMGLLF(phiInit1[, , , repet], rhoInit1[, , , repet], piInit1[repet,
       ], gamInit1[, , repet], miniInit, maxiInit, gamma = 1, lambda = 0, X,
       Y, eps = 1e-04, fast)
-    LLFEessai <- init_EMG$LLF
-    LLFinit1[repet] <- LLFEessai[length(LLFEessai)]
+    LLFinit1[[repet]] <- init_EMG$llh
   }
   b <- which.min(LLFinit1)
   phiInit <- phiInit1[, , , b]
diff --git a/pkg/R/main.R b/pkg/R/main.R
index 64e0586..fecf519 100644
--- a/pkg/R/main.R
+++ b/pkg/R/main.R
@@ -57,7 +57,7 @@ valse <- function(X, Y, procedure = "LassoMLE", selecMod = "DDSE", gamma = 1, mi
     # smallEM initializes parameters by k-means and regression model in each
     # component, doing this 20 times, and keeping the values maximizing the
     # likelihood after 10 iterations of the EM algorithm.
-    P <- initSmallEM(k, X, Y)
+    P <- initSmallEM(k, X, Y, fast)
     grid_lambda <- computeGridLambda(P$phiInit, P$rhoInit, P$piInit, P$gamInit,
       X, Y, gamma, mini, maxi, eps, fast)
     if (length(grid_lambda) > size_coll_mod)
diff --git a/pkg/R/selectVariables.R b/pkg/R/selectVariables.R
index f717cae..bfe4042 100644
--- a/pkg/R/selectVariables.R
+++ b/pkg/R/selectVariables.R
@@ -23,7 +23,7 @@
 #' @export
 #'
 selectVariables <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma,
-  glambda, X, Y, thresh = 1e-08, eps, ncores = 3, fast = TRUE)
+  glambda, X, Y, thresh = 1e-08, eps, ncores = 3, fast)
 {
   if (ncores > 1) {
     cl <- parallel::makeCluster(ncores, outfile = "")
@@ -52,14 +52,18 @@ selectVariables <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma
   }
   # For each lambda in the grid, we compute the coefficients
-  out <- if (ncores > 1)
-    parLapply(cl, glambda, computeCoefs) else lapply(glambda, computeCoefs)
+  out <-
+    if (ncores > 1) {
+      parLapply(cl, glambda, computeCoefs)
+    } else {
+      lapply(glambda, computeCoefs)
+    }
   if (ncores > 1)
     parallel::stopCluster(cl)

   # Suppress models which are computed twice. In fact, this compares all of the
   # parameters; we just want to remove those having the same selected
-  # variables sha1_array <- lapply(out, digest::sha1) out[
-  # duplicated(sha1_array) ]
+  # variables
+  # sha1_array <- lapply(out, digest::sha1) out[ duplicated(sha1_array) ]
   selec <- lapply(out, function(model) model$selected)
   ind_dup <- duplicated(selec)
   ind_uniq <- which(!ind_dup)
-- 
2.44.0
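
Two smaller pitfalls fixed by this patch are worth spelling out. In the S[j, mm, r] hunk, the + moves from the start of the continuation line to the end of the first line: R terminates a statement as soon as it is syntactically complete, so a leading + on the next line starts a new expression whose value is silently discarded. And na.rm = TRUE in the pi2 line search encodes the convention 0 * log(0) = 0, since the literal floating-point result is 0 * -Inf = NaN. A toy demonstration of both, with illustrative values:

a <- 1
x <- a
  + 2          # the assignment above is already complete; this line is discarded
print(x)       # 1, not 3: the shape of the old S[j, mm, r] bug

y <- a +       # a trailing operator keeps the statement open
  2
print(y)       # 3

pi2 <- c(0.6, 0.4, 0)               # a mixture weight hits exactly 0
gam2 <- c(3, 2, 0)
sum(gam2 * log(pi2))                # NaN: the 0 * log(0) term poisons the sum
sum(gam2 * log(pi2), na.rm = TRUE)  # finite, treating 0 * log(0) as 0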