X-Git-Url: https://git.auder.net/?p=valse.git;a=blobdiff_plain;f=pkg%2FR%2FconstructionModelesLassoRank.R;h=a37a7a6b79ea4d7f6721daedf712774177bad6b6;hp=9254473dd65a7e0650e3f893185d0eb6f4645b86;hb=6af1d4897dbab92a7be05068e0e15823378965d9;hpb=1c45d8e4f6fd4209d26709f17a58920218ee828d

diff --git a/pkg/R/constructionModelesLassoRank.R b/pkg/R/constructionModelesLassoRank.R
index 9254473..a37a7a6 100644
--- a/pkg/R/constructionModelesLassoRank.R
+++ b/pkg/R/constructionModelesLassoRank.R
@@ -1,49 +1,99 @@
-constructionModelesLassoRank = function(pi,rho,mini,maxi,X,Y,tau,A1,rangmin,rangmax)
+#' constructionModelesLassoRank
+#'
+#' Construct a collection of models with the Lasso-Rank procedure.
+#'
+#' @param S output of selectVariables.R
+#' @param k number of components
+#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
+#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param eps real, convergence threshold for the EM algorithm, by default = 1e-4
+#' @param rank.min integer, minimum rank in the low rank procedure, by default = 1
+#' @param rank.max integer, maximum rank in the low rank procedure, by default = 5
+#' @param ncores number of cores, by default = 3
+#' @param fast TRUE to use compiled C code, FALSE for R code only
+#' @param verbose TRUE to show some execution traces
+#'
+#' @return a list with several models, defined by phi (the reparametrized regression parameter),
+#' rho (the reparametrized covariance parameter), pi (the proportion parameter of the mixture model) and llh
+#' (the value of the log-likelihood function for this estimator on the training dataset). The list is given
+#' for several levels of sparsity, corresponding to several regularization parameters computed automatically,
+#' and to several ranks (between rank.min and rank.max).
+#'
+#' @export
+constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min, rank.max,
+  ncores, fast, verbose)
 {
-  #get matrix sizes
-  n = dim(X)[1]
-  p = dim(X)[2]
-  m = dim(rho)[2]
-  k = dim(rho)[3]
-  L = dim(A1)[2]
+  n <- nrow(X)
+  p <- ncol(X)
+  m <- ncol(Y)
+  L <- length(S)
 
-  # Look for the potentially interesting ranks
-  deltaRank = rangmax - rangmin + 1
-  Size = deltaRank^k
-  Rank = matrix(0, nrow=Size, ncol=k)
-  for(r in 1:k)
-  {
-    # We want the table of all possible rank combinations
-    # In the first column: each value is repeated (rangmax-rangmin)^(k-1) times:
-    # this fills the column
-    # In the second: each value is repeated (rangmax-rangmin)^(k-2) times,
-    # and this is done (rangmax-rangmin)^2 times
-    # ...
-    # In the last column, each value is repeated once,
-    # and this is done (rangmin-rangmax)^(k-1) times.
-    Rank[,r] = rangmin + rep(0:(deltaRank-1), deltaRank^(r-1), each=deltaRank^(k-r))
+  # Possible interesting ranks
+  deltaRank <- rank.max - rank.min + 1
+  Size <- deltaRank^k
+  RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
+  for (r in 1:k)
+  {
+    # Build the table of all possible rank combinations, paired with the lambda indices.
+    # In the first column, each rank value is repeated deltaRank^(k-1) times, which
+    # fills the column; in the second, each value is repeated deltaRank^(k-2) times
+    # and that pattern is cycled deltaRank times; ...; in the last column, each value
+    # appears once and the pattern is cycled deltaRank^(k-1) times. Each entry is then
+    # repeated L times, one copy per lambda (a worked example follows below the diff).
+    RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
+      each = deltaRank^(k - r)), each = L)
   }
+  RankLambda[, k + 1] <- rep(1:L, times = Size)
 
-  # output parameters
-  phi = array(0, dim=c(p,m,k,L*Size))
-  llh = matrix(0, L*Size, 2) #log-likelihood
-  for(lambdaIndex in 1:L)
-  {
-    # keep only the active columns
-    # 'active' will be the set of informative variables
-    active = A1[,lambdaIndex]
-    active = active[-(active==0)]
-    if (length(active) > 0)
-    {
-      for (j in 1:Size)
-      {
-        res = EMGrank(Pi[,lambdaIndex], Rho[,,,lambdaIndex], mini, maxi,
-          X[,active], Y, tau, Rank[j,])
-        llh[(lambdaIndex-1)*Size+j,] =
-          c( res$LLF, sum(Rank[j,] * (length(active)- Rank[j,] + m)) )
-        phi[active,,,(lambdaIndex-1)*Size+j] = res$phi
+  if (ncores > 1)
+  {
+    cl <- parallel::makeCluster(ncores, outfile = "")
+    parallel::clusterExport(cl, envir = environment(), varlist = c("S", "RankLambda",
+      "Size", "mini", "maxi", "X", "Y", "eps", "k", "p", "m", "L", "ncores", "fast",
+      "verbose"))
+  }
+
+  computeAtLambda <- function(index)
+  {
+    lambdaIndex <- RankLambda[index, k + 1]
+    rankIndex <- RankLambda[index, 1:k]
+    if (ncores > 1)
+      require("valse") #workers start with an empty environment
+
+    # 'relevant' will be the set of relevant columns
+    selected <- S[[lambdaIndex]]$selected
+    relevant <- c()
+    for (j in 1:p)
+    {
+      if (length(selected[[j]]) > 0)
+        relevant <- c(relevant, j)
+    }
+    if (max(rankIndex) < length(relevant))
+    {
+      phi <- array(0, dim = c(p, m, k))
+      if (length(relevant) > 0)
+      {
+        res <- EMGrank(S[[lambdaIndex]]$Pi, S[[lambdaIndex]]$Rho, mini, maxi,
+          X[, relevant], Y, eps, rankIndex, fast)
+        llh <- c(res$LLF, sum(rankIndex * (length(relevant) - rankIndex + m)))
+        phi[relevant, , ] <- res$phi
       }
+      list(llh = llh, phi = phi, pi = S[[lambdaIndex]]$Pi, rho = S[[lambdaIndex]]$Rho)
     }
   }
-  return (list("phi"=phi, "llh" = llh))
+
+  # Compute the estimators for each (lambda, rank combination) pair in the grid
+  out <-
+    if (ncores > 1) {
+      parallel::parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
+    } else {
+      lapply(seq_len(length(S) * Size), computeAtLambda)
+    }
+
+  if (ncores > 1)
+    parallel::stopCluster(cl)
+
+  out
 }
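
For reference, a small R sketch (not part of the patch; the values k = 2, rank.min = 1, rank.max = 2 and L = 2 are assumed only for illustration) showing the rank/lambda enumeration that the RankLambda loop in the new code produces:

# Illustration only: enumerate rank/lambda combinations exactly as the loop above does.
k <- 2; rank.min <- 1; rank.max <- 2; L <- 2
deltaRank <- rank.max - rank.min + 1  # candidate ranks per component
Size <- deltaRank^k                   # number of distinct rank vectors
RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
for (r in 1:k)
  RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
    each = deltaRank^(k - r)), each = L)
RankLambda[, k + 1] <- rep(1:L, times = Size)
RankLambda
# Each row pairs one rank vector (columns 1..k) with one lambda index (last column):
#      [,1] [,2] [,3]
# [1,]    1    1    1
# [2,]    1    1    2
# [3,]    1    2    1
# [4,]    1    2    2
# [5,]    2    1    1
# [6,]    2    1    2
# [7,]    2    2    1
# [8,]    2    2    2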
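
And a hedged usage sketch of the new signature documented in the roxygen header; S is assumed to be the per-lambda list returned by selectVariables, X and Y the data matrices described above, k = 2 is an arbitrary choice, and the remaining tuning values are the documented defaults (except ncores = 1, chosen here to avoid spawning a cluster):

# Hypothetical call; names and values are illustrative, not prescribed by the patch.
models <- constructionModelesLassoRank(S, k = 2, mini = 10, maxi = 100,
  X = X, Y = Y, eps = 1e-4, rank.min = 1, rank.max = 5,
  ncores = 1, fast = TRUE, verbose = FALSE)
# An entry is NULL when the largest rank in its combination is not smaller than the
# number of relevant covariates, so drop those before reading (llh, phi, pi, rho):
models <- Filter(Negate(is.null), models)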