X-Git-Url: https://git.auder.net/?p=valse.git;a=blobdiff_plain;f=pkg%2FR%2FconstructionModelesLassoRank.R;h=a37a7a6b79ea4d7f6721daedf712774177bad6b6;hp=71713f73c180593b658d5bf13d2ae8e2ed1bab7f;hb=6af1d4897dbab92a7be05068e0e15823378965d9;hpb=0eb161e3f3d018bce7d98fc85622d14910f89d43

diff --git a/pkg/R/constructionModelesLassoRank.R b/pkg/R/constructionModelesLassoRank.R
index 71713f7..a37a7a6 100644
--- a/pkg/R/constructionModelesLassoRank.R
+++ b/pkg/R/constructionModelesLassoRank.R
@@ -1,84 +1,99 @@
 #' constructionModelesLassoRank
 #'
-#' TODO: description
+#' Construct a collection of models with the Lasso-Rank procedure.
 #'
-#' @param ...
+#' @param S output of selectVariables.R (a list with one element per regularization parameter)
+#' @param k number of components
+#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
+#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param eps real, convergence threshold for the EM algorithm, by default = 1e-4
+#' @param rank.min integer, minimum rank in the low rank procedure, by default = 1
+#' @param rank.max integer, maximum rank in the low rank procedure, by default = 5
+#' @param ncores number of cores, by default = 3
+#' @param fast TRUE to use compiled C code, FALSE for R code only
+#' @param verbose TRUE to show some execution traces
 #'
-#' @return ...
+#' @return a list with several models, defined by phi (the reparametrized regression parameter),
+#' rho (the reparametrized covariance parameter), pi (the proportion parameter of the mixture model)
+#' and llh (the value of the log-likelihood function for this estimator on the training dataset).
+#' The list is given for several levels of sparsity, corresponding to a grid of regularization
+#' parameters computed automatically, and for several ranks (between rank.min and rank.max).
 #'
-#' export
-constructionModelesLassoRank = function(pi, rho, mini, maxi, X, Y, tau, A1, rangmin,
-  rangmax, ncores, verbose=FALSE)
+#' @export
+constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min, rank.max,
+  ncores, fast, verbose)
 {
-  n = dim(X)[1]
-  p = dim(X)[2]
-  m = dim(rho)[2]
-  k = dim(rho)[3]
-  L = dim(A1)[2]
+  n <- nrow(X)
+  p <- ncol(X)
+  m <- ncol(Y)
+  L <- length(S)
 
-  # On cherche les rangs possiblement intéressants
-  deltaRank = rangmax - rangmin + 1
-  Size = deltaRank^k
-  Rank = matrix(0, nrow=Size, ncol=k)
+  # Possible interesting ranks
+  deltaRank <- rank.max - rank.min + 1
+  Size <- deltaRank^k
+  RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
   for (r in 1:k)
-  {
-    # On veut le tableau de toutes les combinaisons de rangs possibles
-    # Dans la première colonne : on répète (rangmax-rangmin)^(k-1) chaque chiffre :
-    #   ça remplit la colonne
-    # Dans la deuxieme : on répète (rangmax-rangmin)^(k-2) chaque chiffre,
-    #   et on fait ça (rangmax-rangmin)^2 fois
-    # ...
-    # Dans la dernière, on répète chaque chiffre une fois,
-    #   et on fait ça (rangmin-rangmax)^(k-1) fois.
-    Rank[,r] = rangmin + rep(0:(deltaRank-1), deltaRank^(r-1), each=deltaRank^(k-r))
+  {
+    # We want the array of all possible rank combinations, paired with the lambda
+    # indices. In the first column, each value is repeated deltaRank^(k-1) times,
+    # which fills the column. In the second one, each value is repeated
+    # deltaRank^(k-2) times, and this pattern is repeated deltaRank times, and so
+    # on. In the last column, each value appears once, and the whole sequence is
+    # repeated deltaRank^(k-1) times.
+    RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
+      each = deltaRank^(k - r)), each = L)
   }
+  RankLambda[, k + 1] <- rep(1:L, times = Size)
 
   if (ncores > 1)
-  {
-    cl = parallel::makeCluster(ncores)
-    parallel::clusterExport( cl, envir=environment(),
-      varlist=c("A1","Size","Pi","Rho","mini","maxi","X","Y","tau",
-      "Rank","m","phi","ncores","verbose") )
-  }
+  {
+    cl <- parallel::makeCluster(ncores, outfile = "")
+    parallel::clusterExport(cl, envir = environment(), varlist = c("S", "RankLambda",
+      "Size", "k", "p", "m", "mini", "maxi", "X", "Y", "eps", "fast", "ncores",
+      "verbose"))
+  }
 
-  computeAtLambda <- function(lambdaIndex)
-  {
-    if (ncores > 1)
-      require("valse") #workers start with an empty environment
+  computeAtLambda <- function(index)
+  {
+    lambdaIndex <- RankLambda[index, k + 1]
+    rankIndex <- RankLambda[index, 1:k]
+    if (ncores > 1)
+      require("valse") #workers start with an empty environment
 
-    # on ne garde que les colonnes actives
-    # 'active' sera l'ensemble des variables informatives
-    active = A1[,lambdaIndex]
-    active = active[-(active==0)]
-    phi = array(0, dim=c(p,m,k,Size))
-    llh = matrix(0, Size, 2) #log-likelihood
-    if (length(active) > 0)
-    {
-      for (j in 1:Size)
-      {
-        res = EMGrank(Pi[,lambdaIndex], Rho[,,,lambdaIndex], mini, maxi,
-          X[,active], Y, tau, Rank[j,])
-        llh = rbind(llh,
-          c( res$LLF, sum(Rank[j,] * (length(active)- Rank[j,] + m)) ) )
-        phi[active,,,] = rbind(phi[active,,,], res$phi)
+    # 'relevant' will be the set of relevant columns
+    selected <- S[[lambdaIndex]]$selected
+    relevant <- c()
+    for (j in 1:p)
+    {
+      if (length(selected[[j]]) > 0)
+        relevant <- c(relevant, j)
+    }
+    if (max(rankIndex) < length(relevant))
+    {
+      phi <- array(0, dim = c(p, m, k))
+      if (length(relevant) > 0)
+      {
+        res <- EMGrank(S[[lambdaIndex]]$Pi, S[[lambdaIndex]]$Rho, mini, maxi,
+          X[, relevant], Y, eps, rankIndex, fast)
+        llh <- c(res$LLF, sum(rankIndex * (length(relevant) - rankIndex + m)))
+        phi[relevant, , ] <- res$phi
       }
+      list(llh = llh, phi = phi, pi = S[[lambdaIndex]]$Pi, rho = S[[lambdaIndex]]$Rho)
     }
-    list("llh"=llh, "phi"=phi)
-  }
+  }
 
-  #Pour chaque lambda de la grille, on calcule les coefficients
-  out =
-    if (ncores > 1)
-      parLapply(cl, seq_along(glambda), computeAtLambda)
-    else
-      lapply(seq_along(glambda), computeAtLambda)
+  # For each pair (rank combination, lambda) in the grid, we compute the estimators
+  out <-
+    if (ncores > 1) {
+      parallel::parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
+    } else {
+      lapply(seq_len(length(S) * Size), computeAtLambda)
+    }
 
-  if (ncores > 1)
+  if (ncores > 1)
     parallel::stopCluster(cl)
 
-  # TODO: this is a bit ugly. Better use bigmemory and fill llh/phi in-place
-  # (but this also adds a dependency...)
-  llh <- do.call( rbind, lapply(out, function(model) model$llh) )
-  phi <- do.call( rbind, lapply(out, function(model) model$phi) )
-  list("llh"=llh, "phi"=phi)
+  out
 }
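
For intuition about the RankLambda grid built in the new code, the following self-contained R sketch reproduces the enumeration on its own. The values of k, rank.min, rank.max and L are hypothetical, chosen only to make the pattern visible, and the expand.grid comparison is an illustration rather than code from the package.

# Hypothetical sizes, for illustration only
k <- 2; rank.min <- 1; rank.max <- 3; L <- 2
deltaRank <- rank.max - rank.min + 1
Size <- deltaRank^k
RankLambda <- matrix(0, nrow = Size * L, ncol = k + 1)
for (r in 1:k)
{
  # Same enumeration as in constructionModelesLassoRank: each rank combination
  # is repeated L times, once per lambda index
  RankLambda[, r] <- rep(rank.min + rep(0:(deltaRank - 1), deltaRank^(r - 1),
    each = deltaRank^(k - r)), each = L)
}
RankLambda[, k + 1] <- rep(1:L, times = Size)
# Sanity check: the rank columns enumerate all deltaRank^k combinations
grid <- as.matrix(expand.grid(rep(list(rank.min:rank.max), k)))
stopifnot(nrow(unique(RankLambda[, 1:k, drop = FALSE])) == nrow(grid))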
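
The selection step inside computeAtLambda keeps only the covariates that selectVariables marked as relevant for the given lambda, and those columns of X are what gets passed to EMGrank. The short sketch below replays that logic on a hypothetical 'selected' list; the contents are made up, only the structure (one entry per covariate) follows the code above, and the vectorized one-liner at the end is a suggested equivalent, not part of the package.

p <- 4
selected <- list(c(1, 3), integer(0), 2, integer(0))  # hypothetical output for one lambda
relevant <- c()
for (j in 1:p)
{
  if (length(selected[[j]]) > 0)
    relevant <- c(relevant, j)
}
relevant                      # 1 3: the columns of X that would be passed to EMGrank
which(lengths(selected) > 0)  # equivalent vectorized form, same result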