diff --git a/pkg/R/constructionModelesLassoRank.R b/pkg/R/constructionModelesLassoRank.R
index 6e18409..a37a7a6 100644
--- a/pkg/R/constructionModelesLassoRank.R
+++ b/pkg/R/constructionModelesLassoRank.R
@@ -15,7 +15,11 @@
 #' @param fast TRUE to use compiled C code, FALSE for R code only
 #' @param verbose TRUE to show some execution traces
 #'
-#' @return a list with several models, defined by phi, rho, pi, llh
+#' @return a list with several models, defined by phi (the reparametrized regression parameter),
+#' rho (the reparametrized covariance parameter), pi (the proportion parameter in the mixture model), llh
+#' (the value of the log-likelihood function for this estimator on the training dataset). The list is given
+#' for several levels of sparsity, corresponding to several regularization parameters computed automatically,
+#' and several ranks (between rank.min and rank.max).
 #'
 #' @export
 constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min, rank.max,
@@ -83,7 +87,7 @@ constructionModelesLassoRank <- function(S, k, mini, maxi, X, Y, eps, rank.min,
   # For each lambda in the grid we compute the estimators
   out <-
     if (ncores > 1) {
-      parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
+      parallel::parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
     } else {
       lapply(seq_len(length(S) * Size), computeAtLambda)
     }
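
For context, a minimal self-contained sketch of the pattern the second hunk switches to; the worker, S and Size below are toy stand-ins, not the package's real objects (in the actual function, cl and computeAtLambda are defined earlier). Qualifying the call as parallel::parLapply() means the parallel branch works without the parallel package being attached via library(), while the plain lapply() branch keeps single-core execution available.

  # Sketch only: computeAtLambda, S and Size are placeholders for the objects
  # built earlier in constructionModelesLassoRank().
  computeAtLambda <- function(index) index^2  # toy worker

  S <- vector("list", 3)  # stand-in for the collection indexed by lambda
  Size <- 4L              # stand-in for the number of ranks explored
  ncores <- 2L

  out <- if (ncores > 1) {
    cl <- parallel::makeCluster(ncores)  # created elsewhere in the real function
    res <- parallel::parLapply(cl, seq_len(length(S) * Size), computeAtLambda)
    parallel::stopCluster(cl)
    res
  } else {
    lapply(seq_len(length(S) * Size), computeAtLambda)
  }

Using the :: qualifier also means parallel only has to be listed in the package's Imports; it never needs to be attached at run time.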