X-Git-Url: https://git.auder.net/?p=valse.git;a=blobdiff_plain;f=pkg%2FR%2FEMGrank.R;h=fa66b3de581be86ea610a01cfd42cc6163e99bcd;hp=b85a0faf4fd9b1b2cea2c4f7b11ccf5c9371a08d;hb=1196a43d961a95abc18d3c8e777e9a4e8233e562;hpb=ea5860f1b4fc91f06e371a0b26915198474a849d

diff --git a/pkg/R/EMGrank.R b/pkg/R/EMGrank.R
index b85a0fa..fa66b3d 100644
--- a/pkg/R/EMGrank.R
+++ b/pkg/R/EMGrank.R
@@ -1,36 +1,35 @@
 #' EMGrank
 #'
-#' Description de EMGrank
+#' Run a generalized EM algorithm developed for mixtures of Gaussian regression
+#' models with variable selection, via an extension of the low-rank estimator.
+#' A reparametrization is applied to ensure invariance under homothetic transformations.
+#' It returns a collection of models, varying the number of clusters and the rank of the regression mean.
 #'
-#' @param Pi Parametre de proportion
-#' @param Rho Parametre initial de variance renormalisé
-#' @param mini Nombre minimal d'itérations dans l'algorithme EM
-#' @param maxi Nombre maximal d'itérations dans l'algorithme EM
-#' @param X Régresseurs
-#' @param Y Réponse
-#' @param tau Seuil pour accepter la convergence
-#' @param rank Vecteur des rangs possibles
+#' @param Pi An initialization for pi, the vector of mixture proportions
+#' @param Rho An initialization for rho, the variance parameter
+#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
+#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param eps real, threshold below which the EM algorithm is considered to have converged, by default = 1e-4
+#' @param rank vector of possible ranks
+#' @param fast boolean, TRUE to call the compiled C implementation, FALSE to use the plain R version
 #'
-#' @return A list ...
-#' phi : parametre de moyenne renormalisé, calculé par l'EM
-#' LLF : log vraisemblance associé à cet échantillon, pour les valeurs estimées des paramètres
+#' @return A list (corresponding to the model collection) defined by (phi, LLF):
+#' phi : regression mean for each cluster
+#' LLF : log likelihood with respect to the training set
 #'
 #' @export
-EMGrank <- function(Pi, Rho, mini, maxi, X, Y, tau, rank, fast = TRUE)
+EMGrank <- function(Pi, Rho, mini, maxi, X, Y, eps, rank, fast)
 {
   if (!fast)
   {
     # Function in R
-    return(.EMGrank_R(Pi, Rho, mini, maxi, X, Y, tau, rank))
+    return(.EMGrank_R(Pi, Rho, mini, maxi, X, Y, eps, rank))
   }
 
   # Function in C
-  n <- nrow(X) #nombre d'echantillons
-  p <- ncol(X) #nombre de covariables
-  m <- ncol(Y) #taille de Y (multivarié)
-  k <- length(Pi) #nombre de composantes dans le mélange
-  .Call("EMGrank", Pi, Rho, mini, maxi, X, Y, tau, rank, phi = double(p * m * k),
-    LLF = double(1), n, p, m, k, PACKAGE = "valse")
+  .Call("EMGrank", Pi, Rho, mini, maxi, X, Y, eps, as.integer(rank), PACKAGE = "valse")
 }
 
 # helper to always have matrices as arg (TODO: put this elsewhere? improve?) -->
@@ -43,7 +42,7 @@ matricize <- function(X)
 }
 
 # R version - slow but easy to read
-.EMGrank_R <- function(Pi, Rho, mini, maxi, X, Y, tau, rank)
+.EMGrank_R <- function(Pi, Rho, mini, maxi, X, Y, eps, rank)
 {
   # matrix dimensions
   n <- nrow(X)
@@ -64,7 +63,7 @@ matricize <- function(X)
 
   # main loop
   ite <- 1
-  while (ite <= mini || (ite <= maxi && sumDeltaPhi > tau))
+  while (ite <= mini || (ite <= maxi && sumDeltaPhi > eps))
   {
     # M step: update for Beta ( and then phi)
     for (r in 1:k)
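
A minimal usage sketch of the new EMGrank signature follows. Only the argument order (Pi, Rho, mini, maxi, X, Y, eps, rank, fast) and the (phi, LLF) return value come from the diff above; the simulated data, the m*m*k shape of Rho, and the identity initialization are illustrative assumptions, not taken from the package documentation.

# Illustrative usage sketch (assumptions: simulated data, Rho of size m*m*k, k = 2 clusters).
library(valse)

set.seed(1)
n <- 100; p <- 5; m <- 3; k <- 2                   # samples, covariates, responses, clusters
X <- matrix(rnorm(n * p), nrow = n)
beta <- array(rnorm(p * m * k), dim = c(p, m, k))  # true per-cluster regression means
cl <- sample(1:k, n, replace = TRUE)               # hidden cluster labels
Y <- t(sapply(1:n, function(i) X[i, ] %*% beta[, , cl[i]] + rnorm(m, sd = 0.5)))

Pi <- rep(1/k, k)                        # initial mixture proportions
Rho <- array(diag(m), dim = c(m, m, k))  # initial (renormalized) variance parameters

res <- EMGrank(Pi, Rho, mini = 10, maxi = 100, X = X, Y = Y,
               eps = 1e-4, rank = rep(2, k), fast = FALSE)  # fast = FALSE runs .EMGrank_R
str(res$phi)  # regression mean for each cluster
res$LLF       # log likelihood on the training sample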
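The while condition in .EMGrank_R encodes a common EM stopping rule: run at least mini iterations, at most maxi iterations, and stop early once the change in the mean parameter (sumDeltaPhi) falls below eps. A stand-alone sketch of that rule is below; the step function is hypothetical and not part of the valse package.

# Stand-alone sketch of the stopping rule used in the main loop above.
# `step` stands for one EM iteration and returns the change in phi.
em_stopping_sketch <- function(mini, maxi, eps, step)
{
  ite <- 1
  sumDeltaPhi <- Inf
  while (ite <= mini || (ite <= maxi && sumDeltaPhi > eps))
  {
    sumDeltaPhi <- step(ite)  # one EM pass; returns the parameter change
    ite <- ite + 1
  }
  ite - 1  # number of iterations actually performed
}

# Example: a geometrically decreasing change 0.5^i first drops below 1e-4 at i = 14,
# so the sketch returns 14 (the mini = 10 floor is already satisfied by then).
em_stopping_sketch(mini = 10, maxi = 100, eps = 1e-4, step = function(i) 0.5^i)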
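The description mentions "an extension of the low-rank estimator" with a per-cluster rank constraint, but the M step itself is not shown in these hunks. The following is only a hedged illustration of the basic ingredient, a rank-constrained least-squares fit obtained by truncating the SVD of the OLS coefficient matrix; the actual update in .EMGrank_R may differ, for instance in cluster weighting and in the Rho reparametrization, and the function name is hypothetical.

# Hedged illustration of a rank-constrained regression fit.
# beta_ols is the p x m least-squares solution; zeroing all but the r leading
# singular values gives its best rank-r approximation in Frobenius norm.
low_rank_fit <- function(X, Y, r)
{
  beta_ols <- solve(crossprod(X), crossprod(X, Y))  # p x m OLS coefficients
  s <- svd(beta_ols)
  d <- s$d
  if (r < length(d)) d[(r + 1):length(d)] <- 0      # enforce rank <= r
  s$u %*% diag(d, nrow = length(d)) %*% t(s$v)
}

# e.g. low_rank_fit(X, Y, r = 2) with the simulated X, Y from the sketch above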