From: Benjamin Auder
Date: Tue, 7 Apr 2020 09:58:39 +0000 (+0200)
Subject: Remove arg n in plot_valse (deduce from X)
X-Git-Url: https://git.auder.net/variants/Cwda/vendor/app_dev.php?a=commitdiff_plain;h=3921ba9b5ea85bcc190245ac7da9ee9da1658b9f;p=valse.git

Remove arg n in plot_valse (deduce from X)
---

diff --git a/pkg/R/EMGLLF.R b/pkg/R/EMGLLF.R
index dada0ef..1633821 100644
--- a/pkg/R/EMGLLF.R
+++ b/pkg/R/EMGLLF.R
@@ -1,6 +1,6 @@
 #' EMGLLF
 #'
-#' Run a generalized EM algorithm developed for mixture of Gaussian regression 
+#' Run a generalized EM algorithm developed for mixture of Gaussian regression
 #' models with variable selection by an extension of the Lasso estimator (regularization parameter lambda).
 #' Reparametrization is done to ensure invariance by homothetic transformation.
 #' It returns a collection of models, varying the number of clusters and the sparsity in the regression mean.
diff --git a/pkg/R/EMGrank.R b/pkg/R/EMGrank.R
index fa66b3d..9531ae4 100644
--- a/pkg/R/EMGrank.R
+++ b/pkg/R/EMGrank.R
@@ -1,6 +1,6 @@
 #' EMGrank
 #'
-#' Run a generalized EM algorithm developed for mixture of Gaussian regression 
+#' Run a generalized EM algorithm developed for mixture of Gaussian regression
 #' models with variable selection by an extension of the low rank estimator.
 #' Reparametrization is done to ensure invariance by homothetic transformation.
 #' It returns a collection of models, varying the number of clusters and the rank of the regression mean.
@@ -36,9 +36,9 @@ EMGrank <- function(Pi, Rho, mini, maxi, X, Y, eps, rank, fast)
 # Yes, we should use by-columns storage everywhere... [later!]
 matricize <- function(X)
 {
-  if (!is.matrix(X)) 
+  if (!is.matrix(X))
     return(t(as.matrix(X)))
-  return(X)
+  X
 }
 
 # R version - slow but easy to read
@@ -69,15 +69,15 @@ matricize <- function(X)
   for (r in 1:k)
   {
     Z_indice <- seq_len(n)[Z == r] #indices where Z == r
-    if (length(Z_indice) == 0) 
+    if (length(Z_indice) == 0)
       next
     # U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
-    s <- svd(MASS::ginv(crossprod(matricize(X[Z_indice, ]))) %*% 
-      crossprod(matricize(X[Z_indice, ]), matricize(Y[Z_indice, ])))
+    s <- svd(MASS::ginv(crossprod(matricize(X[Z_indice, ]))) %*%
+      crossprod(matricize(X[Z_indice, ]), matricize(Y[Z_indice, ])))
     S <- s$d
     # Set m-rank(r) singular values to zero, and recompose best rank(r) approximation
     # of the initial product
-    if (rank[r] < length(S)) 
+    if (rank[r] < length(S))
       S[(rank[r] + 1):length(S)] <- 0
     phi[, , r] <- s$u %*% diag(S) %*% t(s$v) %*% Rho[, , r]
   }
@@ -107,7 +107,7 @@ matricize <- function(X)
     # update distance parameter to check algorithm convergence (delta(phi, Phi))
     deltaPhi <- c(deltaPhi, max((abs(phi - Phi))/(1 + abs(phi)))) #TODO: explain?
-    if (length(deltaPhi) > deltaPhiBufferSize) 
+    if (length(deltaPhi) > deltaPhiBufferSize)
       deltaPhi <- deltaPhi[2:length(deltaPhi)]
     sumDeltaPhi <- sum(abs(deltaPhi))
@@ -115,5 +115,5 @@ matricize <- function(X)
     Phi <- phi
     ite <- ite + 1
   }
-  return(list(phi = phi, LLF = LLF))
+  list(phi = phi, LLF = LLF)
 }
diff --git a/pkg/R/initSmallEM.R b/pkg/R/initSmallEM.R
index 487a4e1..10cb191 100644
--- a/pkg/R/initSmallEM.R
+++ b/pkg/R/initSmallEM.R
@@ -77,5 +77,5 @@ initSmallEM <- function(k, X, Y, fast)
   piInit <- piInit1[b, ]
   gamInit <- gamInit1[, , b]
-  return(list(phiInit = phiInit, rhoInit = rhoInit, piInit = piInit, gamInit = gamInit))
+  list(phiInit = phiInit, rhoInit = rhoInit, piInit = piInit, gamInit = gamInit)
 }
diff --git a/pkg/R/main.R b/pkg/R/main.R
index d750fec..c74d7fb 100644
--- a/pkg/R/main.R
+++ b/pkg/R/main.R
@@ -148,7 +148,7 @@ runValse <- function(X, Y, procedure = "LassoMLE", selecMod = "DDSE", gamma = 1,
   modelSel$tableau <- tableauRecap
 
   if (plot)
-    print(plot_valse(X, Y, modelSel, n))
+    print(plot_valse(X, Y, modelSel))
 
   return(modelSel)
 }
diff --git a/pkg/R/plot_valse.R b/pkg/R/plot_valse.R
index 73188d2..febc65c 100644
--- a/pkg/R/plot_valse.R
+++ b/pkg/R/plot_valse.R
@@ -5,7 +5,6 @@
 #' @param X matrix of covariates (of size n*p)
 #' @param Y matrix of responses (of size n*m)
 #' @param model the model constructed by valse procedure
-#' @param n sample size
 #' @param comp TRUE to enable pairwise clusters comparison
 #' @param k1 index of the first cluster to be compared
 #' @param k2 index of the second cluster to be compared
@@ -15,8 +14,9 @@
 #' @importFrom reshape2 melt
 #'
 #' @export
-plot_valse <- function(X, Y, model, n, comp = FALSE, k1 = NA, k2 = NA)
+plot_valse <- function(X, Y, model, comp = FALSE, k1 = NA, k2 = NA)
 {
+  n <- nrow(X)
   K <- length(model$pi)
   ## regression matrices
   gReg <- list()
diff --git a/pkg/R/selectVariables.R b/pkg/R/selectVariables.R
index e08a941..99959ca 100644
--- a/pkg/R/selectVariables.R
+++ b/pkg/R/selectVariables.R
@@ -1,6 +1,6 @@
 #' selectVariables
 #'
-#' It is a function which constructs, for a given lambda, the sets for each cluster of relevant variables.
+#' For a given lambda, construct the sets of relevant variables for each cluster.
 #'
 #' @param phiInit an initial estimator for phi (size: p*m*k)
 #' @param rhoInit an initial estimator for rho (size: m*m*k)
@@ -66,8 +66,8 @@ selectVariables <- function(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma
   if (ncores > 1)
     parallel::stopCluster(cl)
 
-  print(out)
-  # Remove models which are computed twice 
+  print(out) #DEBUG TRACE
+  # Remove models which are computed twice
   # sha1_array <- lapply(out, digest::sha1) out[ duplicated(sha1_array) ]
   selec <- lapply(out, function(model) model$selected)
   ind_dup <- duplicated(selec)
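
The matricize helper touched in the EMGrank hunk exists because extracting a single row with X[Z_indice, ] drops to a bare vector in R, which would break crossprod. A standalone copy with toy calls, for illustration only (the demo inputs are not package data):

matricize <- function(X)
{
  if (!is.matrix(X))
    return(t(as.matrix(X)))   # a bare vector becomes a 1 x p row matrix
  X                           # matrices pass through unchanged (implicit return)
}
matricize(c(1, 2, 3))   # 1 x 3 matrix
matricize(diag(2))      # returned as-is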
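The SVD step in the @@ -69,15 +69,15 @@ hunk recomposes the best rank(r) approximation by zeroing the trailing singular values (Eckart-Young). A minimal sketch of that idea in isolation; low_rank_approx, M and r are illustrative names, not package code:

low_rank_approx <- function(M, r)
{
  s <- svd(M)
  d <- s$d
  if (r < length(d))
    d[(r + 1):length(d)] <- 0   # keep only the r largest singular values
  s$u %*% diag(d, nrow = length(d)) %*% t(s$v)   # best rank-r reconstruction
}
M <- matrix(rnorm(20), 5, 4)
qr(low_rank_approx(M, 2))$rank   # 2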
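The deltaPhi lines in the @@ -107,7 +107,7 @@ hunk maintain a sliding window over the last few relative changes of phi; the EM loop stops once their sum falls under a threshold. A self-contained sketch of that buffer, where push_delta, the bufferSize default, and the toy matrices are all assumptions:

push_delta <- function(deltaPhi, phi, Phi, bufferSize = 20)
{
  # relative change between consecutive iterates, as in the hunk above
  deltaPhi <- c(deltaPhi, max(abs(phi - Phi) / (1 + abs(phi))))
  if (length(deltaPhi) > bufferSize)
    deltaPhi <- deltaPhi[-1]   # drop the oldest entry (deltaPhi[2:length(deltaPhi)])
  deltaPhi
}
deltaPhi <- push_delta(c(), matrix(1, 2, 2), matrix(1.1, 2, 2))
sum(abs(deltaPhi)) < 1e-4   # FALSE: the loop would continue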
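The end of the selectVariables hunk deduplicates models by their selected-variable sets: duplicated() works element-wise on a list, so selections identical to an earlier one are flagged. A toy illustration; this out is a stand-in, not real package output:

out <- list(list(selected = c(1, 3)),
            list(selected = 2),
            list(selected = c(1, 3)))   # same selection as the first model
selec <- lapply(out, function(model) model$selected)
duplicated(selec)                # FALSE FALSE TRUE
out <- out[!duplicated(selec)]   # two models remain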
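Since plot_valse now computes n <- nrow(X) itself, call sites simply drop the trailing argument, as the main.R hunk shows. A hypothetical end-to-end sketch, assuming the remaining runValse defaults are acceptable and the toy dimensions suffice:

library(valse)
X <- matrix(rnorm(200), 50, 4)   # n = 50 rows, p = 4 covariates
Y <- matrix(rnorm(100), 50, 2)   # m = 2 responses
model <- runValse(X, Y, plot = FALSE)
print(plot_valse(X, Y, model))   # before this commit: plot_valse(X, Y, model, n)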