#' EMGrank
#'
#' Run a generalized EM algorithm developed for mixtures of Gaussian regression
#' models with variable selection, by an extension of the low-rank estimator.
#' A reparametrization ensures invariance under homothetic transformations.
#' It returns a collection of models, varying the number of clusters and the rank of the regression mean.
#'
#' @param Pi An initialization for pi, the vector of cluster proportions
#' @param Rho An initialization for rho, the variance parameter
#' @param mini integer, minimum number of iterations in the EM algorithm, by default 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default 100
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold below which the EM algorithm is declared to have converged, by default 1e-4
#' @param rank vector of possible ranks
#' @param fast boolean; if TRUE, call the compiled C implementation instead of the R one
#'
#' @return A list (corresponding to the model collection) defined by (phi, LLF):
#'   phi: regression mean for each cluster
#'   LLF: log-likelihood with respect to the training set
#'
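#' @examples
#' ## Illustrative sketch only (not from the original documentation): simulated
#' ## data with k = 2 clusters; Rho is initialized to identity slices, an
#' ## arbitrary neutral choice made for this example.
#' \donttest{
#' n <- 50; p <- 3; m <- 2; k <- 2
#' X <- matrix(rnorm(n * p), n, p)
#' Y <- matrix(rnorm(n * m), n, m)
#' Pi <- rep(1 / k, k)
#' Rho <- array(diag(m), dim = c(m, m, k))
#' res <- EMGrank(Pi, Rho, mini = 10, maxi = 100, X, Y, eps = 1e-4,
#'   rank = rep(1, k), fast = FALSE)
#' str(res$phi)
#' res$LLF
#' }
#'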
#' @export
EMGrank <- function(Pi, Rho, mini, maxi, X, Y, eps, rank, fast)
{
  if (!fast)
  {
    # Function in R
    return(.EMGrank_R(Pi, Rho, mini, maxi, X, Y, eps, rank))
  }

  # Function in C
  .Call("EMGrank", Pi, Rho, mini, maxi, X, Y, eps, as.integer(rank), PACKAGE = "valse")
}

# Helper to always have matrices as arguments (TODO: put this elsewhere? improve?)
# Yes, we should use by-columns storage everywhere... [later!]
matricize <- function(X)
{
  if (!is.matrix(X))
    return(t(as.matrix(X)))
  return(X)
}
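# e.g. for a matrix X, matricize(X[3, ]) turns the length-p vector produced by
# single-row indexing back into a 1 x p matrix, so downstream crossprod() calls
# always receive matrices.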

# R version - slow but easy to read
.EMGrank_R <- function(Pi, Rho, mini, maxi, X, Y, eps, rank)
46{
47 # matrix dimensions
48 n <- nrow(X)
49 p <- ncol(X)
50 m <- ncol(Y)
51 k <- length(Pi)
52
53 # init outputs
54 phi <- array(0, dim = c(p, m, k))
55 Z <- rep(1, n)
56 LLF <- 0
57
58 # local variables
59 Phi <- array(0, dim = c(p, m, k))
60 deltaPhi <- c()
61 sumDeltaPhi <- 0
62 deltaPhiBufferSize <- 20
63
64 # main loop
65 ite <- 1
  while (ite <= mini || (ite <= maxi && sumDeltaPhi > eps))
  {
    # M step: update for Beta (and then phi)
    for (r in 1:k)
    {
      Z_indice <- seq_len(n)[Z == r] # indices where Z == r
      if (length(Z_indice) == 0)
        next
      # U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
      s <- svd(MASS::ginv(crossprod(matricize(X[Z_indice, ]))) %*%
        crossprod(matricize(X[Z_indice, ]), matricize(Y[Z_indice, ])))
      S <- s$d
      # Set the trailing singular values (beyond rank(r)) to zero, and recompose
      # the best rank(r) approximation of the initial product
      if (rank[r] < length(S))
        S[(rank[r] + 1):length(S)] <- 0
      # nrow= guards the degenerate length-1 case of diag()
      phi[, , r] <- s$u %*% diag(S, nrow = length(S)) %*% t(s$v) %*% Rho[, , r]
    }
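    # Side note: zeroing the trailing singular values as above yields the best
    # rank-r approximation in Frobenius norm (Eckart-Young theorem). For a
    # generic matrix B and illustrative target rank r:
    #   s <- svd(B); s$d[(r + 1):length(s$d)] <- 0
    #   B_r <- s$u %*% diag(s$d, nrow = length(s$d)) %*% t(s$v)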

    # Step E and computation of the log-likelihood
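    # For reference, restating the code below: with this parametrization, the
    # conditional density of Y_i in cluster r is
    #   f_r(Y_i) = gdet(Rho_r) / (2*pi)^(m/2) * exp(-0.5 * ||Y_i Rho_r - X_i phi_r||^2)
    # so logGamIR is log(pi_r * f_r(Y_i)) up to the (2*pi)^(-m/2) constant,
    # which is reinstated when accumulating sumLLF1.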
    sumLogLLF2 <- 0
    for (i in seq_len(n))
    {
      sumLLF1 <- 0
      maxLogGamIR <- -Inf
      for (r in seq_len(k))
      {
        dotProduct <- tcrossprod(Y[i, ] %*% Rho[, , r] - X[i, ] %*% phi[, , r])
        logGamIR <- log(Pi[r]) + log(gdet(Rho[, , r])) - 0.5 * dotProduct
        # Z[i] = index of the max of gam[i, ]
        if (logGamIR > maxLogGamIR)
        {
          Z[i] <- r
          maxLogGamIR <- logGamIR
        }
        sumLLF1 <- sumLLF1 + exp(logGamIR) / (2 * pi)^(m / 2)
      }
      sumLogLLF2 <- sumLogLLF2 + log(sumLLF1)
    }

    LLF <- -1/n * sumLogLLF2 # negated average log-likelihood over the sample

    # Update the convergence criterion delta(phi, Phi): deltaPhi records the
    # relative change of phi over the last deltaPhiBufferSize (= 20) iterations,
    # and the loop stops once the summed recent changes fall below eps.
    deltaPhi <- c(deltaPhi, max((abs(phi - Phi)) / (1 + abs(phi))))
    if (length(deltaPhi) > deltaPhiBufferSize)
      deltaPhi <- deltaPhi[2:length(deltaPhi)]
    sumDeltaPhi <- sum(abs(deltaPhi))

    # update other local variables
    Phi <- phi
    ite <- ite + 1
  }
  return(list(phi = phi, LLF = LLF))
}