#' EMGrank
#'
#' Run a generalized EM algorithm developed for mixtures of Gaussian regression
#' models with variable selection, by an extension of the low-rank estimator.
#' Reparametrization is done to ensure invariance under homothetic transformations.
#' It returns a collection of models, varying the number of clusters and the rank of the regression mean.
#'
#' @param Pi An initialization for pi, the mixture proportions
#' @param Rho An initialization for rho, the variance parameter
#' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
#' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
#' @param X matrix of covariates (of size n*p)
#' @param Y matrix of responses (of size n*m)
#' @param eps real, threshold to say the EM algorithm converges, by default = 1e-4
#' @param rank vector of possible ranks
#' @param fast TRUE to use the compiled C code, FALSE for the pure-R version
#'
#' @return A list (corresponding to the model collection) defined by (phi, LLF):
#'   phi : regression mean for each cluster
#'   LLF : log likelihood with respect to the training set
#'
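#' @examples
#' \dontrun{
#' ## A hypothetical toy call, not taken from the package docs: shapes are
#' ## inferred from the code below (Pi holds k mixture weights, Rho stacks one
#' ## m x m matrix per cluster); fast = FALSE selects the pure-R implementation.
#' n <- 50; p <- 3; m <- 2; k <- 2
#' X <- matrix(rnorm(n * p), n, p)
#' Y <- matrix(rnorm(n * m), n, m)
#' Pi <- rep(1/k, k)
#' Rho <- array(diag(m), dim = c(m, m, k))
#' res <- EMGrank(Pi, Rho, mini = 10, maxi = 100, X, Y,
#'                eps = 1e-4, rank = rep(1, k), fast = FALSE)
#' str(res$phi) # p x m x k array of regression means
#' }
#'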
#' @export
EMGrank <- function(Pi, Rho, mini, maxi, X, Y, eps, rank, fast = TRUE)
{
  if (!fast)
  {
    # Function in R
    return(.EMGrank_R(Pi, Rho, mini, maxi, X, Y, eps, rank))
  }

  # Function in C
  n <- nrow(X) # number of samples
  p <- ncol(X) # number of covariates
  m <- ncol(Y) # size of Y (multivariate response)
  k <- length(Pi) # number of mixture components
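  # phi and LLF below appear to be allocated as output buffers for the C routine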
  .Call("EMGrank", Pi, Rho, mini, maxi, X, Y, eps, as.integer(rank), phi = double(p * m * k),
    LLF = double(1), n, p, m, k, PACKAGE = "valse")
}

# Helper to always have matrices as arguments (TODO: put this elsewhere? improve?)
# Yes, we should use by-columns storage everywhere... [later!]
matricize <- function(X)
{
  if (!is.matrix(X))
    return(t(as.matrix(X))) # a dropped-dimension vector becomes a 1-row matrix
  return(X)
}
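
# Quick illustration with hypothetical values: single-row indexing drops
# dimensions in R, and matricize() restores the row-matrix shape that
# crossprod() expects in the M step below.
#   x <- matrix(1:6, nrow = 3)  # 3 x 2 matrix
#   x[1, ]                      # numeric vector of length 2
#   matricize(x[1, ])           # 1 x 2 matrix again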

# R version - slow but easy to read
.EMGrank_R <- function(Pi, Rho, mini, maxi, X, Y, eps, rank)
{
  # matrix dimensions
  n <- nrow(X)
  p <- ncol(X)
  m <- ncol(Y)
  k <- length(Pi)

  # init outputs
  phi <- array(0, dim = c(p, m, k))
  Z <- rep(1, n) # hard cluster assignments, all in cluster 1 initially
  LLF <- 0

  # local variables
  Phi <- array(0, dim = c(p, m, k)) # previous phi, for the convergence check
  deltaPhi <- c() # sliding buffer of relative changes in phi
  sumDeltaPhi <- 0
  deltaPhiBufferSize <- 20

  # main loop
  ite <- 1
  while (ite <= mini || (ite <= maxi && sumDeltaPhi > eps))
  {
    # M step: update for Beta (and then phi)
    for (r in 1:k)
    {
      Z_indice <- seq_len(n)[Z == r] # indices where Z == r
      if (length(Z_indice) == 0)
        next
      # U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
      s <- svd(MASS::ginv(crossprod(matricize(X[Z_indice, ]))) %*%
               crossprod(matricize(X[Z_indice, ]), matricize(Y[Z_indice, ])))
      S <- s$d
      # Set the singular values beyond rank(r) to zero, and recompose the best
      # rank(r) approximation of the initial product
      if (rank[r] < length(S))
        S[(rank[r] + 1):length(S)] <- 0
      phi[, , r] <- s$u %*% diag(S) %*% t(s$v) %*% Rho[, , r]
    }
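
    # The truncation above is the Eckart-Young best rank-r approximation of the
    # least-squares solution. A standalone sketch of the same operation on a
    # generic matrix B (illustrative helper, not part of the package):
    #   best_rank_r <- function(B, r) {
    #     s <- svd(B)
    #     d <- s$d
    #     d[seq_along(d) > r] <- 0 # keep only the r largest singular values
    #     s$u %*% diag(d, nrow = length(d)) %*% t(s$v)
    #   }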

    # E step and computation of the log-likelihood
    sumLogLLF2 <- 0
    for (i in seq_len(n))
    {
      sumLLF1 <- 0
      maxLogGamIR <- -Inf
      for (r in seq_len(k))
      {
        dotProduct <- tcrossprod(Y[i, ] %*% Rho[, , r] - X[i, ] %*% phi[, , r])
        logGamIR <- log(Pi[r]) + log(gdet(Rho[, , r])) - 0.5 * dotProduct
        # Z[i] = index of max (gam[i,])
        if (logGamIR > maxLogGamIR)
        {
          Z[i] <- r
          maxLogGamIR <- logGamIR
        }
        sumLLF1 <- sumLLF1 + exp(logGamIR)/(2 * pi)^(m/2)
      }
      sumLogLLF2 <- sumLogLLF2 + log(sumLLF1)
    }
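
    # Note on the E step above: under the reparametrized model, each term
    # exp(logGamIR) / (2*pi)^(m/2) equals
    #   pi_r * det(Rho_r) * exp(-0.5 * ||Y_i Rho_r - X_i phi_r||^2) / (2*pi)^(m/2),
    # the weighted density of component r, so sumLogLLF2 accumulates the
    # observed-data log-likelihood while Z[i] keeps the arg-max component.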

    # criterion stored: -(1/n) times the log-likelihood computed above
    LLF <- -1/n * sumLogLLF2

    # update distance parameter to check algorithm convergence: relative change
    # between successive phi estimates, summed over a sliding buffer of the last
    # deltaPhiBufferSize iterations
    deltaPhi <- c(deltaPhi, max((abs(phi - Phi))/(1 + abs(phi))))
    if (length(deltaPhi) > deltaPhiBufferSize)
      deltaPhi <- deltaPhi[2:length(deltaPhi)]
    sumDeltaPhi <- sum(abs(deltaPhi))

    # update other local variables
    Phi <- phi
    ite <- ite + 1
  }
  return(list(phi = phi, LLF = LLF))
}