#' EMGrank
#'
#' Run an EM algorithm estimating the renormalized mean parameters phi under
#' the rank constraints given in 'rank', the proportions Pi and the variance
#' parameters Rho being kept fixed.
#'
#' @param Pi Proportions parameter of the mixture
#' @param Rho Renormalized initial variance parameter
#' @param mini Minimum number of iterations of the EM algorithm
#' @param maxi Maximum number of iterations of the EM algorithm
#' @param X Regressors
#' @param Y Response
#' @param tau Threshold to accept convergence
#' @param rank Vector of possible ranks
#' @param fast TRUE to use the compiled C implementation, FALSE for the plain R version
#'
#' @return A list with
#'   phi : renormalized mean parameter, estimated by the EM algorithm
#'   LLF : log-likelihood of this sample, at the estimated parameter values
#'
#' @export
EMGrank <- function(Pi, Rho, mini, maxi, X, Y, tau, rank, fast=TRUE)
{
  if (!fast)
  {
    # Function in R
    return (.EMGrank_R(Pi, Rho, mini, maxi, X, Y, tau, rank))
  }

  # Function in C
  n = nrow(X) #number of samples
  p = ncol(X) #number of covariates
  m = ncol(Y) #size of Y (multivariate response)
  k = length(Pi) #number of components in the mixture
  .Call("EMGrank",
    Pi, Rho, mini, maxi, X, Y, tau, rank,
    phi=double(p*m*k), LLF=double(1),
    n, p, m, k,
    PACKAGE="valse")
}
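# A minimal usage sketch of the R version (toy data, arbitrary dimensions,
# following the conventions above: X is n x p, Y is n x m, Pi has length k,
# Rho is an m x m x k array, rank has length k):
#   n <- 50; p <- 4; m <- 3; k <- 2
#   X <- matrix(rnorm(n*p), n, p)
#   Y <- matrix(rnorm(n*m), n, m)
#   Pi <- rep(1/k, k)
#   Rho <- array(diag(m), dim=c(m,m,k))  #identity variance parameter per component
#   res <- EMGrank(Pi, Rho, mini=5, maxi=50, X, Y, tau=1e-6, rank=c(1,2), fast=FALSE)
#   dim(res$phi)  # c(p, m, k): rank-constrained mean parameters
#   res$LLF       # log-likelihood at the estimated parameters
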
#helper to always have matrices as arg (TODO: put this elsewhere? improve?)
# --> Yes, we should use by-columns storage everywhere... [later!]
matricize <- function(X)
{
  if (!is.matrix(X))
    return (t(as.matrix(X)))
  return (X)
}
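# e.g. when Z_indice has a single element in .EMGrank_R below, X[Z_indice,]
# drops to a plain vector; matricize() turns it back into a 1 x p matrix so
# that crossprod() still receives matrix arguments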

# R version - slow but easy to read
.EMGrank_R = function(Pi, Rho, mini, maxi, X, Y, tau, rank)
{
  #matrix dimensions
  n = dim(X)[1]
  p = dim(X)[2]
  m = dim(Rho)[2]
  k = dim(Rho)[3]

  #init outputs
  phi = array(0, dim=c(p,m,k))
  Z = rep(1, n)
  LLF = 0

  #local variables
  Phi = array(0, dim=c(p,m,k))
  deltaPhi = c()
  sumDeltaPhi = 0.
  deltaPhiBufferSize = 20

  #main loop
  ite = 1
  while (ite<=mini || (ite<=maxi && sumDeltaPhi>tau))
  {
    #M step: update for Beta (and then phi)
    for (r in 1:k)
    {
      Z_indice = seq_len(n)[Z==r] #indices where Z == r
      if (length(Z_indice) == 0)
        next
      #U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
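      # Here (t(Xr)Xr)^+ t(Xr) Yr is the least-squares estimate of the
      # regression of Yr on Xr (Moore-Penrose pseudo-inverse via MASS::ginv);
      # truncating its SVD just below keeps its best rank(r) approximation
      # (Eckart-Young theorem)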
      s = svd( MASS::ginv(crossprod(matricize(X[Z_indice,]))) %*%
               crossprod(matricize(X[Z_indice,]), matricize(Y[Z_indice,])) )
      S = s$d
      #Set the singular values after the rank[r]-th to zero, and recompose
      #the best rank(r) approximation of the initial product
      if (rank[r] < length(S))
        S[(rank[r]+1):length(S)] = 0
      phi[,,r] = s$u %*% diag(S) %*% t(s$v) %*% Rho[,,r]
    }

    #E step, and computation of the log-likelihood
    sumLogLLF2 = 0
    for (i in seq_len(n))
    {
      sumLLF1 = 0
      maxLogGamIR = -Inf
      for (r in seq_len(k))
      {
        dotProduct = tcrossprod(Y[i,]%*%Rho[,,r] - X[i,]%*%phi[,,r])
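        # logGamIR below is log( Pi[r] * density of Y[i,] under component r )
        # up to the (2*pi)^(m/2) constant: in this renormalized parametrization
        # the component density is det(Rho_r) * (2*pi)^(-m/2) *
        # exp(-||y %*% Rho_r - x %*% phi_r||^2 / 2); the constant is put back
        # when accumulating sumLLF1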
        logGamIR = log(Pi[r]) + log(det(Rho[,,r])) - 0.5*dotProduct
        #Z[i] = index of max (gam[i,])
        if (logGamIR > maxLogGamIR)
        {
          Z[i] = r
          maxLogGamIR = logGamIR
        }
        sumLLF1 = sumLLF1 + exp(logGamIR) / (2*pi)^(m/2)
      }
      sumLogLLF2 = sumLogLLF2 + log(sumLLF1)
    }

    LLF = -1/n * sumLogLLF2

    #update distance parameter to check algorithm convergence (delta(phi, Phi))
    deltaPhi = c( deltaPhi, max( (abs(phi-Phi)) / (1+abs(phi)) ) ) #TODO: explain?
    if (length(deltaPhi) > deltaPhiBufferSize)
      deltaPhi = deltaPhi[2:length(deltaPhi)]
    sumDeltaPhi = sum(abs(deltaPhi))

    #update other local variables
    Phi = phi
    ite = ite+1
  }
  return(list("phi"=phi, "LLF"=LLF))
}