[valse.git] src/test/generate_test_data/helpers/EMGrank.m
function [phi,LLF] = EMGrank(Pi,Rho,mini,maxi,X,Y,tau,rank)
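%
% EMGrank: EM-type estimation of rank-constrained regression parameters in a
% mixture model (description inferred from the code below; treat it as an
% editor's reading, not original documentation).
%
% Inputs, as used in this function:
%   Pi   : k-vector of cluster proportions (enters only through log(Pi(r)))
%   Rho  : m x m x k array of per-cluster scale matrices applied to Y
%          (they appear to act as inverse square roots of the noise covariances)
%   mini : minimum number of iterations of the main loop
%   maxi : maximum number of iterations of the main loop
%   X    : n x p matrix of covariates
%   Y    : n x m matrix of responses
%   tau  : convergence threshold on the accumulated relative change of phi
%   rank : k-vector of target ranks, one per cluster (shadows MATLAB's rank())
%
% Outputs:
%   phi : p x m x k array of regression parameters (one slice per cluster)
%   LLF : negative mean log-likelihood at the last iteration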

% get matrix sizes
[~,m,k] = size(Rho);
[n,p] = size(X);

% allocate output matrices
phi = zeros(p,m,k);
Z = ones(n,1,'int64'); % Z(i) = current cluster assignment of observation i
LLF = 0.0;

% local variables
Phi = zeros(p,m,k); % value of phi at the previous iteration
deltaPhi = [];
sumDeltaPhi = 0.0;
deltaPhiBufferSize = 20;
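% deltaPhi keeps a sliding window of the most recent relative changes of phi;
% sumDeltaPhi, the sum of that window, drives the stopping test of the main loop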

% main loop (at least mini and at most maxi iterations)
ite = int64(1);
while ite<=mini || (ite<=maxi && sumDeltaPhi>tau)

	% M step: update Beta (and thus phi)
	for r=1:k
		if (sum(Z==r) == 0)
			continue;
		end
		% U,S,V = SVD of (t(Xr)Xr)^{-1} * t(Xr) * Yr
		[U,S,V] = svd(pinv(transpose(X(Z==r,:))*X(Z==r,:))*transpose(X(Z==r,:))*Y(Z==r,:));
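		% (the decomposed matrix is the least-squares estimate of Beta for cluster r;
		% pinv, the Moore-Penrose pseudo-inverse, keeps it defined even if t(Xr)*Xr is singular)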
		% keep only the first rank(r) singular values and recompose
		% the best rank(r) approximation of the initial product
		S(rank(r)+1:end,:) = 0;
		phi(:,:,r) = U * S * transpose(V) * Rho(:,:,r);
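		% (so phi(:,:,r) = Beta_r * Rho(:,:,r), the parameterisation the E step below expects)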
	end

	% E step and computation of LLF
	sumLogLLF2 = 0.0;
	for i=1:n
		sumLLF1 = 0.0;
		maxLogGamIR = -Inf;
		for r=1:k
			dotProduct = (Y(i,:)*Rho(:,:,r)-X(i,:)*phi(:,:,r)) * transpose(Y(i,:)*Rho(:,:,r)-X(i,:)*phi(:,:,r));
			logGamIR = log(Pi(r)) + log(det(Rho(:,:,r))) - 0.5*dotProduct;
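			% (logGamIR = log of Pi(r)*det(Rho_r)*exp(-0.5*||Y_i*Rho_r - X_i*phi_r||^2),
			% i.e. cluster r's weighted density at observation i, up to the common
			% factor (2*pi)^(-m/2) that is reinstated when accumulating sumLLF1)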
			% Z(i) = index of max (gam(i,:))
			if logGamIR > maxLogGamIR
				Z(i) = r;
				maxLogGamIR = logGamIR;
			end
			sumLLF1 = sumLLF1 + exp(logGamIR) / (2*pi)^(m/2);
		end
		sumLogLLF2 = sumLogLLF2 + log(sumLLF1);
	end

	LLF = -1/n * sumLogLLF2;
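	% (LLF is thus the negative mean log-likelihood over the n observations)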

	% update distance parameter to check algorithm convergence (delta(phi, Phi))
	deltaPhi = [ deltaPhi, max(max(max((abs(phi-Phi))./(1+abs(phi))))) ];
	if length(deltaPhi) > deltaPhiBufferSize
		deltaPhi = deltaPhi(2:end);
	end
	sumDeltaPhi = sum(abs(deltaPhi));

	% update other local variables
	Phi = phi;
	ite = ite+1;

end

end
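
% Example usage (an illustrative sketch by the editor: the dimensions, values and
% the variable name rk below are made up, not taken from the original test code):
%
%   n = 100; p = 5; m = 3; k = 2;
%   X = randn(n,p);                      % covariates
%   Y = randn(n,m);                      % responses
%   Pi = [0.5 0.5];                      % cluster proportions
%   Rho = repmat(eye(m),[1 1 k]);        % per-cluster scale matrices
%   rk = [1 2];                          % target rank for each cluster
%   [phi,LLF] = EMGrank(Pi,Rho,5,100,X,Y,1e-6,rk);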