From: emilie
Date: Fri, 17 Mar 2017 10:57:54 +0000 (+0100)
Subject: update the model selection step. Beginning for the plots
X-Git-Url: https://git.auder.net/variants/current/doc/scripts/css/R.css?a=commitdiff_plain;h=3f62d540d32b70f42b9090087f72426c18cb219e;p=valse.git

update the model selection step. Beginning for the plots
---

diff --git a/pkg/NAMESPACE b/pkg/NAMESPACE
index d3bb83f..a4e6bf0 100644
--- a/pkg/NAMESPACE
+++ b/pkg/NAMESPACE
@@ -9,6 +9,7 @@ export(gridLambda)
 export(initSmallEM)
 export(modelSelection)
 export(selectVariables)
+export(valse)
 importFrom(methods,new)
 importFrom(stats,cutree)
 importFrom(stats,dist)
diff --git a/pkg/R/constructionModelesLassoMLE.R b/pkg/R/constructionModelesLassoMLE.R
index ed08b38..f86e816 100644
--- a/pkg/R/constructionModelesLassoMLE.R
+++ b/pkg/R/constructionModelesLassoMLE.R
@@ -49,7 +49,8 @@ constructionModelesLassoMLE = function(phiInit,rhoInit,piInit,gamInit,mini,maxi,
   pi = list()
   llh = list()
-  for (lambda in 1:L){
+  out = lapply( seq_along(selected), function(lambda)
+  {
     sel.lambda = selected[[lambda]]
     col.sel = which(colSums(sel.lambda)!=0)
     res_EM = EMGLLF(phiInit[col.sel,,],rhoInit,piInit,gamInit,mini,maxi,gamma,0.,X[,col.sel],Y,tau)
@@ -78,11 +79,9 @@ constructionModelesLassoMLE = function(phiInit,rhoInit,piInit,gamInit,mini,maxi,
         det(rhoLambda[,,r])/(sqrt(2*base::pi))^m * exp(-tcrossprod(delta)/2.0)
     }
     llhLambda = c( sum(log(densite)), (dimension+m+1)*k-1 )
-    rho[[lambda]] = rhoLambda
-    phi[[lambda]] = phiLambda
-    pi[[lambda]] = piLambda
-    llh[[lambda]] = llhLambda
+    list("phi"= phiLambda, "rho"= rhoLambda, "pi"= piLambda, "llh" = llhLambda)
   }
+  )
+  return(out)
 }
-  return(list("phi"=phi, "rho"=rho, "pi"=pi, "llh" = llh))
 }
diff --git a/pkg/R/valse.R b/pkg/R/valse.R
index 46a5fd0..779f61a 100644
--- a/pkg/R/valse.R
+++ b/pkg/R/valse.R
@@ -3,7 +3,7 @@
 #' @param X matrix of covariates (of size n*p)
 #' @param Y matrix of responses (of size n*m)
 #' @param procedure among 'LassoMLE' or 'LassoRank'
-#' @param selecMod method to select a model among 'SlopeHeuristic', 'BIC', 'AIC'
+#' @param selecMod method to select a model among 'DDSE', 'DJump', 'BIC' or 'AIC'
 #' @param gamma integer for the power in the penaly, by default = 1
 #' @param mini integer, minimum number of iterations in the EM algorithm, by default = 10
 #' @param maxi integer, maximum number of iterations in the EM algorithm, by default = 100
@@ -15,22 +15,23 @@
 #' @return a list with estimators of parameters
 #' @export
 #-----------------------------------------------------------------------
-valse = function(X,Y,procedure = 'LassoMLE',selecMod = 'BIC',gamma = 1,mini = 10,
-  maxi = 100,eps = 1e-4,kmin = 2,kmax = 5,
+valse = function(X,Y,procedure = 'LassoMLE',selecMod = 'DDSE',gamma = 1,mini = 10,
+  maxi = 100,eps = 1e-4,kmin = 2,kmax = 3,
   rang.min = 1,rang.max = 10) {
   ##################################
   #core workflow: compute all models
   ##################################
-  p = dim(phiInit)[1]
-  m = dim(phiInit)[2]
+  p = dim(X)[2]
+  m = dim(Y)[2]
   n = dim(X)[1]
-  tableauRecap = array(, dim=c(1000,4))
+  model = list()
+  tableauRecap = array(0, dim=c(1000,4))
   cpt = 0
   print("main loop: over all k and all lambda")
-
-for (k in kmin:kmax){
+
+  for (k in kmin:kmax){
   print(k)
   print("Parameters initialization")
   #smallEM initializes parameters by k-means and regression model in each component,
@@ -42,15 +43,15 @@ for (k in kmin:kmax){
   piInit <<- init$piInit
   gamInit <<- init$gamInit
   source('~/valse/pkg/R/gridLambda.R')
-  gridLambda <<- gridLambda(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini, maxi, eps)
+  grid_lambda <<- gridLambda(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini, maxi, eps)
   print("Compute relevant parameters")
   #select variables according to each regularization parameter
   #from the grid: A1 corresponding to selected variables, and
   #A2 corresponding to unselected variables.
-  params = selectiontotale(phiInit,rhoInit,piInit,gamInit,mini,maxi,gamma,gridLambda[seq(1,length(gridLambda), by=3)],X,Y,1e-8,eps)
-  params2 = selectVariables(phiInit,rhoInit,piInit,gamInit,mini,maxi,gamma,gridLambda[seq(1,length(gridLambda), by=3)],X,Y,1e-8,eps)
+  params = selectiontotale(phiInit,rhoInit,piInit,gamInit,mini,maxi,gamma,grid_lambda,X,Y,1e-8,eps)
+  #params2 = selectVariables(phiInit,rhoInit,piInit,gamInit,mini,maxi,gamma,grid_lambda[seq(1,length(grid_lambda), by=3)],X,Y,1e-8,eps)
   ## etrange : params et params 2 sont différents ...
   selected <<- params$selected
@@ -62,8 +63,12 @@ for (k in kmin:kmax){
   #compute parameter estimations, with the Maximum Likelihood
   #Estimator, restricted on selected variables.
   model[[k]] = constructionModelesLassoMLE(phiInit, rhoInit,piInit,gamInit,mini,maxi,gamma,X,Y,thresh,eps,selected)
-  LLH = unlist(model[[k]]$llh)[seq(1,2*length(model[[k]]),2)]
-  D = unlist(model[[k]]$llh)[seq(1,2*length(model[[k]]),2)+1]
+  llh = matrix(ncol = 2)
+  for (l in seq_along(model[[k]])){
+    llh = rbind(llh, model[[k]][[l]]$llh)
+  }
+  LLH = llh[-1,1]
+  D = llh[-1,2]
  } else {
   print('run the procedure Lasso-Rank')
   #compute parameter estimations, with the Low Rank
@@ -89,15 +94,19 @@ for (k in kmin:kmax){
   cpt = cpt+length(model[[k]])
 }
 print('Model selection')
-
-  tableauRecap = array(dim = c())
-  if (selecMod == 'SlopeHeuristic') {
-
+  tableauRecap = tableauRecap[rowSums(tableauRecap[, 2:4])!=0,]
+  tableauRecap = tableauRecap[(tableauRecap[,1])!=Inf,]
+  data = cbind(1:dim(tableauRecap)[1], tableauRecap[,2], tableauRecap[,2], tableauRecap[,1])
+  require(capushe)
+  modSel = capushe(data, n)
+  if (selecMod == 'DDSE') {
+    indModSel = as.numeric(modSel@DDSE@model)
+  } else if (selecMod == 'Djump') {
+    indModSel = as.numeric(modSel@Djump@model)
  } else if (selecMod == 'BIC') {
-    BIC = -2*tableauRecap[,1]+log(n)*tableauRecap[,2]
-    indMinBIC = which.min(BIC)
-    return(model[[tableauRecap[indMinBIC,3]]][[tableauRecap[indMinBIC,4]]])
+    indModSel = modSel@BIC_capushe$model
  } else if (selecMod == 'AIC') {
-
+    indModSel = modSel@AIC_capushe$model
  }
+  return(model[[tableauRecap[indModSel,3]]][[tableauRecap[indModSel,4]]])
 }
diff --git a/pkg/man/generateXY.Rd b/pkg/man/generateXY.Rd
index ef76d82..6645c59 100644
--- a/pkg/man/generateXY.Rd
+++ b/pkg/man/generateXY.Rd
@@ -7,15 +7,15 @@ generateXY(meanX, covX, covY, pi, beta, n)
 }
 \arguments{
-\item{meanX}{matrix of group means for covariates (of size p*K)}
+\item{meanX}{matrix of group means for covariates (of size p)}
 
-\item{covX}{covariance for covariates (of size p*p*K)}
+\item{covX}{covariance for covariates (of size p*p)}
 
 \item{covY}{covariance for the response vector (of size m*m*K)}
 
 \item{pi}{proportion for each cluster}
 
-\item{beta}{regression matrix}
+\item{beta}{regression matrix, of size p*m*k}
 
 \item{n}{sample size}
 }
diff --git a/pkg/man/selectVariables.Rd b/pkg/man/selectVariables.Rd
index 09a52f2..af79059 100644
--- a/pkg/man/selectVariables.Rd
+++ b/pkg/man/selectVariables.Rd
@@ -2,12 +2,10 @@
 % Please edit documentation in R/selectVariables.R
 \name{selectVariables}
 \alias{selectVariables}
-\title{selectVaribles
-It is a function which construct, for a given lambda, the sets of
-relevant variables and irrelevant variables.}
+\title{selectVariables}
 \usage{
 selectVariables(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, glambda,
-  X, Y, thres, tau)
+  X, Y, thresh, tau, ncores = 1)
 }
 \arguments{
 \item{phiInit}{an initial estimator for phi (size: p*m*k)}
@@ -30,17 +28,15 @@ selectVariables(phiInit, rhoInit, piInit, gamInit, mini, maxi, gamma, glambda,
 \item{Y}{matrix of responses}
 
-\item{thres}{threshold to consider a coefficient to be equal to 0}
-
 \item{tau}{threshold to say that EM algorithm has converged}
+
+\item{thres}{threshold to consider a coefficient to be equal to 0}
 }
 \value{
-TODO
+a list of outputs, for each lambda in grid: selected,Rho,Pi
 }
 \description{
-selectVaribles
-It is a function which construct, for a given lambda, the sets of
-relevant variables and irrelevant variables.
+It is a function which construct, for a given lambda, the sets of relevant variables.
 }
 \examples{
 TODO
 }
diff --git a/pkg/man/valse.Rd b/pkg/man/valse.Rd
new file mode 100644
index 0000000..48271b1
--- /dev/null
+++ b/pkg/man/valse.Rd
@@ -0,0 +1,42 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/valse.R
+\name{valse}
+\alias{valse}
+\title{Main function}
+\usage{
+valse(X, Y, procedure = "LassoMLE", selecMod = "BIC", gamma = 1,
+  mini = 10, maxi = 100, eps = 1e-04, kmin = 2, kmax = 5,
+  rang.min = 1, rang.max = 10)
+}
+\arguments{
+\item{X}{matrix of covariates (of size n*p)}
+
+\item{Y}{matrix of responses (of size n*m)}
+
+\item{procedure}{among 'LassoMLE' or 'LassoRank'}
+
+\item{selecMod}{method to select a model among 'SlopeHeuristic', 'BIC', 'AIC'}
+
+\item{gamma}{integer for the power in the penaly, by default = 1}
+
+\item{mini}{integer, minimum number of iterations in the EM algorithm, by default = 10}
+
+\item{maxi}{integer, maximum number of iterations in the EM algorithm, by default = 100}
+
+\item{eps}{real, threshold to say the EM algorithm converges, by default = 1e-4}
+
+\item{kmin}{integer, minimum number of clusters, by default = 2}
+
+\item{kmax}{integer, maximum number of clusters, by default = 10}
+
+\item{rang.min}{integer, minimum rank in the low rank procedure, by default = 1}
+
+\item{rang.max}{integer, maximum rank in the}
+}
+\value{
+a list with estimators of parameters
+}
+\description{
+Main function
+}
+
diff --git a/reports/bazar Emilie/dessinBeta.m b/reports/bazar Emilie/dessinBeta.m
new file mode 100644
index 0000000..78c3b91
--- /dev/null
+++ b/reports/bazar Emilie/dessinBeta.m
@@ -0,0 +1,97 @@
+niveauColores
+
+for L=ind
+  for r=1:max(b(:,L))
+    betaLassoMLE(:,:,r)=inv(rhoLassoMLE(:,:,r,L)) * phiLassoMLE(:,:,r,L);
+  end
+end
+dessinRho = zeros(max(K),size(rhoLassoMLE,1),max(ind));
+cpt=1;
+gr = gray(64);
+
+for L=ind
+  k=size(find(piLassoMLE(:,L)~=0),1)
+  figure(1)
+  %for r=1:k
+  r=1
+  subplot(1,k+1,r)
+  cpt=cpt+1;
+  colormap(gr(end:-1:1,:));
+  imagesc(abs(betaLassoMLE(:,:,r)))
+  set(gca,'xtick',[],'ytick',[])
+  set(gca, 'FontSize', 30)
+  title(['$\hat{\underline{\beta}_1}$'],'FontSize', 45,'Interpreter','latex')
+  h=colorbar;
+  set(gca, 'FontSize', 30)
+  r=2
+  subplot(1,k+1,r)
+  cpt=cpt+1;
+  colormap(gr(end:-1:1,:));
+  imagesc(abs(betaLassoMLE(:,:,r)))
+  set(gca,'xtick',[],'ytick',[])
+  set(gca, 'FontSize', 30)
+  title(['$\hat{\underline{\beta}_2}$'],'FontSize', 45,'Interpreter','latex')
+  h=colorbar;
+  set(gca, 'FontSize', 30)
+
+  %end
+  subplot(1,k+1,k+1)
+  gr = gray(64);
+  colormap(gr(end:-1:1,:));
+  imagesc(abs(betaLassoMLE(:,:,1)-betaLassoMLE(:,:,2)))
+  set(gca,'xtick',[],'ytick',[])
+
title('$|\hat{\underline{\beta}_1}-\hat{\underline{\beta}}_2|$','FontSize',45,'Interpreter','latex') + h=colorbar; + set(gca, 'FontSize', 30) + figure(L+k+1) + for r=1:k + for z=1:size(rhoLassoMLE,1) + dessinRho(r,z,L) = inv(rhoLassoMLE(z,z,r,L)^2); + end + end + + %for r=1:k + r=1 + subplot(k,1,r) +% if r==1 +% colormap(cmapRouge(end:-1:1,:)); +% else if r==2 +% colormap(cmapBleu(end:-1:1,:)); +% else colormap(cmapVert(end:-1:1,:)); +% end +% end + imagesc(dessinRho(r,:,L)) + set(gca,'xtick',[],'ytick',[]) + xlabh = get(gca,'XLabel'); + set(xlabh,'Position',get(xlabh,'Position') + [0 .15 0]) + title('$\hat{\Sigma}_1$','FontSize', 55,'Interpreter','latex') + xlabel('d_4[1] d_4[2] d_4[3] d_3[1] d_3[2] d_3[3] d_3[4] d_3[5] d_3[6]','FontSize', 30) + set(gca, 'FontSize', 45) + set(gca,'Position',[0.01 0.1+0.5*(2-r) 0.9 0.2]) + + r=2 + subplot(k,1,r) +% if r==1 +% colormap(cmapRouge(end:-1:1,:)); +% else if r==2 +% colormap(cmapBleu(end:-1:1,:)); +% else colormap(cmapVert(end:-1:1,:)); +% end +% end + imagesc(dessinRho(r,:,L)) + set(gca,'xtick',[],'ytick',[]) + xlabh = get(gca,'XLabel'); + set(xlabh,'Position',get(xlabh,'Position') + [0 .15 0]) + title('$\hat{\Sigma}_2$','FontSize', 55,'Interpreter','latex') + xlabel('d_4[1] d_4[2] d_4[3] d_3[1] d_3[2] d_3[3] d_3[4] d_3[5] d_3[6]','FontSize', 30) + set(gca, 'FontSize', 45) + set(gca,'Position',[0.01 0.1+0.5*(2-r) 0.9 0.2]) + %end + %h=colorbar; + gr = gray(64); + colormap(gr(end:-1:1,:)); + axes('Position', [0.14 0.1 0.8 0.8], 'Visible', 'off'); +% c=colorbar; +% set(gca, 'FontSize', 40) +% caxis([0 max(max(dessinRho(:,:,L)))]) +end \ No newline at end of file diff --git a/reports/bazar Emilie/niveauColores.m b/reports/bazar Emilie/niveauColores.m new file mode 100644 index 0000000..41a46ff --- /dev/null +++ b/reports/bazar Emilie/niveauColores.m @@ -0,0 +1,18 @@ +%niveau de rouge +cmapRouge = ones(100,3); +for i=1:100 + cmapRouge(i,2) = i/100; + cmapRouge(i,3) = i/100; +end +%niveau de vert +cmapVert = ones(100,3); +for i=1:100 + cmapVert(i,1) = i/100; + cmapVert(i,3) = i/100; +end +%niveau de bleu +cmapBleu = ones(100,3); +for i=1:100 + cmapBleu(i,1) = i/100; + cmapBleu(i,2) = i/100; +end \ No newline at end of file diff --git a/reports/bazar Emilie/ondelettes.m b/reports/bazar Emilie/ondelettes.m new file mode 100644 index 0000000..b54635f --- /dev/null +++ b/reports/bazar Emilie/ondelettes.m @@ -0,0 +1,83 @@ +%Dessin ondelettes +%script +figure(1) +courbe = X2(:,1)-mean(X2(:,1)); +[C,L] = wavedec(courbe, 4,'haar'); + +subplot(6,1,1) +plot(1:7,courbe(1:7),'b','LineWidth',2) +axis([0 48 -1.6 1.6]) +hold on +plot(8:48,courbe(8:48),'b','LineWidth',2) +%plot(waverec([C(1:12)',zeros(1,36)],L,'haar'),'b--','LineWidth',2) +%courbe2 = X2(:,1); +%[C2,L2] = wavedec(courbe2,4,'haar'); +%plot(waverec([zeros(1,3),C2(4:12)',zeros(1,36)],L2,'haar'),'b-.','LineWidth',2) +hold off +ylabel('z','FontSize', 30) +set(gca, 'FontSize', 20) +subplot(6,1,2) +A4=zeros(1,48); +Coeff = zeros(5,48); +for r=1:16 + A4(r)=C(1)/power(2,5/2); + A4(r+16)=C(2)/power(2,5/2); + A4(r+32) = C(3)/power(2,5/2); +end +Coeff(5,8)=C(1); +Coeff(5,24) = C(2); +Coeff(5,40) = C(3); +stairs(A4,'b','LineWidth',2) +axis([0 48 -0.5 0.5]) +ylabel('A_4','FontSize', 30) +set(gca, 'FontSize', 20) +subplot(6,1,3) +D4=zeros(1,48); +for r=1:8 + D4(r) = C(4)/power(2,4/2); + D4(r+8) = -C(4)/power(2,4/2); + D4(r+16) = C(5)/power(2,4/2); + D4(r+24) = -C(5)/power(2,4/2); + D4(r+32) = C(6)/power(2,4/2); + D4(r+40) = -C(6)/power(2,4/2); +end +Coeff(4,8) = C(4); +Coeff(4,24) = C(5); +Coeff(4,40) = C(6); 
+stairs(D4,'b','LineWidth',2) +axis([0 48 -0.9 0.9]) +ylabel('D_4','FontSize', 30) + set(gca, 'FontSize', 20) +subplot(6,1,4) +D3=zeros(1,48); +for k=1:12 + for r=1:4 + D3(r+4*(k-1)) = (-1)^(k+1) *C(7+floor((k-1)/2))/power(2,3/2); + end +end +stairs(D3,'b','LineWidth',2) +ylabel('D_3','FontSize', 30) +axis([0 48 -0.5 0.5]) + set(gca, 'FontSize', 20) +subplot(6,1,5) +D2=zeros(1,48); +for k=1:24 + for r=1:2 + D2(r+2*(k-1)) = (-1)^(k+1) *C(13+floor((k-1)/2))/power(2,2/2); + end +end +stairs(D2,'b','LineWidth',2) +ylabel('D_2','FontSize', 30) +axis([0 48 -0.8 0.8]) + set(gca, 'FontSize', 20) +subplot(6,1,6) +D1=zeros(1,48); +for k=1:48 + for r=1 + D1(r+1*(k-1)) = (-1)^(k+1) *C(25+floor((k-1)/2))/power(2,1/2); + end +end +plot(D1,'b','LineWidth',2); +axis([0 48 -0.9 0.9]) +ylabel('D_1','FontSize', 30) +set(gca, 'FontSize', 20) diff --git a/reports/bazar Emilie/ondelettes2.m b/reports/bazar Emilie/ondelettes2.m new file mode 100644 index 0000000..9d4768a --- /dev/null +++ b/reports/bazar Emilie/ondelettes2.m @@ -0,0 +1,67 @@ +ondelettes +niveauColores +figure(2) +subplot(6,1,1) +plot(1:7,courbe(1:7),'b','LineWidth',2) +hold on +plot(8:48,courbe(8:48),'b','LineWidth',2) +set(gca,'ytick',[]) +ylabel('z','FontSize', 30) +set(gca, 'FontSize', 20) +axis([0 48 -1.6 1.6]) +set(B, 'Position', [.91 .11 .03 .8150]) +subplot(6,1,2:6) +colormap([cmapVert([1:1:end],:);cmapBleu(end:-1:1,:)]); + %colormap(gr(end:-1:1,:)); +indice = [3,3,6,12,24]; +indice2=[0,cumsum(indice)]; +beta = zeros(5,48); +for j=1:indice(1) + for l=1:2^4 + beta(1,(j-1)*2^4+l) = C(indice2(1)+j); + end +end +for r=2:5 + for j=1:indice(r) + for l=1:2^(5-(r-1)) + beta(r,(j-1)*2^(5-(r-1))+l) = C(indice2(r)+j); + end + end +end + + +imagesc(beta) +ylabel('d_1 d_2 d_3 d_4 a_4','FontSize', 30) +set(gca, 'FontSize', 20) +%gr = gray(64); + + + +set(gca,'xtick',[],'ytick',[]) +set(gca, 'FontSize', 20) + +% axes('Position', [0.05 0.05 0.9 0.9], 'Visible', 'off'); +% c=colorbar ('FontSize',18); +B=colorbar; +set(B, 'Position', [.91 .11 .03 .8150]) +%set(gca,'Position',[.1 0.1 0.9 0.9]) +set(gca, 'FontSize', 15) +% figure(3) +% subplot(6,1,1) +% plot(courbe,'LineWidth',2) +% axis([0 48 -0.7 0.7]) +% subplot(6,1,2) +% plot([8,24,40],C(1:3),'+','LineWidth',2) +% axis([0 48 -1.6 1.6]) +% subplot(6,1,3) +% plot([8:16:48],C(4:6),'+','LineWidth',2) +% axis([0 48 -0.6 0.6]) +% subplot(6,1,4) +% plot([4:8:48],C(7:12),'+','LineWidth',2) +% axis([0 48 -0.4 0.4]) +% subplot(6,1,5) +% plot([2:4:48],C(13:24),'+','LineWidth',2) +% axis([0 48 -0.2 0.2]) +% subplot(6,1,6) +% plot([1:2:48],C(25:48),'+','LineWidth',2) +% axis([0 48 -0.2 0.2]) \ No newline at end of file diff --git a/reports/bazar Emilie/pretraitement.m b/reports/bazar Emilie/pretraitement.m new file mode 100644 index 0000000..b0cf534 --- /dev/null +++ b/reports/bazar Emilie/pretraitement.m @@ -0,0 +1,22 @@ +%Dessins prétraitements +script + +XC=X2(:,1)-mean(X2(:,1)); + +[C1,L1] = wavedec(X2(:,1),4,'haar'); +xProj1=C(4:12); + +[C2,L2]=wavedec(XC',4,'haar'); +xProj2=C(1:12); + +Xrecon1 = waverec([zeros(1,3),xProj1',zeros(1,36)],L1,'haar'); +Xrecon2 = waverec([xProj2',zeros(1,36)],L2,'haar'); + +plot(X2(:,1),'LineWidth',2) +hold on +plot(Xrecon1,'r-o','LineWidth',2) +plot(Xrecon2,'g-s','LineWidth',2) +xlabel('Instant of the day','FontSize', 30) +ylabel(['Load consumption and its reconstructions' sprintf('\n') ' after projecting and preprocessing'],'FontSize', 30) +set(gca, 'FontSize', 20, 'fontName','Times'); +legend('Original signal','Reconstruction after projection and preprocessing 1','Reconstruction 
after projection and preprocessing 2','Location','NorthWest') \ No newline at end of file diff --git a/reports/bazar Emilie/recomp.m b/reports/bazar Emilie/recomp.m new file mode 100644 index 0000000..b60ed53 --- /dev/null +++ b/reports/bazar Emilie/recomp.m @@ -0,0 +1,29 @@ +ychap=zeros(n,m,k,94); +%Ychap=zeros(n,m,l,94); + +for LL=1:10 + for i=1:n + for r=1:2 + ychap(i,:,r,LL)=x(i,:)*(phitrue(:,:,r,LL)*inv(rhotrue(:,:,r,LL)))'; + Ychap(i,:,r,LL)=waverec(ychap(i,:,r,LL)',L,'sym4'); + end + end +end +for i=1:n + for r=1:2 + for LL=1:10 + RMSE(i,LL,r)=sqrt(sum((donneesC(78:96,i)-Ychap(i,:,r,LL)').^2)); + end + end +end +LL=88; +plot(Ychap(89,:,2,LL),'r') +hold on +plot(donnees(49:96,89)/100,'b') +hold off + + +for LL=1:10 + Z(LL,:)=principeMAP(Y,X,phiLassoMLE(:,:,:,LL),rhoLassoMLE(:,:,:,LL),piLassoMLE(:,LL),3.14); + sum((Z(1:100)==1)+(Z(101:200)==2)) +end \ No newline at end of file diff --git a/reports/bazar Emilie/reconstruction.m b/reports/bazar Emilie/reconstruction.m new file mode 100644 index 0000000..ea6c3e6 --- /dev/null +++ b/reports/bazar Emilie/reconstruction.m @@ -0,0 +1,7 @@ +beta = betaLassoMLE(:,:,1:2,ind); +for i=1:338 + for r=1:2 + YChap(:,i,r) = X(i,:) * beta(:,:,r); + yChap(:,i,r) = waverec([zeros(1,3),YChap(:,i,r)',zeros(1,36)],L,'haar'); + end +end diff --git a/reports/bazar Emilie/resultatSousEns.m b/reports/bazar Emilie/resultatSousEns.m new file mode 100644 index 0000000..f8dca24 --- /dev/null +++ b/reports/bazar Emilie/resultatSousEns.m @@ -0,0 +1,21 @@ +ind=indiceLassoMLE([2,3,4,160,165]) +cpt=0; +for L2=ind + cpt = cpt+1 +% figure(L2) +% hold on +% for r=1:max(b(:,L2)) +% subplot(2,ceil(max(b(:,L2))/2),r) +% plot(X2Final(:,b(:,L2)==r),c(r)) +% end +% hold off + figure(1) + subplot(2,3,cpt) + hold on + for r=1:max(b(:,L2)) + plot([mean(X2Final(:,b(:,L2)==r),2);mean(Y2Final(:,(b(:,L2)==r)),2)],c(r)) + title('signal moyen de chaque classe, pour chaque modele') + end + hold off + +end \ No newline at end of file diff --git a/reports/bazar Emilie/sautDeDimension.m b/reports/bazar Emilie/sautDeDimension.m new file mode 100644 index 0000000..6dec1f8 --- /dev/null +++ b/reports/bazar Emilie/sautDeDimension.m @@ -0,0 +1,65 @@ +%Saut de dimension +figure(1) +vec=[0,10:100:1210]; + +for r=vec + tableauBis(:,:,r+1) = tableauLassoMLE; + [aaaa,bbbb]= min(tableauLassoMLE(:,4)+r*tableauLassoMLE(:,2)); + tableauBis(:,4,r+1) = tableauLassoMLE(:,4)+r*tableauLassoMLE(:,2); + reponse(r+1)=tableauLassoMLE(bbbb,3); +end + +[x1,y1] = stairs(vec,reponse(vec+1)) +plot(x1,y1,'LineWidth',2) +xlabel('$\kappa$','Interpreter','LaTex','FontSize', 45) +ylabel('Model dimension','FontSize', 45) +set(gca, 'FontSize', 40, 'fontName','Times'); +[Cx1,IA,IC] = unique(y1,'stable') +hold on +plot([x1(IA(2))-0.001,x1(IA(2))],[0,Cx1(2)],'--','LineWidth',2) +plot([2*x1(IA(2))-0.001,2*x1(IA(2))],[0,Cx1(3)],'--','LineWidth',2) +%plot([0,2*x1(IA(2))],[Cx1(3),Cx1(3)],'--','LineWidth',2) +set(gca,'Box','off') +set(gca,'YTick',unique([0:800:700])) +set(gca,'XTick',unique([0:1300:1200])) +text(x1(IA(2))-20,-20,'$\hat{\kappa}$','Interpreter','LaTex','FontSize',45) +text(2*x1(IA(2))-20,-20,'$2 \hat{\kappa}$','Interpreter','LaTex','FontSize',45) +%text(-80,Cx1(3),'$\hat{m}$','Interpreter','LaTex','FontSize',45) +axis([0 1200 0 650]) +hold off + +%LLF pénalisé + +% figure(2) +% plot(tableauLassoMLE(:,3),tableauLassoMLE(:,4)+0*tableauLassoMLE(:,2),'.','markersize', 20) +% xlabel('Model dimension','FontSize', 30) +% ylabel('Log-likelihood','Interpreter','LaTex','FontSize', 30) +% set(gca, 'FontSize', 20, 
'fontName','Times'); +figure(3) +ind = find(tableauLassoMLE(:,4) >4000); +tableauLassoMLE(ind,:)=[]; +plot(tableauLassoMLE(2:end,3),tableauLassoMLE(2:end,4)+1100*tableauLassoMLE(2:end,2),'.','markersize', 30) +indiceInt = find(tableauLassoMLE(:,4)+1100*tableauLassoMLE(:,2)<-4401); +indiceBis = find(tableauLassoMLE(:,3)==53); +hold on +plot(tableauLassoMLE(indiceInt,3),tableauLassoMLE(indiceInt,4)+1100*tableauLassoMLE(indiceInt,2),'sr','markersize', 30,'linewidth',5) +plot(tableauLassoMLE(indiceBis,3),tableauLassoMLE(indiceBis,4)+1100*tableauLassoMLE(indiceBis,2),'dg','markersize', 30,'linewidth',5) +xlabel('Model dimension','Interpreter','LaTex','FontSize', 45) +ylabel('Penalized log-likelihood for $2 \hat{\kappa}$','Interpreter','LaTex','FontSize', 45) +set(gca, 'FontSize', 30, 'fontName','Times'); +set(gcf,'Units','normal') +set(gca,'Position',[.1 .1 .88 .85]) +hold off +% figure(4) +% plot(tableauLassoMLE(:,3),tableauLassoMLE(:,4)+3000*tableauLassoMLE(:,2),'.','markersize', 20) +% xlabel('Model dimension','FontSize', 30) +% ylabel('Penalized log-likelihood for $\kappa$ = 3000','Interpreter','LaTex','FontSize', 30) +% set(gca, 'FontSize', 20, 'fontName','Times'); +% +% figure(5) +% n2 = find(piLassoMLE(3,:)~=0,1)-1; +% n3 = find(piLassoMLE(4,:)~=0,1)-1; +% plot(tableauLassoMLE(:,3),tableauLassoMLE(:,4)+0*tableauLassoMLE(:,2),'.','markersize', 20) +% xlabel('Model dimension','FontSize', 30) +% ylabel('Log-likelihood','Interpreter','LaTex','FontSize', 30) +% set(gca, 'FontSize', 20, 'fontName','Times'); \ No newline at end of file diff --git a/reports/bazar Emilie/script.m b/reports/bazar Emilie/script.m new file mode 100644 index 0000000..33e24c1 --- /dev/null +++ b/reports/bazar Emilie/script.m @@ -0,0 +1,30 @@ +%clear all +load donneesSelec.mat +%donnees=[ind100,res100]; +donnees = BB; +Res=mean(donnees,2); +for i=1:349 + X2(:,i)=Res(48*(i-1)+1:48*i); +end +for i=1:348 + signal(:,i)=[X2(:,i);X2(:,i+1)]; +end + +[p1,n1]=size(X2); +for i=1:n1 + for j=1:p1 + XC(i,j)=X2(j,i)-mean(X2(:,i)); + end + + [C,L]=wavedec(XC(i,:)',4,'haar'); + xProj(i,:)=C(1:12); +end + +X=xProj(1:end-2,:); +Y=xProj(2:end-1,:); +for i=1:n1 + Xrecon(i,:)=waverec([xProj(i,:),zeros(1,36)],L,'haar'); +end +for i=1:348 + signal3(:,i)=[Xrecon(i,:),Xrecon(i+1,:)]; +end diff --git a/reports/bazar Emilie/scriptEssai.m b/reports/bazar Emilie/scriptEssai.m new file mode 100644 index 0000000..3638977 --- /dev/null +++ b/reports/bazar Emilie/scriptEssai.m @@ -0,0 +1,28 @@ +%clear all +load donneesSelec.mat +donnees=BB; + +Res=mean(donnees,2); +%Ind=mean(donnees(:,1:100),2); +for i=1:340 + X2(:,i)=Res(48*(i-1)+1:48*i); +end +for i=1:339 + signal(:,i)=[X2(:,i);X2(:,i+1)]; +end + +[p1,n1]=size(X2); +for i=1:n1 + [C,L]=wavedec(X2(:,i)',4,'haar'); + xProj(i,:)=C(4:12); +end + +X=xProj(1:end-2,:); +Y=xProj(2:end-1,:); +for i=1:n1 + Xrecon(i,:)=waverec([zeros(1,3),xProj(i,:),zeros(1,36)],L,'haar'); +end + +for i=1:339 + signal2(:,i)=[Xrecon(i,:),Xrecon(i+1,:)]; +end diff --git a/reports/bazar Emilie/scriptSimules.m b/reports/bazar Emilie/scriptSimules.m new file mode 100644 index 0000000..f068b5e --- /dev/null +++ b/reports/bazar Emilie/scriptSimules.m @@ -0,0 +1,9 @@ +for i = 1:100 + courbeX(i,:) = data(((i-1)*96+1):((i-1)*96+48)); + courbeY(i,:) = data(((i-1)*96+49):((i-1)*96+96)); +end +plot((courbeX)') +figure(2) +plot((courbeY)') + +X2 = (courbeX)' \ No newline at end of file diff --git a/reports/bazar Emilie/simulatedData.R b/reports/bazar Emilie/simulatedData.R new file mode 100644 index 0000000..7eaefb9 --- /dev/null +++ 
b/reports/bazar Emilie/simulatedData.R @@ -0,0 +1,120 @@ +library("mclust") +#library("R.matlab", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.3") +#redrawData = TRUE +#if (redrawData==TRUE){ + ########### + ## Model + ########### + K = 2 + p = 48 + T = seq(0,1.5,length.out = p) + T2 = seq(0,3, length.out = 2*p) + n = 100 + x1 = cos(2*pi*T) + 0.2*cos(4*2*pi*T) +2*c(rep(0,round(length(T)/7)),rep(1,round(length(T)*(1-1/7)))) + plot(T,x1) + lines(T,x1) + + sigmaX = 0.085 + sigmaY = 0.1 + beta = list() + p1= 0.5 + beta[[1]] =diag(c(rep(p1,5),rep(1,5), rep(p1,5), rep(1, p-15))) + p2 = 1 + beta[[2]] = diag(c(rep(p2,5),rep(1,5), rep(p2,5), rep(1, p-15))) + ITE = 100 + ARI1 = ARI2 = ARI3 = rep(0,ITE) + XY = array(0, dim = c(ITE, 2*p,n)) + XYproj = array(0, dim=c(ITE, 96,n)) + + affec = list() + ########### + ## Iterations + ########### + for (ite in c(1:ITE)){ + ########### + ##Sample + ########### + x = x1 + matrix(rnorm(n*p, 0, sigmaX), ncol = n) + affec[[ite]] = sample(c(1,2), n, replace = TRUE) + y = x + xy = matrix(0,ncol=n, nrow= 2*p) + for (i in c(1:n)){ + y[,i] = x[,i] %*% beta[[affec[[ite]][i]]] + rnorm(p, 0, sigmaY) + xy[,i] = c(x[,i],y[,i]) + XY[ite,,i] = xy[,i] - mean(xy[,i]) + # Dx = dwt(x[,i], filter='haar')@W + # Dx = rev(unlist(Dx)) + # Dx = Dx[2:(1+3+6+12+24)] + # Ax = dwt(x[,i], filter='haar')@V + # Ax = rev(unlist(Ax)) + # Ax = Ax[2:(1+3)] + # Dy = dwt(y[,i], filter='haar')@W + # Dy = rev(unlist(Dy)) + # Dy = Dy[2:(1+3+6+12+24)] + # Ay = dwt(y[,i], filter='haar')@V + # Ay = rev(unlist(Ay)) + # Ay = Ay[2:(1+3)] + # XYproj[ite,,i] = c(Ax,Dx,Ay,Dy) + } + print(ite) + # + # + } + xy[c(7,55),] = NA + # write.table(XY,'data.csv', row.names=FALSE, col.names=FALSE) +matplot(T2,xy[,affec[[ite]]==1],type='l', col='red', lty = 1) +matplot(T2,xy[,affec[[ite]]==2],type='l', col='black', add=TRUE, lty= 1) +abline(v = 1.5) +text(0.75,0,'X', cex = 2 ) +text(0.75+1.5,0,'Y', cex = 2 ) +#proj = read.table('dataProj.csv') +#} + + +#matplot(T,x,type='l', col='black', xlab = '', ylab='', lwd=1.5,lty=1) +#matplot(T,y[,affec[[ite]]==1],type='l', col='red', xlab = '', ylab='', lwd=1.5,lty=1) +#matplot(T,y[,affec[[ite]]==2],type='l', col='black', add=TRUE,lwd=2,lty=1) +# proj2 = array(0,dim=c(ITE,2*p,n)) +# for (ite in c(1:ITE)){ +# for (i in c(1:n)){ +# A = proj[ite,(1+(i-1)*96):(i*96)] +# for (j in 1:96){ +# proj2[ite,j,i] = A[1,j] +# } +# } +# print(ite) +# } +########### +## Iterations +########### +Kmod2 = Kmod1 = rep(0,ITE) +Kmod3 = rep(0,ITE) +for (ite in c(1:ITE)){ + print(ite) + ########### + ## k-means 1 + ########### + mod1 = Mclust(t(XY[ite,,]),G = 2, mode='VII') + ARI1[ite] = adjustedRandIndex(mod1$classification, affec[[ite]]) + Kmod1[ite] = mod1$G + # ########### + # ## k-means 2 + # ########### + # #proj2 = + # mod2 = Mclust(t(XYproj[ite,,]),G = 1:8, mode='VII') + # ARI2[ite] = adjustedRandIndex(mod2$classification, affec[[ite]]) + # Kmod2[ite] = mod2$G + # ########### + # ## k-means 1 + # ########### + # #proj3 = + # mod3 = Mclust(t(XYproj[ite,c(4:12,52:60),]),G = 1:8, mode='VII') + # ARI3[ite] = adjustedRandIndex(mod3$classification, affec[[ite]]) + # Kmod3[ite] = mod3$G +} +ARI0 = rep(1,ITE) +par(cex.lab=1.5) +par(cex.axis=1.5) +boxplot(ARI0,ARI1, names = c('LassoMLE','K-means'), lwd=1.3) +table(Kmod1) +table(Kmod2) diff --git a/reports/essai16mars.R b/reports/essai16mars.R index d835599..416f895 100644 --- a/reports/essai16mars.R +++ b/reports/essai16mars.R @@ -1,88 +1,26 @@ -meanX = rep(0,6) -covX = 0.1*diag(6) +p = 10 +q = 15 +k = 2 +D = 20 + +meanX = rep(0,p) +covX = 0.1*diag(p) 
-covY = array(dim = c(5,5,2)) -covY[,,1] = 0.1*diag(5) -covY[,,2] = 0.2*diag(5) +covY = array(dim = c(q,q,k)) +covY[,,1] = 0.1*diag(q) +covY[,,2] = 0.2*diag(q) -beta = array(dim = c(6,5,2)) -beta[,,2] = matrix(c(rep(2,12),rep(0, 18))) -beta[,,1] = matrix(c(rep(1,12),rep(0, 18))) +beta = array(dim = c(p,q,2)) +beta[,,2] = matrix(c(rep(2,(D)),rep(0, p*q-D))) +beta[,,1] = matrix(c(rep(1,D),rep(0, p*q-D))) -n = 500 +n = 100 pi = c(0.4,0.6) -source('~/valse/R/generateSampleInputs.R') data = generateXY(meanX,covX,covY, pi, beta, n) X = data$X Y = data$Y -k = 2 - -n = nrow(Y) -m = ncol(Y) -p = ncol(X) - -Zinit1 = array(0, dim=c(n)) -betaInit1 = array(0, dim=c(p,m,k)) -sigmaInit1 = array(0, dim = c(m,m,k)) -phiInit1 = array(0, dim = c(p,m,k)) -rhoInit1 = array(0, dim = c(m,m,k)) -Gam = matrix(0, n, k) -piInit1 = matrix(0,k) -gamInit1 = array(0, dim=c(n,k)) -LLFinit1 = list() - -require(MASS) #Moore-Penrose generalized inverse of matrix - - distance_clus = dist(X) - tree_hier = hclust(distance_clus) - Zinit1 = cutree(tree_hier, k) - sum(Zinit1==1) - - for(r in 1:k) - { - Z = Zinit1 - Z_indice = seq_len(n)[Z == r] #renvoit les indices où Z==r - if (length(Z_indice) == 1) { - betaInit1[,,r] = ginv(crossprod(t(X[Z_indice,]))) %*% - crossprod(t(X[Z_indice,]), Y[Z_indice,]) - } else { - betaInit1[,,r] = ginv(crossprod(X[Z_indice,])) %*% - crossprod(X[Z_indice,], Y[Z_indice,]) - } - sigmaInit1[,,r] = diag(m) - phiInit1[,,r] = betaInit1[,,r] #/ sigmaInit1[,,r] - rhoInit1[,,r] = solve(sigmaInit1[,,r]) - piInit1[r] = mean(Z == r) - } - - for(i in 1:n) - { - for(r in 1:k) - { - dotProduct = tcrossprod(Y[i,]%*%rhoInit1[,,r]-X[i,]%*%phiInit1[,,r]) - Gam[i,r] = piInit1[r]*det(rhoInit1[,,r])*exp(-0.5*dotProduct) - } - sumGamI = sum(Gam[i,]) - gamInit1[i,]= Gam[i,] / sumGamI - } - - miniInit = 10 - maxiInit = 101 - - new_EMG = EMGLLF(phiInit1,rhoInit1,piInit1,gamInit1,miniInit,maxiInit,1,0,X,Y,1e-6) - - new_EMG$phi - new_EMG$pi - LLFEessai = new_EMG$LLF - LLFinit1 = LLFEessai[length(LLFEessai)] - - -b = which.max(LLFinit1) -phiInit = phiInit1[,,,b] -rhoInit = rhoInit1[,,,b] -piInit = piInit1[b,] -gamInit = gamInit1[,,b] +res_valse = valse(X,Y) diff --git a/reports/essaiPlot.R b/reports/essaiPlot.R new file mode 100644 index 0000000..e69de29
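For readers of the constructionModelesLassoMLE.R change above: the function now returns one list per lambda (components phi, rho, pi, llh) instead of four parallel lists, and valse.R collects the log-likelihoods by growing a matrix with rbind. The snippet below is an illustrative sketch, not part of the commit; the helper name collect_llh is hypothetical and it only assumes that each element of model[[k]] carries $llh = c(log-likelihood, dimension), as built by the new code.

# Illustrative sketch (not in the commit): collapse the per-lambda output of the
# refactored constructionModelesLassoMLE into the LLH and D vectors used by valse.R.
collect_llh = function(model_k)
{
  # each element of model_k is list(phi, rho, pi, llh) with llh = c(logLik, dimension)
  llh = do.call(rbind, lapply(model_k, function(res) res$llh))
  list(LLH = llh[,1], D = llh[,2])
}

This yields the same LLH and D as the rbind loop in valse.R, without the placeholder row that the loop later drops with llh[-1,].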
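The new model-selection step in valse.R hands a four-column matrix (model label, penalty shape, complexity, contrast) to capushe() and reads the chosen model back from the DDSE, Djump, BIC_capushe or AIC_capushe slots. Below is a small self-contained sketch of that call pattern on synthetic values; it assumes the capushe package is installed, the numbers are made up, and the slot accessors simply mirror the ones used in the diff.

# Sketch of the capushe call pattern used in the new model-selection step.
# Column order follows valse.R: model label, penalty shape, complexity, contrast.
library(capushe)
set.seed(1)
complexity = seq(10, 300, by = 10)                       # 30 candidate models (toy values)
contrast = -2000 * log(complexity) + rnorm(30, sd = 10)  # toy contrast, decreasing in complexity
data = cbind(1:30, complexity, complexity, contrast)
modSel = capushe(data, n = 100)
as.numeric(modSel@DDSE@model)   # model kept by the data-driven slope estimation
as.numeric(modSel@Djump@model)  # model kept by the dimension jump
modSel@BIC_capushe$model        # BIC choice, read the same way as in valse.R

Note that valse.R passes the penalty shape and the complexity as the same column (tableauRecap[,2] twice), so the sketch does the same.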
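A usage sketch for the newly exported valse() entry point with the updated selecMod choices, reusing the X and Y simulated at the end of reports/essai16mars.R. One caveat grounded in the diff itself: the roxygen header advertises 'DJump' while the code tests for 'Djump', so the spelling to pass depends on which of the two is kept.

# Usage sketch (X and Y as generated in reports/essai16mars.R); spellings follow the code in valse.R.
res_ddse = valse(X, Y, procedure = 'LassoMLE', selecMod = 'DDSE')
res_bic  = valse(X, Y, procedure = 'LassoMLE', selecMod = 'BIC', kmin = 2, kmax = 3)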
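The MATLAB scripts in reports/bazar Emilie (script.m, pretraitement.m, ondelettes.m) center each 48-point daily curve and keep the 12 coarsest Haar coefficients (3 approximation, 3 + 6 detail) before clustering; the commented-out lines in simulatedData.R hint at the same projection with the wavelets package. The sketch below is an assumption-laden illustration of that projection in R, not a verified port: it assumes the wavelets package is available, a 48-point curve, and 4 decomposition levels; coefficient ordering and normalisation may differ from MATLAB's wavedec.

# Sketch of the Haar projection used as preprocessing (cf. script.m and the
# commented dwt() calls in simulatedData.R).
library(wavelets)
x_day = cos(2*pi*seq(0, 1.5, length.out = 48))           # toy daily curve
dec = dwt(x_day - mean(x_day), filter = "haar", n.levels = 4, boundary = "periodic")
A4 = dec@V[[4]]                      # 3 level-4 approximation coefficients
D4 = dec@W[[4]]; D3 = dec@W[[3]]     # 3 + 6 coarse detail coefficients
xProj = c(A4, D4, D3)                # the 12 coefficients kept in script.m
length(xProj)                        # 12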
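Finally, simulatedData.R scores the recovered clustering against the true affectation with the adjusted Rand index from mclust. A minimal standalone sketch of that measure, with toy labels in place of the script's mod1$classification and affec[[ite]]:

# Sketch of the clustering-quality measure used in simulatedData.R.
library(mclust)
true_lab = rep(1:2, each = 50)
est_lab  = sample(1:2, 100, replace = TRUE)   # toy estimated labels
adjustedRandIndex(est_lab, true_lab)          # near 0 for random labels, 1 for a perfect match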