From: emilie
Date: Tue, 6 Dec 2016 11:41:17 +0000 (+0100)
Subject: add Roxygen comments - anglicize some function and variable names
X-Git-Url: https://git.auder.net/js/doc/%7B%7B%20path%28%27mixstore_static_policy%27%29%20%7D%7D?a=commitdiff_plain;h=d1531659214edd6eaef0ac9ec835455614bba16c;p=valse.git

add Roxygen comments - anglicize some function and variable names
---

diff --git a/R/basicInitParameters.R b/R/basicInitParameters.R
index 6090d0a..9801229 100644
--- a/R/basicInitParameters.R
+++ b/R/basicInitParameters.R
@@ -1,18 +1,28 @@
+#-----------------------------------------------------------------------
+#' Initialize the parameters in a basic way (zero for the conditional mean,
+#' uniform weights, identity covariance matrices, and a uniformly distributed clustering)
+#' @param n sample size
+#' @param p number of covariates
+#' @param m size of the response
+#' @param k number of clusters
+#' @return a list with phiInit, rhoInit, piInit, gamInit
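+#' @examples
+#' # Minimal usage sketch with hypothetical sizes, just to illustrate the output shapes:
+#' init = basic_Init_Parameters(n = 20, p = 5, m = 3, k = 2)
+#' dim(init$phiInit)  # 5 3 2
+#' init$piInit        # 0.5 0.5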
+#' @export
+#-----------------------------------------------------------------------
 basic_Init_Parameters = function(n,p,m,k)
 {
-	phiInit = array(0, dim=c(p,m,k))
-	
-	piInit = (1./k)*rep.int(1,k)
-	
-	rhoInit = array(0, dim=c(m,m,k))
-	for(i in 1:k)
-		rhoInit[,,i] = diag(m)
-	
-	gamInit = 0.1*array(1, dim=c(n,k))
-	R = sample(1:k,n, replace=TRUE)
-	for(i in 1:n)
-		gamInit[i,R[i]] = 0.9
-	gamInit = gamInit/sum(gamInit[1,])
-	
-	return (list(phiInit, rhoInit, piInit, gamInit))
+  phiInit = array(0, dim=c(p,m,k))
+
+  piInit = (1./k)*rep.int(1,k)
+
+  rhoInit = array(0, dim=c(m,m,k))
+  for(i in 1:k)
+    rhoInit[,,i] = diag(m)
+
+  gamInit = 0.1*array(1, dim=c(n,k))
+  R = sample(1:k, n, replace=TRUE)
+  for(i in 1:n)
+    gamInit[i,R[i]] = 0.9
+  gamInit = gamInit/sum(gamInit[1,])
+
+  return (list(phiInit = phiInit, rhoInit = rhoInit, piInit = piInit, gamInit = gamInit))
 }
diff --git a/R/discardSimilarModels.R b/R/discardSimilarModels.R
new file mode 100644
index 0000000..8b12077
--- /dev/null
+++ b/R/discardSimilarModels.R
@@ -0,0 +1,31 @@
+#' Discard models which have the same relevant variables
+#'
+#' @param B1 array of relevant coefficients (of size p*m*length(gridLambda))
+#' @param B2 array of irrelevant coefficients (of size p*m*length(gridLambda))
+#' @param glambda grid of regularization parameters (vector)
+#' @param rho covariance matrices (of size m*m*K*length(gridLambda))
+#' @param pi weight parameters (of size K*length(gridLambda))
+#'
+#' @return a list with the updated B1, B2, glambda, rho and pi, and ind, the vector of
+#' indices of the discarded models.
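+#' @examples
+#' # Hedged sketch with made-up sizes (p = 2, m = 3, K = 2, three lambda values),
+#' # where the first two coefficient slices coincide and model 2 is discarded:
+#' B1 = array(c(rep(1, 6), rep(1, 6), rep(2, 6)), dim = c(2, 3, 3))
+#' B2 = array(0, dim = c(2, 3, 3))
+#' glambda = c(0.1, 0.2, 0.3)
+#' rho = array(diag(3), dim = c(3, 3, 2, 3))
+#' pi = matrix(1/2, 2, 3)
+#' discardSimilarModels(B1, B2, glambda, rho, pi)$ind  # 2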
+#' @export
+discardSimilarModels = function(B1,B2,glambda,rho,pi)
+{
+  ind = c()
+  # compare each model with the previous ones; keep only the first of each group of duplicates
+  for (l in seq_along(glambda)[-1])
+  {
+    for (ll in 1:(l-1))
+    {
+      if (all(B1[,,l] == B1[,,ll]))
+        ind = c(ind, l)
+    }
+  }
+  ind = unique(ind)
+  if (length(ind) > 0)
+  {
+    B1 = B1[,,-ind]
+    glambda = glambda[-ind]
+    B2 = B2[,,-ind]
+    rho = rho[,,,-ind]
+    pi = pi[,-ind]
+  }
+
+  return (list(B1=B1,B2=B2,glambda=glambda,rho=rho,pi=pi,ind=ind))
+}
diff --git a/R/discardSimilarModels2.R b/R/discardSimilarModels2.R
new file mode 100644
index 0000000..d620bff
--- /dev/null
+++ b/R/discardSimilarModels2.R
@@ -0,0 +1,21 @@
+#' Similar to discardSimilarModels, for the Lasso-rank procedure (focus on columns)
+#'
+#' @param B1 array of relevant coefficients (of size p*m*length(gridLambda))
+#' @param rho covariance matrices
+#' @param pi weight parameters
+#'
+#' @return a list with the updated B1, rho and pi, and ind, the vector of indices of the
+#' discarded models
+#' @export
+#'
+#' @examples
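+#' # Hedged sketch reusing toy arrays of hypothetical sizes (see discardSimilarModels):
+#' B1 = array(c(rep(1, 6), rep(1, 6), rep(2, 6)), dim = c(2, 3, 3))
+#' rho = array(diag(3), dim = c(3, 3, 2, 3))
+#' pi = matrix(1/2, 2, 3)
+#' discardSimilarModels2(B1, rho, pi)$ind  # 2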
+discardSimilarModels2 = function(B1,rho,pi)
+{
+  ind = c()
+  dim_B1 = dim(B1)
+  B2 = array(0, dim=c(dim_B1[1],dim_B1[2],dim_B1[3]))
+  sizeLambda = dim_B1[3]
+  glambda = rep(0, sizeLambda)
+
+  suppressmodel = discardSimilarModels(B1,B2,glambda,rho,pi)
+  return (list(B1 = suppressmodel$B1, ind = suppressmodel$ind,
+    rho = suppressmodel$rho, pi = suppressmodel$pi))
+}
diff --git a/R/generateIO.R b/R/generateIO.R
index f8c8194..83d8cc9 100644
--- a/R/generateIO.R
+++ b/R/generateIO.R
@@ -1,26 +1,35 @@
+#' Generate a sample of (X,Y) of size n
+#' @param covX covariance of the covariates
+#' @param covY covariance of the response vector
+#' @param pi proportion of each cluster
+#' @param beta regression matrix
+#' @param n sample size
+#' @return a list with X and Y
+#' @export
+#-----------------------------------------------------------------------
 generateIO = function(covX, covY, pi, beta, n)
 {
-	size_covX = dim(covX)
-	p = size_covX[1]
-	k = size_covX[3]
-	
-	size_covY = dim(covY)
-	m = size_covY[1]
-	
-	Y = matrix(0,n,m)
-	BX = array(0, dim=c(n,m,k))
-	
-	require(MASS) #simulate from a multivariate normal distribution
-	for (i in 1:n)
-	{
-		for (r in 1:k)
-		{
-			BXir = rep(0,m)
-			for (mm in 1:m)
-				Bxir[[mm]] = X[i,] %*% beta[,mm,r]
-			Y[i,] = Y[i,] + pi[r] * mvrnorm(1,BXir, covY[,,r])
-		}
-	}
-	
-	return (list(X=X,Y=Y))
+  size_covX = dim(covX)
+  p = size_covX[1]
+  k = size_covX[3]
+
+  size_covY = dim(covY)
+  m = size_covY[1]
+
+  Y = matrix(0,n,m)
+  BX = array(0, dim=c(n,m,k))
+
+  require(MASS) #simulate from a multivariate normal distribution
+  # X is assumed to already exist in the calling environment; it is not generated here
+  for (i in 1:n)
+  {
+    for (r in 1:k)
+    {
+      BXir = rep(0,m)
+      for (mm in 1:m)
+        BXir[mm] = X[i,] %*% beta[,mm,r]
+      Y[i,] = Y[i,] + pi[r] * mvrnorm(1, BXir, covY[,,r])
+    }
+  }
+
+  return (list(X=X,Y=Y))
 }
diff --git a/R/generateIOdefault.R b/R/generateIOdefault.R
index fc01cd8..85213cc 100644
--- a/R/generateIOdefault.R
+++ b/R/generateIOdefault.R
@@ -1,22 +1,30 @@
+#' Generate a sample of (X,Y) of size n with default values
+#' @param n sample size
+#' @param p number of covariates
+#' @param m size of the response
+#' @param k number of clusters
+#' @return a list with X and Y
+#' @export
+#-----------------------------------------------------------------------
 generateIOdefault = function(n, p, m, k)
 {
-	covX = array(0, dim=c(p,p,k))
-	covY = array(0, dim=c(m,m,k))
-	for(r in 1:k)
-	{
-		covX[,,r] = diag(p)
-		covY[,,r] = diag(m)
-	}
-	
-	pi = rep(1./k,k)
-	
-	beta = array(0, dim=c(p,m,k))
-	for(j in 1:p)
-	{
-		nonZeroCount = ceiling(m * runif(1))
-		beta[j,1:nonZeroCount,] = matrix(runif(nonZeroCount*k), ncol=k)
-	}
-	
-	sample_IO = generateIO(covX, covY, pi, beta, n)
-	return (list(X=sample_IO$X,Y=sample_IO$Y))
+  covX = array(0, dim=c(p,p,k))
+  covY = array(0, dim=c(m,m,k))
+  for(r in 1:k)
+  {
+    covX[,,r] = diag(p)
+    covY[,,r] = diag(m)
+  }
+
+  pi = rep(1./k, k)
+
+  beta = array(0, dim=c(p,m,k))
+  for(j in 1:p)
+  {
+    nonZeroCount = ceiling(m * runif(1))
+    beta[j,1:nonZeroCount,] = matrix(runif(nonZeroCount*k), ncol=k)
+  }
+
+  sample_IO = generateIO(covX, covY, pi, beta, n)
+  return (list(X=sample_IO$X, Y=sample_IO$Y))
 }
diff --git a/R/gridLambda.R b/R/gridLambda.R
index 66b6cc2..7b82f63 100644
--- a/R/gridLambda.R
+++ b/R/gridLambda.R
@@ -1,20 +1,31 @@
+#' Construct the data-driven grid of regularization parameters used by the Lasso estimator
+#' @param phiInit initial value for phi
+#' @param rhoInit initial value for rho
+#' @param piInit initial value for pi
+#' @param gamInit initial value for gamma
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param gamma exponent applied to the proportions pi in the penalty
+#' @param mini minimum number of iterations in the EM algorithm
+#' @param maxi maximum number of iterations in the EM algorithm
+#' @param tau threshold to stop the EM algorithm
+#' @return the grid of regularization parameters
+#' @export
+#-----------------------------------------------------------------------
 gridLambda = function(phiInit, rhoInit, piInit, gamInit, X, Y, gamma, mini, maxi, tau)
 {
-	n = nrow(X)
-	p = dim(phiInit)[1]
-	m = dim(phiInit)[2]
-	k = dim(phiInit)[3]
-	
-	list_EMG = .Call("EMGLLF",phiInit,rhoInit,piInit,gamInit,mini,maxi,1,0,X,Y,tau)
-	
-	grid = array(0, dim=c(p,m,k))
-	for (i in 1:p)
-	{
-		for (j in 1:m)
-			grid[i,j,] = abs(list_EMG$S[i,j,]) / (n*list_EMG$pi^gamma)
-	}
-	grid = unique(grid)
-	grid = grid[grid <=1]
-	
-	return(grid)
+  n = nrow(X)
+  p = dim(phiInit)[1]
+  m = dim(phiInit)[2]
+  k = dim(phiInit)[3]
+
+  list_EMG = .Call("EMGLLF",phiInit,rhoInit,piInit,gamInit,mini,maxi,1,0,X,Y,tau)
+
+  grid = array(0, dim=c(p,m,k))
+  for (i in 1:p)
+  {
+    for (j in 1:m)
+      grid[i,j,] = abs(list_EMG$S[i,j,]) / (n*list_EMG$pi^gamma)
+  }
+  grid = unique(grid)
+  grid = grid[grid <= 1]
+
+  return(grid)
 }
diff --git a/R/indicesSelection.R b/R/indicesSelection.R
new file mode 100644
index 0000000..0445406
--- /dev/null
+++ b/R/indicesSelection.R
@@ -0,0 +1,36 @@
+#' Construct the sets of relevant and irrelevant indices
+#'
+#' @param phi array of regression coefficients (of size p*m*k)
+#' @param thresh threshold below which a coefficient is considered equal to zero
+#'
+#' @return a list with A, a matrix of relevant indices (of size p*m), and B, a
+#' matrix of irrelevant indices (of size p*m)
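+#' @examples
+#' # Hedged sketch with a hypothetical 2*2*2 coefficient array, where only phi[1,1,] is non-zero:
+#' phi = array(0, dim = c(2, 2, 2))
+#' phi[1, 1, ] = 1
+#' indicesSelection(phi)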
+#' @export
+indicesSelection = function(phi, thresh = 1e-6)
+{
+  dim_phi = dim(phi)
+  p = dim_phi[1]
+  m = dim_phi[2]
+
+  A = matrix(0, p, m)
+  B = matrix(0, p, m)
+
+  for(j in 1:p)
+  {
+    cpt1 = 0
+    cpt2 = 0
+    for(mm in 1:m)
+    {
+      if(max(phi[j,mm,]) > thresh)
+      {
+        cpt1 = cpt1 + 1
+        A[j,cpt1] = mm
+      } else
+      {
+        cpt2 = cpt2 + 1
+        B[j,cpt2] = mm
+      }
+    }
+  }
+  return (list(A=A,B=B))
+}
diff --git a/R/initSmallEM.R b/R/initSmallEM.R
index d519766..1fa2d9b 100644
--- a/R/initSmallEM.R
+++ b/R/initSmallEM.R
@@ -1,84 +1,75 @@
-vec_bin = function(X,r)
-{
-	Z = c()
-	indice = c()
-	j = 1
-	for (i in 1:length(X))
-	{
-		if(X[i] == r)
-		{
-			Z[i] = 1
-			indice[j] = i
-			j=j+1
-		} else
-			Z[i] = 0
-	}
-	return (list(Z=Z,indice=indice))
-}
-
+#' Initialization of the EM algorithm
+#'
+#' @param k number of components
+#' @param X matrix of covariates (of size n*p)
+#' @param Y matrix of responses (of size n*m)
+#' @param tau threshold to stop the EM algorithm
+#'
+#' @return a list with phiInit, rhoInit, piInit, gamInit
+#' @export
 initSmallEM = function(k,X,Y,tau)
 {
-	n = nrow(Y)
-	m = ncol(Y)
-	p = ncol(X)
-	
-	betaInit1 = array(0, dim=c(p,m,k,20))
-	sigmaInit1 = array(0, dim = c(m,m,k,20))
-	phiInit1 = array(0, dim = c(p,m,k,20))
-	rhoInit1 = array(0, dim = c(m,m,k,20))
-	piInit1 = matrix(0,20,k)
-	gamInit1 = array(0, dim=c(n,k,20))
-	LLFinit1 = list()
-	
-	require(MASS) #Moore-Penrose generalized inverse of matrix
-	for(repet in 1:20)
-	{
-		clusters = hclust(dist(y)) #default distance : euclidean
-		#cutree retourne les indices (à quel cluster indiv_i appartient) d'un clustering hierarchique
-		clusterCut = cutree(clusters,k)
-		Zinit1[,repet] = clusterCut
-		
-		for(r in 1:k)
-		{
-			Z = Zinit1[,repet]
-			Z_bin = vec_bin(Z,r)
-			Z_vec = Z_bin$Z #vecteur 0 et 1 aux endroits où Z==r
-			Z_indice = Z_bin$indice #renvoit les indices où Z==r
-			
-			betaInit1[,,r,repet] =
-				ginv(t(x[Z_indice,])%*%x[Z_indice,])%*%t(x[Z_indice,])%*%y[Z_indice,]
-			sigmaInit1[,,r,repet] = diag(m)
-			phiInit1[,,r,repet] = betaInit1[,,r,repet]/sigmaInit1[,,r,repet]
-			rhoInit1[,,r,repet] = solve(sigmaInit1[,,r,repet])
-			piInit1[repet,r] = sum(Z_vec)/n
-		}
-		
-		for(i in 1:n)
-		{
-			for(r in 1:k)
-			{
-				dotProduct = (y[i,]%*%rhoInit1[,,r,repet]-x[i,]%*%phiInit1[,,r,repet]) %*%
-					(y[i,]%*%rhoInit1[,,r,repet]-x[i,]%*%phiInit1[,,r,repet])
-				Gam[i,r] = piInit1[repet,r]*det(rhoInit1[,,r,repet])*exp(-0.5*dotProduct)
-			}
-			sumGamI = sum(gam[i,])
-			gamInit1[i,,repet]= Gam[i,] / sumGamI
-		}
-		
-		miniInit = 10
-		maxiInit = 11
-		
-		new_EMG = .Call("EMGLLF",phiInit1[,,,repet],rhoInit1[,,,repet],piInit1[repet,],
-			gamInit1[,,repet],miniInit,maxiInit,1,0,x,y,tau)
-		LLFEessai = new_EMG$LLF
-		LLFinit1[repet] = LLFEessai[length(LLFEessai)]
-	}
-	
-	b = which.max(LLFinit1)
-	phiInit = phiInit1[,,,b]
-	rhoInit = rhoInit1[,,,b]
-	piInit = piInit1[b,]
-	gamInit = gamInit1[,,b]
-	
-	return (list(phiInit=phiInit, rhoInit=rhoInit, piInit=piInit, gamInit=gamInit))
+  n = nrow(Y)
+  m = ncol(Y)
+  p = ncol(X)
+
+  betaInit1 = array(0, dim=c(p,m,k,20))
+  sigmaInit1 = array(0, dim = c(m,m,k,20))
+  phiInit1 = array(0, dim = c(p,m,k,20))
+  rhoInit1 = array(0, dim = c(m,m,k,20))
+  piInit1 = matrix(0,20,k)
+  gamInit1 = array(0, dim=c(n,k,20))
+  Zinit1 = matrix(0,n,20)
+  Gam = matrix(0,n,k)
+  LLFinit1 = list()
+
+  require(MASS) #Moore-Penrose generalized inverse of matrix
+  for(repet in 1:20)
+  {
+    clusters = hclust(dist(Y)) #default distance: euclidean
+    #cutree returns the cluster index each individual belongs to, from a hierarchical clustering
+    clusterCut = cutree(clusters,k)
+    Zinit1[,repet] = clusterCut
+
+    for(r in 1:k)
+    {
+      Z = Zinit1[,repet]
+      Z_bin = vec_bin(Z,r)
+      Z_vec = Z_bin$Z #0/1 vector indicating where Z==r
+      Z_indice = Z_bin$indice #indices where Z==r
+
+      betaInit1[,,r,repet] =
+        ginv(t(X[Z_indice,])%*%X[Z_indice,])%*%t(X[Z_indice,])%*%Y[Z_indice,]
+      sigmaInit1[,,r,repet] = diag(m)
+      phiInit1[,,r,repet] = betaInit1[,,r,repet]/sigmaInit1[,,r,repet]
+      rhoInit1[,,r,repet] = solve(sigmaInit1[,,r,repet])
+      piInit1[repet,r] = sum(Z_vec)/n
+    }
+
+    for(i in 1:n)
+    {
+      for(r in 1:k)
+      {
+        dotProduct = (Y[i,]%*%rhoInit1[,,r,repet]-X[i,]%*%phiInit1[,,r,repet]) %*%
+          (Y[i,]%*%rhoInit1[,,r,repet]-X[i,]%*%phiInit1[,,r,repet])
+        Gam[i,r] = piInit1[repet,r]*det(rhoInit1[,,r,repet])*exp(-0.5*dotProduct)
+      }
+      sumGamI = sum(Gam[i,])
+      gamInit1[i,,repet] = Gam[i,] / sumGamI
+    }
+
+    miniInit = 10
+    maxiInit = 11
+
+    new_EMG = .Call("EMGLLF",phiInit1[,,,repet],rhoInit1[,,,repet],piInit1[repet,],
+      gamInit1[,,repet],miniInit,maxiInit,1,0,X,Y,tau)
+    LLFEessai = new_EMG$LLF
+    LLFinit1[repet] = LLFEessai[length(LLFEessai)]
+  }
+
+  b = which.max(unlist(LLFinit1))
+  phiInit = phiInit1[,,,b]
+  rhoInit = rhoInit1[,,,b]
+  piInit = piInit1[b,]
+  gamInit = gamInit1[,,b]
+
+  return (list(phiInit=phiInit, rhoInit=rhoInit, piInit=piInit, gamInit=gamInit))
 }
diff --git a/R/main.R b/R/main.R
index eab5e3f..2eec878 100644
--- a/R/main.R
+++ b/R/main.R
@@ -100,7 +100,7 @@ SelMix = setRefClass(
       "computation of the regularization grid"
       #(according to explicit formula given by EM algorithm)
-      gridLambda <<- grillelambda(sx.phiInit,sx.rhoInit,sx.piInit,sx.tauInit,sx.X,sx.Y,
+      gridLambda <<- gridLambda(sx.phiInit,sx.rhoInit,sx.piInit,sx.tauInit,sx.X,sx.Y,
         sx.gamma,sx.mini,sx.maxi,sx.eps);
     },
diff --git a/R/modelSelection.R b/R/modelSelection.R
new file mode 100644
index 0000000..8020daa
--- /dev/null
+++ b/R/modelSelection.R
@@ -0,0 +1,37 @@
+#' Among a collection of models, construct a subcollection of models with strictly
+#' different dimensions, keeping for each dimension the model which maximizes the
+#' likelihood when several models share that dimension
+#'
+#' @param LLF a matrix whose first column contains the likelihoods of the models and
+#' whose second column contains the dimensions of the corresponding models
+#'
+#' @return a list with indices, a vector of indices of the selected models,
+#' and D1, a vector of the corresponding dimensions
+#' @export
+#'
+#' @examples
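+#' # Hedged sketch: five hypothetical models, likelihoods in column 1 and dimensions
+#' # in column 2; the best model of each dimension is kept.
+#' LLF = cbind(c(-10, -8, -9, -7, -6), c(2, 2, 3, 3, 4))
+#' modelSelection(LLF)  # indices 2, 4, 5 for dimensions 2, 3, 4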
+modelSelection = function(LLF)
+{
+  D = LLF[,2]
+  D1 = unique(D)
+
+  indices = rep(1, length(D1))
+  #select the argmax of the likelihood
+  if (length(D1)>2)
+  {
+    for (i in 1:length(D1))
+    {
+      a = c()
+      for (j in 1:length(D))
+      {
+        if(D[[j]]==D1[[i]])
+          a = c(a, LLF[j,1])
+      }
+      b = max(a)
+      #indices[i]: index of the first model whose likelihood equals the maximum b
+      indices[i] = which.max(vec_bin(LLF[,1],b)[[1]])
+    }
+  }
+
+  return (list(indices=indices,D1=D1))
+}
diff --git a/R/selectionindice.R b/R/selectionindice.R
deleted file mode 100644
index 97014b1..0000000
--- a/R/selectionindice.R
+++ /dev/null
@@ -1,28 +0,0 @@
-selectionindice = function(phi, seuil)
-{
-	dim_phi = dim(phi)
-	pp = dim_phi[1]
-	m = dim_phi[2]
-
-	A = matrix(0, pp, m)
-	B = matrix(0, pp, m)
-
-	for(j in 1:pp)
-	{
-		cpt1 = 0
-		cpt2 = 0
-		for(mm in 1:m)
-		{
-			if(max(phi[j,mm,]) > seuil)
-			{
-				cpt1 = cpt1 + 1
-				A[j,cpt] = mm
-			} else
-			{
-				cpt2 = cpt2+1
-				B[j, cpt2] = mm
-			}
-		}
-	}
-	return (list(A,B))
-}
diff --git a/R/selectionmodele.R b/R/selectionmodele.R
deleted file mode 100644
index ed32731..0000000
--- a/R/selectionmodele.R
+++ /dev/null
@@ -1,45 +0,0 @@
-vec_bin = function(X,r)
-{
-	Z = c()
-	indice = c()
-
-	j = 1
-	for(i in 1:length(X))
-	{
-		if(X[i] == r)
-		{
-			Z[i] = 1
-			indice[j] = i
-			j=j+1
-		} else
-			Z[i] = 0
-	}
-
-	return (list(Z=Z,indice=indice))
-}
-
-selectionmodele = function(vraisemblance)
-{
-	D = vraimsemblance[,2]
-	D1 = unique(D)
-
-	indice = rep(1, length(D1))
-	#select argmax MLE
-	if (length(D1)>2)
-	{
-		for (i in 1:length(D1))
-		{
-			A = c()
-			for (j in 1:length(D))
-			{
-				if(D[[j]]==D1[[i]])
-					a = c(a, vraimsemblance[j,1])
-			}
-			b = max(a)
-			#indice[i] : premier indice du vecteur binaire où u_i ==1
-			indice[i] = which.max(vec_bin(vraimsemblance,b)[[1]])
-		}
-	}
-
-	return (list(indice=indice,D1=D1))
-}
diff --git a/R/suppressionmodelesegaux.R b/R/suppressionmodelesegaux.R
deleted file mode 100644
index a588062..0000000
--- a/R/suppressionmodelesegaux.R
+++ /dev/null
@@ -1,20 +0,0 @@
-suppressionmodelesegaux = function(B1,B2,glambda,rho,pi)
-{
-	ind = c()
-	for (j in 1:length(glambda))
-	{
-		for (ll in 1:(l-1))
-		{
-			if(B1[,,l] == B1[,,ll])
-				ind = c(ind, l)
-		}
-	}
-	ind = unique(ind)
-	B1 = B1[,,-ind]
-	glambda = glambda[-ind]
-	B2 = B2[,,-ind]
-	rho = rho[,,,-ind]
-	pi = pi[,-ind]
-
-	return (list(B1=B1,B2=B2,glambda=glambda,ind=ind,rho=rho,pi=pi))
-}
diff --git a/R/suppressionmodelesegaux2.R b/R/suppressionmodelesegaux2.R
deleted file mode 100644
index 741793b..0000000
--- a/R/suppressionmodelesegaux2.R
+++ /dev/null
@@ -1,24 +0,0 @@
-suppressionmodelesegaux2 = function(B1,rho,pi)
-{
-	ind = c()
-	dim_B1 = dim(B1)
-	B2 = array(0,dim=c(dim_B1[1],dim_B1[2],dim_B1[3]))
-	nombreLambda=dim_B1[[2]]
-	glambda = rep(0,nombreLambda)
-
-	#for(j in 1:nombreLambda){
-	#	for(ll in 1:(l-1)){
-	#		if(B1[,,l] == B1[,,ll]){
-	#			ind = c(ind, l)
-	#		}
-	#	}
-	#}
-	#ind = unique(ind)
-	#B1 = B1[,,-ind]
-	#rho = rho[,,,-ind]
-	#pi = pi[,-ind]
-
-	suppressmodel = suppressionmodelesegaux(B1,B2,glambda,rho,pi)
-	return (list(B1 = suppressmodel$B1, ind = suppressmodel$B2,
-		rho = suppressmodel$rho, pi = suppressmodel$pi))
-}
diff --git a/R/vec_bin.R b/R/vec_bin.R
new file mode 100644
index 0000000..01dbfe1
--- /dev/null
+++ b/R/vec_bin.R
@@ -0,0 +1,23 @@
+#' A helper function used by initSmallEM and modelSelection
+#'
+#' @param X vector with integer values
+#' @param r integer
+#'
+#' @return a list with Z (a binary vector of the same length as X) and indice, the indices where Z equals 1
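+#' @examples
+#' # Hedged sketch: flag the positions equal to 1 in a small label vector
+#' vec_bin(c(1, 2, 1, 3), 1)  # Z = c(1, 0, 1, 0), indice = c(1, 3)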
+vec_bin = function(X,r)
+{
+  Z = rep(0,length(X))
+  indice = c()
+  j = 1
+  for (i in 1:length(X))
+  {
+    if(X[i] == r)
+    {
+      Z[i] = 1
+      indice[j] = i
+      j=j+1
+    } else
+      Z[i] = 0
+  }
+  return (list(Z=Z,indice=indice))
+}
\ No newline at end of file