X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=R%2FinitSmallEM.R;h=541d7e1cb922b590d1893eea5bba3472eeb1e4ed;hb=53fa233d8fbeaf4d51a4874ba69d8472d01d04ba;hp=3f98e07ad2a1998791ccb53d8a42b3e5962b96e7;hpb=ae4fa2cb5a036ed1c8b8b69f257c007e8894238d;p=valse.git

diff --git a/R/initSmallEM.R b/R/initSmallEM.R
index 3f98e07..541d7e1 100644
--- a/R/initSmallEM.R
+++ b/R/initSmallEM.R
@@ -3,17 +3,18 @@
 #' @param k number of components
 #' @param X matrix of covariates (of size n*p)
 #' @param Y matrix of responses (of size n*m)
-#' @param tau threshold to stop EM algorithm
 #'
 #' @return a list with phiInit, rhoInit, piInit, gamInit
 #' @export
-initSmallEM = function(k,X,Y,tau)
+#' @importFrom methods new
+#' @importFrom stats cutree dist hclust runif
+initSmallEM = function(k,X,Y)
 {
   n = nrow(Y)
   m = ncol(Y)
   p = ncol(X)

-  Zinit1 = array(0, dim=c(n,20)) # unsure about this size
+  Zinit1 = array(0, dim=c(n,20))
   betaInit1 = array(0, dim=c(p,m,k,20))
   sigmaInit1 = array(0, dim = c(m,m,k,20))
   phiInit1 = array(0, dim = c(p,m,k,20))
@@ -24,31 +25,34 @@ initSmallEM = function(k,X,Y,tau)
   LLFinit1 = list()
   require(MASS) # Moore-Penrose generalized inverse of matrix
-  require(mclust) # K-means with selection of K

   for(repet in 1:20)
   {
-    clusters = Mclust(X,k) # default distance: euclidean #Mclust(matrix(c(X,Y)),k)
-    Zinit1[,repet] = clusters$classification
-
+    distance_clus = dist(X)
+    tree_hier = hclust(distance_clus)
+    Zinit1[,repet] = cutree(tree_hier, k)
+
     for(r in 1:k)
     {
       Z = Zinit1[,repet]
-      Z_bin = vec_bin(Z,r)
-      Z_vec = Z_bin$vec # 0/1 vector marking the positions where Z==r
-      Z_indice = Z_bin$indice # indices where Z==r
-
-      betaInit1[,,r,repet] = ginv(t(X[Z_indice,])%*%X[Z_indice,])%*%t(X[Z_indice,])%*%Y[Z_indice,]
+      Z_indice = seq_len(n)[Z == r] # indices where Z==r
+      if (length(Z_indice) == 1) {
+        betaInit1[,,r,repet] = ginv(crossprod(t(X[Z_indice,]))) %*%
+          crossprod(t(X[Z_indice,]), Y[Z_indice,])
+      } else {
+        betaInit1[,,r,repet] = ginv(crossprod(X[Z_indice,])) %*%
+          crossprod(X[Z_indice,], Y[Z_indice,])
+      }
       sigmaInit1[,,r,repet] = diag(m)
-      phiInit1[,,r,repet] = betaInit1[,,r,repet]#/sigmaInit1[,,r,repet]
+      phiInit1[,,r,repet] = betaInit1[,,r,repet] #/ sigmaInit1[,,r,repet]
       rhoInit1[,,r,repet] = solve(sigmaInit1[,,r,repet])
-      piInit1[repet,r] = sum(Z_vec)/n
+      piInit1[repet,r] = mean(Z == r)
     }

     for(i in 1:n)
     {
       for(r in 1:k)
       {
-        dotProduct = 3 #(Y[i,]%*%rhoInit1[,,r,repet]-X[i,]%*%phiInit1[,,r,repet]) %*% (Y[i,]%*%rhoInit1[,,r,repet]-X[i,]%*%phiInit1[,,r,repet])
+        dotProduct = tcrossprod(Y[i,]%*%rhoInit1[,,r,repet]-X[i,]%*%phiInit1[,,r,repet])
         Gam[i,r] = piInit1[repet,r]*det(rhoInit1[,,r,repet])*exp(-0.5*dotProduct)
       }
       sumGamI = sum(Gam[i,])
@@ -58,8 +62,9 @@ initSmallEM = function(k,X,Y,tau)
   miniInit = 10
   maxiInit = 11

-  new_EMG = .Call("EMGLLF",phiInit1[,,,repet],rhoInit1[,,,repet],piInit1[repet,],
-    gamInit1[,,repet],miniInit,maxiInit,1,0,X,Y,tau)
+  #new_EMG = .Call("EMGLLF_core",phiInit1[,,,repet],rhoInit1[,,,repet],piInit1[repet,],
+#  gamInit1[,,repet],miniInit,maxiInit,1,0,X,Y,1e-4)
+  new_EMG = EMGLLF(phiInit1[,,,repet],rhoInit1[,,,repet],piInit1[repet,],gamInit1[,,repet],miniInit,maxiInit,1,0,X,Y,1e-4)
   LLFEessai = new_EMG$LLF
   LLFinit1[repet] = LLFEessai[length(LLFEessai)]
 }
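
A minimal usage sketch of the reworked initializer, separate from the diff above: it assumes the valse package is installed and loaded (so initSmallEM and its EMGLLF dependency are available), and the data and dimensions below are made up for illustration only.

  library(valse)
  set.seed(1)
  n = 50; p = 4; m = 3; k = 2
  X = matrix(rnorm(n*p), n, p)                                    # covariates, n x p
  Y = X %*% matrix(rnorm(p*m), p, m) + matrix(rnorm(n*m), n, m)   # responses, n x m
  init = initSmallEM(k, X, Y)     # note: no tau argument in this version
  str(init$phiInit)               # initial regression parameters, per the documented @return
  str(init$piInit)                # initial mixture proportions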