X-Git-Url: https://git.auder.net/?a=blobdiff_plain;f=pkg%2FR%2FoptimParams.R;h=a45f71ac4f64769f7da037324efef533ffa58ba0;hb=b357ac5457105ee87ad8650d61ad3c3845e0df95;hp=dc294f002fb42ec06233ef8c202c90bee62afcef;hpb=0a630686751ef8baa2c170067b981d2a219a9224;p=morpheus.git diff --git a/pkg/R/optimParams.R b/pkg/R/optimParams.R index dc294f0..a45f71a 100644 --- a/pkg/R/optimParams.R +++ b/pkg/R/optimParams.R @@ -117,11 +117,11 @@ setRefClass( computeW = function(θ) { - dim <- d + d^2 + d^3 - W <<- solve( matrix( .C("Compute_Omega", - X=as.double(X), Y=as.double(Y), M=as.double(Moments(θ)), - pn=as.integer(n), pd=as.integer(d), - W=as.double(W), PACKAGE="morpheus")$W, nrow=dim, ncol=dim) ) + #require(MASS) + dd <- d + d^2 + d^3 + W <<- MASS::ginv( matrix( .C("Compute_Omega", + X=as.double(X), Y=Y, M=Moments(θ), pn=as.integer(n), pd=as.integer(d), + W=as.double(W), PACKAGE="morpheus")$W, nrow=dd, ncol=dd ) ) NULL #avoid returning W }, @@ -146,10 +146,10 @@ setRefClass( f = function(θ) { - "Product t(Mi - hat_Mi) W (Mi - hat_Mi) with Mi(theta)" + "Product t(hat_Mi - Mi) W (hat_Mi - Mi) with Mi(theta)" L <- expArgs(θ) - A <- as.matrix(Moments(L) - Mhat) + A <- as.matrix(Mhat - Moments(L)) t(A) %*% W %*% A }, @@ -186,13 +186,11 @@ setRefClass( # Gradient on p: K-1 columns, dim rows km1 = 1:(K-1) - res <- cbind(res, rbind( sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K], sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K], sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] )) - # TODO: understand derivatives order and match the one in optim init param for (i in 1:d) { # i determines the derivated matrix dβ[2,3] @@ -251,18 +249,21 @@ setRefClass( else if (any(is.na(θ0$b))) stop("θ0$b cannot have missing values") - op_res = constrOptim( linArgs(θ0), .self$f, .self$grad_f, - ui=cbind( - rbind( rep(-1,K-1), diag(K-1) ), - matrix(0, nrow=K, ncol=(d+1)*K) ), - ci=c(-1,rep(0,K-1)) ) - - # debug: - computeW(expArgs(op_res$par)) - print(W) - # We get a first non-trivial estimation of W - # TODO: loop, this redefine f, so that we can call constrOptim again... - # Stopping condition? N iterations? Delta <= epsilon ? + # TODO: stopping condition? N iterations? Delta <= epsilon ? + for (loop in 1:10) + { + op_res = constrOptim( linArgs(θ0), .self$f, .self$grad_f, + ui=cbind( + rbind( rep(-1,K-1), diag(K-1) ), + matrix(0, nrow=K, ncol=(d+1)*K) ), + ci=c(-1,rep(0,K-1)) ) + + computeW(expArgs(op_res$par)) + # debug: + #print(W) + print(op_res$value) + print(expArgs(op_res$par)) + } expArgs(op_res$par) }