X-Git-Url: https://git.auder.net/?p=morpheus.git;a=blobdiff_plain;f=pkg%2FR%2FoptimParams.R;h=c1d7fe8d4d006b89a64c230d08b0f85e37d9cf1a;hp=ecfae7f5d145e49ddd13244125818fdda22917cd;hb=19d893c4554f7f2cc9a75111cec40712c698e7e2;hpb=9a6881ed8a16c31a3dbe995e3b1af76c1db6b5a0

diff --git a/pkg/R/optimParams.R b/pkg/R/optimParams.R
index ecfae7f..c1d7fe8 100644
--- a/pkg/R/optimParams.R
+++ b/pkg/R/optimParams.R
@@ -104,7 +104,7 @@ setRefClass(
     list(
       # p: dimension K-1, need to be completed
       "p" = c(v[1:(K-1)], 1-sum(v[1:(K-1)])),
-      "β" = matrix(v[K:(K+d*K-1)], ncol=K),
+      "β" = t(matrix(v[K:(K+d*K-1)], ncol=d)),
       "b" = v[(K+d*K):(K+(d+1)*K-1)])
   },
 
@@ -112,25 +112,20 @@ setRefClass(
   {
     "Linearize vectors+matrices from list L into a vector"
 
-    c(L$p[1:(K-1)], as.double(L$β), L$b)
+    # β linearized row by row, to match derivatives order
+    c(L$p[1:(K-1)], as.double(t(L$β)), L$b)
   },
 
-  #TODO: compare with R version?
-  #D <- diag(d) #matrix of ej vectors
-  #Y * X
-  #Y * ( t( apply(X, 1, function(row) row %o% row) ) - Reduce('+', lapply(1:d, function(j) as.double(D[j,] %o% D[j,])), rep(0, d*d)))
-  #Y * ( t( apply(X, 1, function(row) row %o% row %*% row) ) - Reduce('+', lapply(1:d, function(j) ), rep(0, d*d*d)))
   computeW = function(θ)
   {
-    #require(MASS)
+    require(MASS)
     dd <- d + d^2 + d^3
     M <- Moments(θ)
     Omega <- matrix( .C("Compute_Omega",
-      X=as.double(X), Y=as.double(Y), M=as.double(M),
+      X=as.double(X), Y=as.integer(Y), M=as.double(M),
       pn=as.integer(n), pd=as.integer(d),
       W=as.double(W), PACKAGE="morpheus")$W, nrow=dd, ncol=dd )
-    W <<- MASS::ginv(Omega, tol=1e-4)
-    NULL #avoid returning W
+    MASS::ginv(Omega)
   },
 
   Moments = function(θ)
@@ -166,7 +161,7 @@ setRefClass(
     "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"
 
     L <- expArgs(θ)
-    -2 * t(grad_M(L)) %*% W %*% as.matrix((Mhat - Moments(L)))
+    -2 * t(grad_M(L)) %*% W %*% as.matrix(Mhat - Moments(L))
   },
 
   grad_M = function(θ)
@@ -245,31 +240,35 @@ setRefClass(
       stop("θ0: list")
     if (is.null(θ0$β))
       stop("At least θ0$β must be provided")
-    if (!is.matrix(θ0$β) || any(is.na(θ0$β)) || ncol(θ0$β) != K)
-      stop("θ0$β: matrix, no NA, ncol == K")
+    if (!is.matrix(θ0$β) || any(is.na(θ0$β))
+      || nrow(θ0$β) != d || ncol(θ0$β) != K)
+    {
+      stop("θ0$β: matrix, no NA, nrow = d, ncol = K")
+    }
     if (is.null(θ0$p))
       θ0$p = rep(1/K, K-1)
-    else if (length(θ0$p) != K-1 || sum(θ0$p) > 1)
-      stop("θ0$p should contain positive integers and sum to < 1")
-    # Next test = heuristic to detect missing b (when matrix is called "beta")
-    if (is.null(θ0$b) || all(θ0$b == θ0$β))
+    else if (!is.numeric(θ0$p) || length(θ0$p) != K-1
+      || any(is.na(θ0$p)) || sum(θ0$p) > 1)
+    {
+      stop("θ0$p: length K-1, no NA, positive integers, sum to <= 1")
+    }
+    if (is.null(θ0$b))
       θ0$b = rep(0, K)
-    else if (any(is.na(θ0$b)))
-      stop("θ0$b cannot have missing values")
+    else if (!is.numeric(θ0$b) || length(θ0$b) != K || any(is.na(θ0$b)))
+      stop("θ0$b: length K, no NA")
 
     # TODO: stopping condition? N iterations? Delta <= epsilon ?
-    for (loop in 1:10)
+    loopMax <- 2
+    for (loop in 1:loopMax)
     {
       op_res = constrOptim( linArgs(θ0), .self$f, .self$grad_f,
         ui=cbind(
           rbind( rep(-1,K-1), diag(K-1) ),
           matrix(0, nrow=K, ncol=(d+1)*K) ),
         ci=c(-1,rep(0,K-1)) )
-
-      computeW(expArgs(op_res$par))
-      # debug:
-      #print(W)
-      print(op_res$value)
-      print(expArgs(op_res$par))
+      if (loop < loopMax) #avoid computing an extra W
+        W <<- computeW(expArgs(op_res$par))
+      #print(op_res$value) #debug
+      #print(expArgs(op_res$par)) #debug
     }
     expArgs(op_res$par)
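
The β reshaping fixed in expArgs above is the inverse of the row-by-row linearization now used in linArgs. A minimal sketch of that round trip, with toy sizes d = 2, K = 2 assumed purely for illustration (none of these values come from the package):

    # Toy round trip: parameter list -> linearized vector -> matrix again (assumed values)
    d <- 2; K <- 2
    beta <- matrix(1:(d*K), nrow=d, ncol=K)        # d x K matrix of coefficients
    p <- c(0.4, 0.6); b <- c(0, 0)
    # linArgs order: first K-1 entries of p, then beta row by row, then b
    v <- c(p[1:(K-1)], as.double(t(beta)), b)
    # expArgs: matrix(..., ncol=d) refills column by column, t() restores the d x K shape
    beta_back <- t(matrix(v[K:(K+d*K-1)], ncol=d))
    stopifnot(all(beta_back == beta))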
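
The ui/ci arguments built for constrOptim in the run loop constrain only the p block of the linearized parameter: constrOptim enforces ui %*% θ - ci >= 0, which here means every free p_i >= 0 and p_1 + ... + p_{K-1} <= 1, while the columns for β and b are all zero (unconstrained). A small sketch of those matrices under assumed toy sizes K = 3, d = 2:

    # Assumed toy sizes, just to display the constraint matrices
    K <- 3; d <- 2
    ui <- cbind(
      rbind( rep(-1,K-1), diag(K-1) ),       # acts on p[1..K-1]
      matrix(0, nrow=K, ncol=(d+1)*K) )      # β and b left unconstrained
    ci <- c(-1, rep(0,K-1))
    dim(ui)  # K x ((K-1) + (d+1)*K) = 3 x 11, matching the length of the linearized θ
    # Row 1: -p1 - p2 >= -1  <=>  p1 + p2 <= 1 ; rows 2..K: p_i >= 0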
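
The corrected grad_f line is the usual gradient of the quadratic form f(θ) = (Mhat - M(θ))' W (Mhat - M(θ)) with a symmetric weight W, namely -2 * grad_M(θ)' W (Mhat - M(θ)). A self-contained numeric check of that identity on a toy linear moment map; all names, sizes and data below are assumptions, not package code:

    # Finite-difference check of the grad_f formula on a toy problem (assumed data)
    set.seed(1)
    dd <- 4; q <- 3                                # dd moments, q parameters
    W <- crossprod(matrix(rnorm(dd*dd), dd))       # symmetric PSD weight matrix
    A <- matrix(rnorm(dd*q), dd, q); Mhat <- rnorm(dd)
    Mfun <- function(th) as.vector(A %*% th)       # linear moments, so grad_M == A
    f  <- function(th) { r <- Mhat - Mfun(th); as.numeric(t(r) %*% W %*% r) }
    gr <- function(th) as.vector(-2 * t(A) %*% W %*% (Mhat - Mfun(th)))
    th0 <- rnorm(q); h <- 1e-6
    num <- sapply(1:q, function(j) { e <- rep(0, q); e[j] <- h
      (f(th0 + e) - f(th0 - e)) / (2*h) })
    max(abs(num - gr(th0)))                        # tiny: the two formulas agree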