X-Git-Url: https://git.auder.net/?p=morpheus.git;a=blobdiff_plain;f=pkg%2FR%2FoptimParams.R;h=f42571dc130e2707dfd234c5db2647c824880963;hp=c1d7fe8d4d006b89a64c230d08b0f85e37d9cf1a;hb=2b3a6af5c55ac121405e3a8da721626ddf46b28b;hpb=19d893c4554f7f2cc9a75111cec40712c698e7e2

diff --git a/pkg/R/optimParams.R b/pkg/R/optimParams.R
index c1d7fe8..f42571d 100644
--- a/pkg/R/optimParams.R
+++ b/pkg/R/optimParams.R
@@ -1,37 +1,42 @@
 #' Wrapper function for OptimParams class
 #'
-#' @param K Number of populations.
-#' @param link The link type, 'logit' or 'probit'.
 #' @param X Data matrix of covariables
 #' @param Y Output as a binary vector
+#' @param K Number of populations.
+#' @param link The link type, 'logit' or 'probit'.
+#' @param M The empirical cross-moments between X and Y (optional)
 #'
-#' @return An object 'op' of class OptimParams, initialized so that \code{op$run(x0)}
-#' outputs the list of optimized parameters
+#' @return An object 'op' of class OptimParams, initialized so that
+#' \code{op$run(θ0)} outputs the list of optimized parameters
 #' \itemize{
 #'   \item p: proportions, size K
 #'   \item β: regression matrix, size dxK
 #'   \item b: intercepts, size K
 #' }
-#' θ0 is a vector containing respectively the K-1 first elements of p, then β by
-#' columns, and finally b: \code{θ0 = c(p[1:(K-1)],as.double(β),b)}.
+#' θ0 is a list containing the initial parameters. Only β is required
+#' (p would be set to (1/K,...,1/K) and b to (0,...,0)).
 #'
 #' @seealso \code{multiRun} to estimate statistics based on β, and
 #'   \code{generateSampleIO} for I/O random generation.
 #'
 #' @examples
 #' # Optimize parameters from estimated μ
-#' io = generateSampleIO(10000, 1/2, matrix(c(1,-2,3,1),ncol=2), c(0,0), "logit")
+#' io <- generateSampleIO(100,
+#'   1/2, matrix(c(1,-2,3,1),ncol=2), c(0,0), "logit")
 #' μ = computeMu(io$X, io$Y, list(K=2))
 #' o <- optimParams(io$X, io$Y, 2, "logit")
+#' \donttest{
 #' θ0 <- list(p=1/2, β=μ, b=c(0,0))
 #' par0 <- o$run(θ0)
 #' # Compare with another starting point
 #' θ1 <- list(p=1/2, β=2*μ, b=c(0,0))
 #' par1 <- o$run(θ1)
+#' # Look at the function values at par0 and par1:
 #' o$f( o$linArgs(par0) )
-#' o$f( o$linArgs(par1) )
+#' o$f( o$linArgs(par1) )}
+#'
 #' @export
-optimParams <- function(X, Y, K, link=c("logit","probit"))
+optimParams <- function(X, Y, K, link=c("logit","probit"), M=NULL)
 {
   # Check arguments
   if (!is.matrix(X) || any(is.na(X)))
@@ -42,23 +47,37 @@ optimParams <- function(X, Y, K, link=c("logit","probit"))
   if (!is.numeric(K) || K!=floor(K) || K < 2)
     stop("K: integer >= 2")

+  if (is.null(M))
+  {
+    # Precompute empirical moments
+    Mtmp <- computeMoments(X, Y)
+    M1 <- as.double(Mtmp[[1]])
+    M2 <- as.double(Mtmp[[2]])
+    M3 <- as.double(Mtmp[[3]])
+    M <- c(M1, M2, M3)
+  }
+  else
+    M <- c(M[[1]], M[[2]], M[[3]])
+
   # Build and return optimization algorithm object
   methods::new("OptimParams", "li"=link, "X"=X,
-    "Y"=as.integer(Y), "K"=as.integer(K))
+    "Y"=as.integer(Y), "K"=as.integer(K), "Mhat"=as.double(M))
 }

-#' Encapsulated optimization for p (proportions), β and b (regression parameters)
-#'
-#' Optimize the parameters of a mixture of logistic regressions model, possibly using
-#' \code{mu <- computeMu(...)} as a partial starting point.
-#'
-#' @field li Link function, 'logit' or 'probit'
-#' @field X Data matrix of covariables
-#' @field Y Output as a binary vector
-#' @field K Number of populations
-#' @field d Number of dimensions
-#' @field W Weights matrix (iteratively refined)
-#'
+# Encapsulated optimization for p (proportions), β and b (regression parameters)
+#
+# Optimize the parameters of a mixture of logistic regressions model, possibly using
+# \code{mu <- computeMu(...)} as a partial starting point.
+#
+# @field li Link function, 'logit' or 'probit'
+# @field X Data matrix of covariables
+# @field Y Output as a binary vector
+# @field Mhat Vector of empirical moments
+# @field K Number of populations
+# @field n Number of sample points
+# @field d Number of dimensions
+# @field W Weights matrix (initialized at identity)
+#
 setRefClass(
   Class = "OptimParams",

@@ -82,19 +101,15 @@ setRefClass(
       "Check args and initialize K, d, W"

       callSuper(...)
-      if (!hasArg("X") || !hasArg("Y") || !hasArg("K") || !hasArg("li"))
+      if (!hasArg("X") || !hasArg("Y") || !hasArg("K")
+          || !hasArg("li") || !hasArg("Mhat"))
+      {
         stop("Missing arguments")
-
-      # Precompute empirical moments
-      M <- computeMoments(X, Y)
-      M1 <- as.double(M[[1]])
-      M2 <- as.double(M[[2]])
-      M3 <- as.double(M[[3]])
-      Mhat <<- c(M1, M2, M3)
+      }

       n <<- nrow(X)
-      d <<- length(M1)
-      W <<- diag(d+d^2+d^3) #initialize at W = Identity
+      d <<- ncol(X)
+      # W will be initialized when calling run()
     },

     expArgs = function(v)
@@ -116,8 +131,11 @@ setRefClass(
       c(L$p[1:(K-1)], as.double(t(L$β)), L$b)
     },

+    # TODO: relocate computeW in utils.R
     computeW = function(θ)
     {
+      "Compute the weights matrix from a parameters list"
+
       require(MASS)
       dd <- d + d^2 + d^3
       M <- Moments(θ)
@@ -130,7 +148,7 @@ setRefClass(

     Moments = function(θ)
     {
-      "Vector of moments, of size d+d^2+d^3"
+      "Compute the vector of theoretical moments (size d+d^2+d^3)"

       p <- θ$p
       β <- θ$β
@@ -149,7 +167,7 @@ setRefClass(

     f = function(θ)
     {
-      "Product t(hat_Mi - Mi) W (hat_Mi - Mi) with Mi(theta)"
+      "Function to minimize: t(hat_Mi - Mi(θ)) . W . (hat_Mi - Mi(θ))"

       L <- expArgs(θ)
       A <- as.matrix(Mhat - Moments(L))
@@ -158,7 +176,7 @@ setRefClass(

     grad_f = function(θ)
     {
-      "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"
+      "Gradient of f: vector of size (K-1) + d*K + K = (d+2)*K - 1"

       L <- expArgs(θ)
       -2 * t(grad_M(L)) %*% W %*% as.matrix(Mhat - Moments(L))
@@ -166,7 +184,7 @@ setRefClass(

     grad_M = function(θ)
     {
-      "Gradient of the vector of moments, size (dim=)d+d^2+d^3 x K-1+K+d*K"
+      "Gradient of the moments vector: matrix of size d+d^2+d^3 x K-1+K+d*K"

       p <- θ$p
       β <- θ$β
@@ -256,19 +274,22 @@ setRefClass(
         θ0$b = rep(0, K)
       else if (!is.numeric(θ0$b) || length(θ0$b) != K || any(is.na(θ0$b)))
         stop("θ0$b: length K, no NA")
-      # TODO: stopping condition? N iterations? Delta <= epsilon ?
-      loopMax <- 2
+
+      # (Re)Set W to identity, to allow several runs from the same object
+      W <<- diag(d+d^2+d^3)
+
+      loopMax <- 2 #TODO: loopMax = 3 ? Seems not improving...
+      x_init <- linArgs(θ0)
       for (loop in 1:loopMax)
       {
-        op_res = constrOptim( linArgs(θ0), .self$f, .self$grad_f,
+        op_res = constrOptim( x_init, .self$f, .self$grad_f,
           ui=cbind( rbind( rep(-1,K-1), diag(K-1) ),
             matrix(0, nrow=K, ncol=(d+1)*K) ),
           ci=c(-1,rep(0,K-1)) )
         if (loop < loopMax) #avoid computing an extra W
           W <<- computeW(expArgs(op_res$par))
-        #print(op_res$value) #debug
-        #print(expArgs(op_res$par)) #debug
+        #x_init <- op_res$par #degrades performance (TODO: why?)
       }

       expArgs(op_res$par)
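
A minimal usage sketch of the optional M argument introduced by this diff: the empirical cross-moments can be computed once with computeMoments and handed to optimParams, so they are not recomputed for each object. This is only a sketch built from the functions shown above (generateSampleIO, computeMoments, computeMu, optimParams, run), with p and b left to their documented defaults:

# Sketch: reuse precomputed empirical cross-moments (assumes the morpheus package is loaded)
io <- generateSampleIO(100, 1/2, matrix(c(1,-2,3,1), ncol=2), c(0,0), "logit")
M <- computeMoments(io$X, io$Y)           # list of the three empirical cross-moments
o <- optimParams(io$X, io$Y, K=2, link="logit", M=M)   # passes M instead of recomputing it
μ <- computeMu(io$X, io$Y, list(K=2))
par <- o$run(list(β=μ))                   # p defaults to (1/K,...,1/K), b to (0,...,0)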