# Arguments of a reference-class generator (the enclosing call — presumably
# setRefClass — lies outside this chunk; TODO confirm).
Class = "OptimParams",

fields = list(
	# Inputs
	li = "character", #link 'logit' or 'probit'
	M1 = "numeric", #order-1 moment (vector size d); d is set from length(M1)
	M2 = "numeric", #order-2 moment, flattened: easier to process as a vector
	M3 = "numeric", #order-3 moment, flattened: easier to process as a vector
	# Dimensions
	K = "integer",  #number of components (columns of β, entries of p and b)
	d = "integer"   #covariate dimension, initialized as length(M1)
),
- methods = list(
initialize = function(...)
{
	"Check args and initialize K, d"

	# Let the reference-class machinery assign the fields passed in ...
	callSuper(...)
	# All inputs except d are mandatory; d is derived below.
	# NOTE: hasArg() inspects the current call frame, so these checks must
	# stay inline (not wrapped in a helper or *apply loop).
	if (!hasArg("li") || !hasArg("M1") || !hasArg("M2") || !hasArg("M3")
		|| !hasArg("K"))
	{
		stop("Missing arguments")
	}

	# Covariate dimension is implied by the order-1 moment vector
	d <<- length(M1)
},
-
expArgs = function(x)
{
	"Expand individual arguments from vector x"

	# x is laid out as three consecutive sections: p (K-1), β (d*K), b (K)
	pFree <- x[1:(K-1)]
	βFlat <- x[K:(K+d*K-1)]
	bPart <- x[(K+d*K):(K+(d+1)*K-1)]
	list(
		# p has K-1 free coordinates; the K-th completes the sum to 1
		"p" = c(pFree, 1 - sum(pFree)),
		"β" = matrix(βFlat, ncol=K),
		"b" = bPart)
},
-
linArgs = function(o)
{
	"Linearize vectors+matrices into a vector x"

	# Inverse of expArgs: drop the redundant last coordinate of p and
	# flatten β column by column
	pFree <- o$p[1:(K-1)]
	βFlat <- as.double(o$β)
	c(pFree, βFlat, o$b)
},
-
f = function(x)
{
	"Sum of squares (Mi - hat_Mi)^2 where Mi is obtained from formula"

	# Unpack the flat parameter vector into p / β / b
	P <- expArgs(x)
	p <- P$p
	β <- P$β
	λ <- sqrt(colSums(β^2))  #per-component Euclidean norms of β columns
	b <- P$b

	# Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
	# (β2 is d^2 x K, β3 is d^3 x K: one flattened outer power per column)
	β2 <- apply(β, 2, function(col) col %o% col)
	β3 <- apply(β, 2, function(col) col %o% col %o% col)

	# Weighted least-squares criterion over the three moment orders.
	# NOTE(review): `weights` is not among the declared fields above —
	# presumably defined elsewhere (field in a fuller version, or global);
	# TODO confirm. `.G` is an external helper (link-dependent moments of
	# order k at (λ, b)); semantics not visible here.
	return(
		weights[1] * sum( ( β  %*% (p * .G(li,1,λ,b)) - M1 )^2 ) +
		weights[2] * sum( ( β2 %*% (p * .G(li,2,λ,b)) - M2 )^2 ) +
		weights[3] * sum( ( β3 %*% (p * .G(li,3,λ,b)) - M3 )^2 ) )
},
-
grad_f = function(x)
{
	"Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"

	# Unpack parameters from the flat vector
	P <- expArgs(x)
	p <- P$p
	β <- P$β
	λ <- sqrt(colSums(β^2))  #per-component norms
	# NOTE(review): μ is computed but never used below — dead local?
	μ <- sweep(β, 2, λ, '/')
	b <- P$b

	# Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
	β2 <- apply(β, 2, function(col) col %o% col)
	β3 <- apply(β, 2, function(col) col %o% col %o% col)

	# Some precomputations: .G at orders 1..5 (orders 4 and 5 appear in the
	# derivatives of the order-2 and order-3 moment terms)
	G1 = .G(li,1,λ,b)
	G2 = .G(li,2,λ,b)
	G3 = .G(li,3,λ,b)
	G4 = .G(li,4,λ,b)
	G5 = .G(li,5,λ,b)

	# (Mi - hat_Mi)^2 ' == (Mi - hat_Mi)' 2(Mi - hat_Mi) = Mi' Fi
	# Fi are the residual factors shared by all partial derivatives
	F1 = as.double( 2 * ( β  %*% (p * G1) - M1 ) )
	F2 = as.double( 2 * ( β2 %*% (p * G2) - M2 ) )
	F3 = as.double( 2 * ( β3 %*% (p * G3) - M3 ) )

	# Gradient on p: only the first K-1 coordinates are free; the K-th
	# component enters through p_K = 1 - sum(p_{1..K-1}), hence the
	# subtracted G.[K] * β.[,K] terms.
	# NOTE(review): `weights` is not among the declared fields — confirm origin.
	km1 = 1:(K-1)
	grad <- #gradient on p
		weights[1] * t( sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K] ) %*% F1 +
		weights[2] * t( sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K] ) %*% F2 +
		weights[3] * t( sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ) %*% F3

	# Gradient on β, one covariate row i at a time
	grad_β <- matrix(nrow=d, ncol=K)
	for (i in 1:d)
	{
		# i determines the derivated matrix dβ[2,3]

		# d(β G1)/dβ[i,]: chain rule = (G-derivative part) + (direct part)
		dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
		dβ_right <- matrix(0, nrow=d, ncol=K)
		block <- i
		dβ_right[block,] <- dβ_right[block,] + 1
		dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')

		# Same decomposition for the order-2 tensor term
		dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
		dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
			sweep(dβ_right, 2, β[j,], '*')
		}) )
		block <- ((i-1)*d+1):(i*d)
		dβ2_right[block,] <- dβ2_right[block,] + β
		dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')

		# And for the order-3 tensor term (built on dβ2_right)
		dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
		dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
			sweep(dβ2_right, 2, β[j,], '*')
		}) )
		block <- ((i-1)*d*d+1):(i*d*d)
		dβ3_right[block,] <- dβ3_right[block,] + β2
		dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')

		# Contract each derivative against its residual factor
		grad_β[i,] <-
			weights[1] * t(dβ) %*% F1 +
			weights[2] * t(dβ2) %*% F2 +
			weights[3] * t(dβ3) %*% F3
	}
	grad <- c(grad, as.double(grad_β))

	# Gradient on b: the G-order shifts by one for each moment order
	grad = c(grad, #gradient on b
		weights[1] * t( sweep(β, 2, p * G2, '*') ) %*% F1 +
		weights[2] * t( sweep(β2, 2, p * G3, '*') ) %*% F2 +
		weights[3] * t( sweep(β3, 2, p * G4, '*') ) %*% F3 )

	grad
},
-
run = function(x0)
{
	"Run optimization from x0 with solver..."

	# Validate the starting point: β is the only mandatory component
	if (!is.list(x0))
		stop("x0: list")
	if (is.null(x0$β))
		stop("At least x0$β must be provided")
	if (!is.matrix(x0$β) || any(is.na(x0$β)) || ncol(x0$β) != K)
		stop("x0$β: matrix, no NA, ncol == K")
	if (is.null(x0$p))
		x0$p = rep(1/K, K-1)  #default: uniform proportions
	else if (length(x0$p) != K-1 || sum(x0$p) > 1)
		# NOTE(review): message says "positive integers" but p are
		# proportions, and no non-negativity check is actually performed
		stop("x0$p should contain positive integers and sum to < 1")
	# Next test = heuristic to detect missing b (when matrix is called "beta")
	# NOTE(review): x0$b partial-matches x0$β via `$`, so comparing the two
	# (vector vs matrix, with recycling) detects that case; fragile — confirm.
	if (is.null(x0$b) || all(x0$b == x0$β))
		x0$b = rep(0, K)
	else if (any(is.na(x0$b)))
		stop("x0$b cannot have missing values")

	# Constraints encode the simplex on p (p_i >= 0, sum <= 1);
	# β and b are unconstrained (zero columns in ui)
	op_res = constrOptim( linArgs(x0), .self$f, .self$grad_f,
		ui=cbind(
			rbind( rep(-1,K-1), diag(K-1) ),
			matrix(0, nrow=K, ncol=(d+1)*K) ),
		ci=c(-1,rep(0,K-1)) )

	# Return the optimum as a list (p, β, b)
	expArgs(op_res$par)
}
- )
# Arguments of a reference-class generator (the enclosing call — presumably
# setRefClass — lies outside this chunk; TODO confirm).
Class = "OptimParams",

fields = list(
	# Inputs
	li = "character", #link function ('logit' or 'probit' presumably — confirm)
	X = "matrix",     #design matrix, n rows x d columns
	Y = "numeric",    #responses (coerced to integer when passed to C code)
	Mhat = "numeric", #vector of empirical moments (length d+d^2+d^3)
	# Dimensions
	K = "integer",    #number of components
	n = "integer",    #sample size, set to nrow(X) in initialize()
	d = "integer",    #covariate dimension, set to ncol(X) in initialize()
	nc = "integer",   #forwarded to the C routine Compute_Omega (cores? confirm)
	# Weights matrix (generalized least square)
	W = "matrix"      #set in run(); identity, user-supplied, or computeW()
),
+
+ methods = list(
initialize = function(...)
{
	"Check args and initialize K, d, W"

	# Let the reference-class machinery assign the fields passed in ...
	callSuper(...)
	# All listed inputs are mandatory; n and d are derived from X below.
	# NOTE: hasArg() inspects the current call frame, so these checks must
	# stay inline (not wrapped in a helper or *apply loop).
	if (!hasArg("X") || !hasArg("Y") || !hasArg("K")
		|| !hasArg("li") || !hasArg("Mhat") || !hasArg("nc"))
	{
		stop("Missing arguments")
	}

	n <<- nrow(X)  #sample size
	d <<- ncol(X)  #covariate dimension
	# W will be initialized when calling run()
},
+
expArgs = function(v)
{
	"Expand individual arguments from vector v into a list"

	# v is laid out as three consecutive sections: p (K-1), β (d*K), b (K)
	pFree <- v[1:(K-1)]
	βFlat <- v[K:(K+d*K-1)]
	bPart <- v[(K+d*K):(K+(d+1)*K-1)]
	list(
		# p has K-1 free coordinates; the K-th completes the sum to 1
		"p" = c(pFree, 1 - sum(pFree)),
		# β was linearized row by row (see linArgs), so refill it by rows
		"β" = matrix(βFlat, nrow=d, byrow=TRUE),
		"b" = bPart)
},
+
linArgs = function(L)
{
	"Linearize vectors+matrices from list L into a vector"

	# Inverse of expArgs: drop the redundant last coordinate of p
	pFree <- L$p[1:(K-1)]
	# β linearized row by row (hence the transpose), to match derivatives order
	βRowMajor <- as.double(t(L$β))
	c(pFree, βRowMajor, L$b)
},
+
# TODO: relocate computeW in utils.R
computeW = function(θ)
{
	"Compute the weights matrix from a parameters list θ"

	# MASS is only needed for ginv(); fail loudly if it is missing instead
	# of letting require() return FALSE silently and crash later at
	# MASS::ginv. requireNamespace avoids attaching MASS to the search path.
	if (!requireNamespace("MASS", quietly=TRUE))
		stop("Package 'MASS' is required to compute the weights matrix")
	dd <- d + d^2 + d^3  #total number of moment conditions
	M <- Moments(θ)      #theoretical moments at θ
	# Omega (covariance of the moment conditions) is filled by the C routine;
	# W is passed in as the dd*dd output buffer
	Omega <- matrix( .C("Compute_Omega",
		X=as.double(X), Y=as.integer(Y), M=as.double(M),
		pnc=as.integer(nc), pn=as.integer(n), pd=as.integer(d),
		W=as.double(W), PACKAGE="morpheus")$W, nrow=dd, ncol=dd )
	# Moore-Penrose pseudo-inverse: Omega may be (numerically) singular
	MASS::ginv(Omega)
},
+
Moments = function(θ)
{
	"Compute the vector of theoretical moments (size d+d^2+d^3)"

	p <- θ$p
	β <- θ$β
	b <- θ$b
	λ <- sqrt(colSums(β^2))  #per-component norms of β columns

	# Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
	# (β2 is d^2 x K, β3 is d^3 x K: flattened outer powers, one per column)
	β2 <- apply(β, 2, function(col) col %o% col)
	β3 <- apply(β, 2, function(col) col %o% col %o% col)

	# Order-k moment block is βk %*% (p * G_k(λ,b)); stack the three blocks
	m1 <- β  %*% (p * .G(li,1,λ,b))
	m2 <- β2 %*% (p * .G(li,2,λ,b))
	m3 <- β3 %*% (p * .G(li,3,λ,b))
	c(m1, m2, m3)
},
+
f = function(θ)
{
	"Function to minimize: t(hat_Mi - Mi(θ)) . W . (hat_Mi - Mi(θ))"

	# θ is the flat parameter vector handed over by the optimizer
	L <- expArgs(θ)
	A <- as.matrix(Mhat - Moments(L))  #residual of the moment conditions
	# drop(): return a plain numeric scalar, as optim()/constrOptim()
	# expect, instead of a 1x1 matrix
	drop(t(A) %*% W %*% A)
},
+
grad_f = function(θ)
{
	"Gradient of f: vector of size (K-1) + d*K + K = (d+2)*K - 1"

	L <- expArgs(θ)
	residual <- as.matrix(Mhat - Moments(L))
	# Chain rule on t(r) W r with r = Mhat - M(θ): grad = -2 t(dM) W r
	-2 * t(grad_M(L)) %*% W %*% residual
},
+
grad_M = function(θ)
{
	"Gradient of the moments vector: matrix of size d+d^2+d^3 x K-1+K+d*K"

	p <- θ$p
	β <- θ$β
	λ <- sqrt(colSums(β^2))  #per-component norms
	# NOTE(review): μ is computed but never used below — dead local?
	μ <- sweep(β, 2, λ, '/')
	b <- θ$b

	# One row per moment condition; columns appended per parameter block
	res <- matrix(nrow=nrow(W), ncol=0)

	# Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
	β2 <- apply(β, 2, function(col) col %o% col)
	β3 <- apply(β, 2, function(col) col %o% col %o% col)

	# Some precomputations: .G at orders 1..5 (orders 4 and 5 appear in the
	# derivatives of the order-2 and order-3 moment blocks)
	G1 = .G(li,1,λ,b)
	G2 = .G(li,2,λ,b)
	G3 = .G(li,3,λ,b)
	G4 = .G(li,4,λ,b)
	G5 = .G(li,5,λ,b)

	# Gradient on p: K-1 columns, dim rows. Only p_1..p_{K-1} are free;
	# p_K = 1 - sum(p_{1..K-1}) contributes the subtracted G.[K]*β.[,K] terms.
	km1 = 1:(K-1)
	res <- cbind(res, rbind(
		sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K],
		sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K],
		sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ))

	# Gradient on β, one covariate row i at a time (K columns per i,
	# matching the row-by-row linearization in linArgs)
	for (i in 1:d)
	{
		# i determines the derivated matrix dβ[2,3]

		# d(β G1)/dβ[i,]: chain-rule part (via λ) + direct part
		dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
		dβ_right <- matrix(0, nrow=d, ncol=K)
		block <- i
		dβ_right[block,] <- dβ_right[block,] + 1
		dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')

		# Same decomposition for the order-2 tensor block
		dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
		dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
			sweep(dβ_right, 2, β[j,], '*')
		}) )
		block <- ((i-1)*d+1):(i*d)
		dβ2_right[block,] <- dβ2_right[block,] + β
		dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')

		# And for the order-3 tensor block (built on dβ2_right)
		dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
		dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
			sweep(dβ2_right, 2, β[j,], '*')
		}) )
		block <- ((i-1)*d*d+1):(i*d*d)
		dβ3_right[block,] <- dβ3_right[block,] + β2
		dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')

		res <- cbind(res, rbind(dβ, dβ2, dβ3))
	}

	# Gradient on b: the G-order shifts by one for each moment order
	res <- cbind(res, rbind(
		sweep(β, 2, p * G2, '*'),
		sweep(β2, 2, p * G3, '*'),
		sweep(β3, 2, p * G4, '*') ))

	res
},
+
# userW allows to bypass the W optimization by giving a W matrix
run = function(θ0, userW=NULL)
{
	"Run optimization from θ0 with solver..."

	# --- Validate the starting point; β is the only mandatory component ---
	if (!is.list(θ0))
		stop("θ0: list")
	if (is.null(θ0$β))
		stop("At least θ0$β must be provided")
	if (!is.matrix(θ0$β) || any(is.na(θ0$β))
		|| nrow(θ0$β) != d || ncol(θ0$β) != K)
	{
		stop("θ0$β: matrix, no NA, nrow = d, ncol = K")
	}
	if (is.null(θ0$p))
		θ0$p = rep(1/K, K-1)  #default: uniform proportions
	# BUGFIX: the non-negativity promised by the message was not checked;
	# also p entries are proportions, not integers
	else if (!is.numeric(θ0$p) || length(θ0$p) != K-1
		|| any(is.na(θ0$p)) || any(θ0$p < 0) || sum(θ0$p) > 1)
	{
		stop("θ0$p: length K-1, no NA, non-negative numbers, sum to <= 1")
	}
	# NOTE: [["b"]] instead of $b because $b would match $beta (in pkg-cran)
	if (is.null(θ0[["b"]]))
		θ0$b = rep(0, K)
	else if (!is.numeric(θ0$b) || length(θ0$b) != K || any(is.na(θ0$b)))
		stop("θ0$b: length K, no NA")

	# --- (Re)Set W, to allow several runs from the same object ---
	dd <- d + d^2 + d^3
	if (is.null(userW))
		W <<- diag(dd)  #first pass: ordinary least squares
	else if (!is.matrix(userW) || nrow(userW) != dd || ncol(userW) != dd)
		stop("userW: square matrix of size d+d^2+d^3")
	else
		W <<- userW

	# NOTE: loopMax = 3 seems to not improve the final results.
	loopMax <- ifelse(is.null(userW), 2, 1)
	x_init <- linArgs(θ0)
	for (loop in 1:loopMax)
	{
		# Constraints encode the simplex on p (p_i >= 0, sum <= 1);
		# β and b are unconstrained (zero columns in ui)
		op_res <- constrOptim( x_init, .self$f, .self$grad_f,
			ui=cbind(
				rbind( rep(-1,K-1), diag(K-1) ),
				matrix(0, nrow=K, ncol=(d+1)*K) ),
			ci=c(-1,rep(0,K-1)) )
		if (loop < loopMax) #avoid computing an extra W
			W <<- computeW(expArgs(op_res$par))
		#x_init <- op_res$par #degrades performances (TODO: why?)
	}

	# Return the optimum as a list (p, β, b)
	expArgs(op_res$par)
}
+ )