- },
-
- expArgs = function(x)
- {
-   # Inverse of linArgs: rebuild the parameter list (p, β, b) from the
-   # flat vector x used by the optimizer.
-   "Expand individual arguments from vector x"
-
-   list(
-     # p: dimension K-1, need to be completed
-     # (the K-th proportion is implied by the sum-to-one constraint)
-     "p" = c(x[1:(K-1)], 1-sum(x[1:(K-1)])),
-     # β read back column-major, matching as.double(o$β) in linArgs
-     "β" = matrix(x[K:(K+d*K-1)], ncol=K),
-     # b: the K intercepts, stored last
-     "b" = x[(K+d*K):(K+(d+1)*K-1)])
- },
-
- linArgs = function(o)
- {
-   # Flatten (p, β, b) into one parameter vector; β is linearized
-   # column-major, the exact inverse of expArgs above.
-   " Linearize vectors+matrices into a vector x"
-
-   c(o$p[1:(K-1)], as.double(o$β), o$b)
- },
-
- f = function(x)
- {
-   # Moment-matching objective: sum of squared entries of
-   # (Mi(theta) - hat_Mi) for moment orders i = 1, 2, 3.
-   # NOTE(review): the docstring mentions a weighting matrix W but the
-   # code below is plain (unweighted) least squares — confirm intended.
-   "Product t(Mi - hat_Mi) W (Mi - hat_Mi) with Mi(theta)"
-
-   P <- expArgs(x)
-   p <- P$p
-   β <- P$β
-   λ <- sqrt(colSums(β^2))   # column norms of β
-   b <- P$b
-
-   # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
-   β2 <- apply(β, 2, function(col) col %o% col)
-   β3 <- apply(β, 2, function(col) col %o% col %o% col)
-
-   # M1/M2/M3 (empirical moments) and .G (link terms) come from the
-   # enclosing object; one summand per moment order.
-   return(
-     sum( ( β %*% (p * .G(li,1,λ,b)) - M1 )^2 ) +
-     sum( ( β2 %*% (p * .G(li,2,λ,b)) - M2 )^2 ) +
-     sum( ( β3 %*% (p * .G(li,3,λ,b)) - M3 )^2 ) )
- },
-
- grad_f = function(x)
- {
-   # Analytic gradient of f with respect to the flattened parameters,
-   # in the same order as linArgs: p (K-1), then β (d*K), then b (K).
-   "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"
-
-   P <- expArgs(x)
-   p <- P$p
-   β <- P$β
-   λ <- sqrt(colSums(β^2))
-   # NOTE(review): μ is never used below — dead local, candidate for removal
-   μ <- sweep(β, 2, λ, '/')
-   b <- P$b
-
-   # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
-   β2 <- apply(β, 2, function(col) col %o% col)
-   β3 <- apply(β, 2, function(col) col %o% col %o% col)
-
-   # Some precomputations
-   G1 = .G(li,1,λ,b)
-   G2 = .G(li,2,λ,b)
-   G3 = .G(li,3,λ,b)
-   G4 = .G(li,4,λ,b)
-   G5 = .G(li,5,λ,b)
-
-   # (Mi - hat_Mi)^2 ' == (Mi - hat_Mi)' 2(Mi - hat_Mi) = Mi' Fi
-   F1 = as.double( 2 * ( β %*% (p * G1) - M1 ) )
-   F2 = as.double( 2 * ( β2 %*% (p * G2) - M2 ) )
-   F3 = as.double( 2 * ( β3 %*% (p * G3) - M3 ) )
-
-   # Derivative w.r.t. p_j, j < K: since p_K = 1 - sum(p_j), each column
-   # also carries the correction term -G_i[K] * β_i[,K].
-   km1 = 1:(K-1)
-   grad <- #gradient on p
-     t( sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K] ) %*% F1 +
-     t( sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K] ) %*% F2 +
-     t( sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ) %*% F3
-
-   grad_β <- matrix(nrow=d, ncol=K)
-   for (i in 1:d)
-   {
-     # i determines the derivated matrix dβ[2,3]
-
-     # d/dβ[i,]: product rule — "left" differentiates the G term
-     # (via λ), "right" differentiates the tensor power itself.
-     dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
-     dβ_right <- matrix(0, nrow=d, ncol=K)
-     block <- i
-     dβ_right[block,] <- dβ_right[block,] + 1
-     dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')
-
-     dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
-     dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
-       sweep(dβ_right, 2, β[j,], '*')
-     }) )
-     block <- ((i-1)*d+1):(i*d)
-     dβ2_right[block,] <- dβ2_right[block,] + β
-     dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')
-
-     dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
-     dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
-       sweep(dβ2_right, 2, β[j,], '*')
-     }) )
-     block <- ((i-1)*d*d+1):(i*d*d)
-     dβ3_right[block,] <- dβ3_right[block,] + β2
-     dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')
-
-     # Contract the three moment-order derivatives against F1/F2/F3
-     grad_β[i,] <- t(dβ) %*% F1 + t(dβ2) %*% F2 + t(dβ3) %*% F3
-   }
-   grad <- c(grad, as.double(grad_β))
-
-   grad = c(grad, #gradient on b
-     t( sweep(β, 2, p * G2, '*') ) %*% F1 +
-     t( sweep(β2, 2, p * G3, '*') ) %*% F2 +
-     t( sweep(β3, 2, p * G4, '*') ) %*% F3 )
-
-   grad
- },
-
- run = function(x0)
- {
-   # Single constrained optimization from starting point x0 (a list with
-   # at least $β; $p and $b get defaults). Returns the fitted (p, β, b).
-   "Run optimization from x0 with solver..."
-
-   # --- Validate the starting point; only β is mandatory ---
-   if (!is.list(x0))
-     stop("x0: list")
-   if (is.null(x0$β))
-     stop("At least x0$β must be provided")
-   if (!is.matrix(x0$β) || any(is.na(x0$β)) || ncol(x0$β) != K)
-     stop("x0$β: matrix, no NA, ncol == K")
-   if (is.null(x0$p))
-     x0$p = rep(1/K, K-1)   # default: uniform mixture proportions
-   else if (length(x0$p) != K-1 || sum(x0$p) > 1)
-     stop("x0$p should contain positive integers and sum to < 1")
-   # Next test = heuristic to detect missing b (when matrix is called "beta")
-   # NOTE(review): x0$b == x0$β compares a vector to a matrix through
-   # recycling — fragile; confirm the intended semantics
-   if (is.null(x0$b) || all(x0$b == x0$β))
-     x0$b = rep(0, K)
-   else if (any(is.na(x0$b)))
-     stop("x0$b cannot have missing values")
-
-   # Linear constraints on p only: row 1 gives sum(p) <= 1, the diagonal
-   # rows give p_j >= 0; zero columns leave β and b unconstrained.
-   op_res = constrOptim( linArgs(x0), .self$f, .self$grad_f,
-     ui=cbind(
-       rbind( rep(-1,K-1), diag(K-1) ),
-       matrix(0, nrow=K, ncol=(d+1)*K) ),
-     ci=c(-1,rep(0,K-1)) )
-
-   # Re-expand the flat optimum into a named list
-   expArgs(op_res$par)
- }
- )
+ },
+
+ expArgs = function(v)
+ {
+   # Inverse of linArgs: rebuild the parameter list (p, β, b) from the
+   # flat vector v used by the optimizer.
+   "Expand individual arguments from vector v into a list"
+
+   list(
+     # p: dimension K-1, need to be completed
+     # (the K-th proportion is implied by the sum-to-one constraint)
+     "p" = c(v[1:(K-1)], 1-sum(v[1:(K-1)])),
+     # β is stored row by row in v (see linArgs), hence the transpose
+     "β" = t(matrix(v[K:(K+d*K-1)], ncol=d)),
+     # b: the K intercepts, stored last
+     "b" = v[(K+d*K):(K+(d+1)*K-1)])
+ },
+
+ linArgs = function(L)
+ {
+   # Flatten (p, β, b) into one parameter vector; exact inverse of expArgs.
+   "Linearize vectors+matrices from list L into a vector"
+
+   # β linearized row by row, to match derivatives order
+   c(L$p[1:(K-1)], as.double(t(L$β)), L$b)
+ },
+
+ computeW = function(θ)
+ {
+   "Compute the weights matrix W = ginv(Omega) at point θ"
+
+   # MASS is only needed for ginv(); fail loudly if unavailable instead
+   # of letting require() return FALSE and failing later, obscurely.
+   if (!requireNamespace("MASS", quietly=TRUE))
+     stop("Package MASS is required (for MASS::ginv)")
+   dd <- d + d^2 + d^3
+   M <- Moments(θ)
+   # Omega is estimated by compiled C code from the morpheus package;
+   # the current W is passed as an output buffer of size dd x dd.
+   Omega <- matrix( .C("Compute_Omega",
+     X=as.double(X), Y=as.integer(Y), M=as.double(M),
+     pn=as.integer(n), pd=as.integer(d),
+     W=as.double(W), PACKAGE="morpheus")$W, nrow=dd, ncol=dd )
+   # Moore-Penrose pseudo-inverse: Omega may be numerically singular
+   MASS::ginv(Omega)
+ },
+
+ Moments = function(θ)
+ {
+   # Theoretical moments M(θ) stacked as one vector; θ is the expanded
+   # list (p, β, b) produced by expArgs.
+   "Vector of moments, of size d+d^2+d^3"
+
+   p <- θ$p
+   β <- θ$β
+   λ <- sqrt(colSums(β^2))   # column norms of β
+   b <- θ$b
+
+   # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
+   β2 <- apply(β, 2, function(col) col %o% col)
+   β3 <- apply(β, 2, function(col) col %o% col %o% col)
+
+   # Each order-i block is a p-weighted combination of the β^i columns,
+   # scaled by the link terms .G(li,i,λ,b) from the enclosing object
+   c(
+     β %*% (p * .G(li,1,λ,b)),
+     β2 %*% (p * .G(li,2,λ,b)),
+     β3 %*% (p * .G(li,3,λ,b)))
+ },
+
+ f = function(θ)
+ {
+   # Weighted least-squares objective on the moment vector; W is the
+   # weighting-matrix field (refreshed between runs by computeW).
+   "Product t(hat_Mi - Mi) W (hat_Mi - Mi) with Mi(theta)"
+
+   # θ here is the flat optimizer vector; expand it back to a list first
+   L <- expArgs(θ)
+   A <- as.matrix(Mhat - Moments(L))
+   # 1x1 matrix, i.e. numeric of length one — accepted by constrOptim
+   t(A) %*% W %*% A
+ },
+
+ grad_f = function(θ)
+ {
+   # Analytic gradient of f, ordered as linArgs: p, then β rows, then b.
+   "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"
+
+   L <- expArgs(θ)
+   # Chain rule on f = t(Mhat-M) W (Mhat-M): df/dθ = -2 t(dM/dθ) W (Mhat-M)
+   -2 * t(grad_M(L)) %*% W %*% as.matrix(Mhat - Moments(L))
+ },
+
+ grad_M = function(θ)
+ {
+   # Jacobian of the moments vector w.r.t. the flattened parameters;
+   # columns follow the linArgs order (p, then β row-blocks, then b).
+   "Gradient of the vector of moments, size (dim=)d+d^2+d^3 x K-1+K+d*K"
+
+   p <- θ$p
+   β <- θ$β
+   λ <- sqrt(colSums(β^2))
+   b <- θ$b
+
+   # One row per moment coordinate (nrow(W) = d+d^2+d^3), columns appended
+   res <- matrix(nrow=nrow(W), ncol=0)
+
+   # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1
+   β2 <- apply(β, 2, function(col) col %o% col)
+   β3 <- apply(β, 2, function(col) col %o% col %o% col)
+
+   # Some precomputations
+   G1 = .G(li,1,λ,b)
+   G2 = .G(li,2,λ,b)
+   G3 = .G(li,3,λ,b)
+   G4 = .G(li,4,λ,b)
+   G5 = .G(li,5,λ,b)
+
+   # Gradient on p: K-1 columns, dim rows
+   # (p_K = 1 - sum of the others, hence the -G_i[K] * β_i[,K] correction)
+   km1 = 1:(K-1)
+   res <- cbind(res, rbind(
+     sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K],
+     sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K],
+     sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ))
+
+   for (i in 1:d)
+   {
+     # i determines the derivated matrix dβ[2,3]
+
+     # Product rule: "left" differentiates the G link term (through λ),
+     # "right" differentiates the tensor power of β itself.
+     dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
+     dβ_right <- matrix(0, nrow=d, ncol=K)
+     block <- i
+     dβ_right[block,] <- dβ_right[block,] + 1
+     dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')
+
+     dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
+     dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
+       sweep(dβ_right, 2, β[j,], '*')
+     }) )
+     block <- ((i-1)*d+1):(i*d)
+     dβ2_right[block,] <- dβ2_right[block,] + β
+     dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')
+
+     dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
+     dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
+       sweep(dβ2_right, 2, β[j,], '*')
+     }) )
+     block <- ((i-1)*d*d+1):(i*d*d)
+     dβ3_right[block,] <- dβ3_right[block,] + β2
+     dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')
+
+     # One d*... column block per β row index i, stacked over moment orders
+     res <- cbind(res, rbind(dβ, dβ2, dβ3))
+   }
+
+   # Gradient on b
+   res <- cbind(res, rbind(
+     sweep(β, 2, p * G2, '*'),
+     sweep(β2, 2, p * G3, '*'),
+     sweep(β3, 2, p * G4, '*') ))
+
+   res
+ },
+
+ run = function(θ0)
+ {
+   # Iterated (2-step GMM style) constrained optimization from θ0:
+   # optimize, re-estimate the weights W, optimize again.
+   "Run optimization from θ0 with solver..."
+
+   # --- Validate the starting point; only β is mandatory ---
+   if (!is.list(θ0))
+     stop("θ0: list")
+   if (is.null(θ0$β))
+     stop("At least θ0$β must be provided")
+   if (!is.matrix(θ0$β) || any(is.na(θ0$β))
+     || nrow(θ0$β) != d || ncol(θ0$β) != K)
+   {
+     stop("θ0$β: matrix, no NA, nrow = d, ncol = K")
+   }
+   if (is.null(θ0$p))
+     θ0$p = rep(1/K, K-1)   # default: uniform mixture proportions
+   else if (!is.numeric(θ0$p) || length(θ0$p) != K-1
+     || any(is.na(θ0$p)) || any(θ0$p < 0) || sum(θ0$p) > 1)
+   {
+     # p are proportions (reals in [0,1]), not integers; also actually
+     # enforce non-negativity, which the message promises.
+     stop("θ0$p: length K-1, no NA, non-negative numbers, sum to <= 1")
+   }
+   if (is.null(θ0$b))
+     θ0$b = rep(0, K)
+   else if (!is.numeric(θ0$b) || length(θ0$b) != K || any(is.na(θ0$b)))
+     stop("θ0$b: length K, no NA")
+
+   # TODO: stopping condition? N iterations? Delta <= epsilon ?
+   # NOTE(review): each pass restarts from θ0 (not from op_res$par) with
+   # the refreshed W — confirm this is the intended 2-step scheme.
+   loopMax <- 2
+   for (loop in 1:loopMax)
+   {
+     # Linear constraints on p only: row 1 gives sum(p) <= 1, the diagonal
+     # rows give p_j >= 0; zero columns leave β and b unconstrained.
+     op_res = constrOptim( linArgs(θ0), .self$f, .self$grad_f,
+       ui=cbind(
+         rbind( rep(-1,K-1), diag(K-1) ),
+         matrix(0, nrow=K, ncol=(d+1)*K) ),
+       ci=c(-1,rep(0,K-1)) )
+     if (loop < loopMax) #avoid computing an extra W
+       W <<- computeW(expArgs(op_res$par))
+     #print(op_res$value) #debug
+     #print(expArgs(op_res$par)) #debug
+   }
+
+   # Re-expand the flat optimum into a named list (p, β, b)
+   expArgs(op_res$par)
+ }
+ )