#' Optimize the parameters of a mixture of logistic regressions model, possibly using
#' \code{mu <- computeMu(...)} as a partial starting point.
#' @param K Number of populations.
#' @param link The link type, 'logit' or 'probit'.
#' @param optargs A list of optional arguments:
#' \item 'M' : list of moments of order 1, 2, 3; computed if not provided.
#' \item 'X,Y' : input/output data, mandatory if the moments are not given.
#' \item 'exact' : use exact formulas when available?
#' \item 'weights' : weights on the moments when minimizing the sum of squares.
#' @return An object 'op' of class OptimParams, initialized so that \code{op$run(x0)}
#' outputs the list of optimized parameters:
#' \item p: proportions, size K
#' \item β: regression matrix, size dxK
#' \item b: intercepts, size K
#' x0 is a vector containing, in order, the first K-1 elements of p, then β stacked
#' by columns, and finally b: \code{x0 = c(p[1:(K-1)], as.double(β), b)}.
#' @seealso \code{multiRun} to estimate statistics based on β, and
#' \code{generateSampleIO} for random I/O generation.
#' @examples
#' # Optimize parameters from estimated μ
#' io <- generateSampleIO(10000, 1/2, matrix(c(1,-2,3,1),ncol=2), c(0,0), "logit")
#' μ <- computeMu(io$X, io$Y, list(K=2))
#' M <- computeMoments(io$X, io$Y)
#' o <- optimParams(2, "logit", list(M=M))
#' x0 <- c(1/2, as.double(μ), c(0,0))
#' par0 <- o$run(x0)
#' # Compare with another starting point
#' x1 <- c(1/2, 2*as.double(μ), c(0,0))
#' par1 <- o$run(x1)
#' o$f( o$linArgs(par0) )
#' o$f( o$linArgs(par1) )
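#' # Illustrative consistency check: linArgs() and expArgs() (methods of the class below)
#' # are inverse transformations, so the flat vector and list forms are interchangeable.
#' all.equal( o$linArgs( o$expArgs(x0) ), x0 )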
optimParams = function(K, link=c("logit","probit"), optargs=list())
  link <- match.arg(link)
  if (!is.list(optargs))
    stop("optargs: list")
  if (!is.numeric(K) || K < 2)
    stop("K: integer >= 2")
  if (is.null(optargs$X) || is.null(optargs$Y))
    stop("If moments are not provided, X and Y are required")
  M <- computeMoments(optargs$X, optargs$Y)
  weights <- optargs$weights
  # Build and return the optimization algorithm object
  methods::new("OptimParams", "li"=link, "M1"=as.double(M[[1]]),
    "M2"=as.double(M[[2]]), "M3"=as.double(M[[3]]),
    "weights"=weights, "K"=as.integer(K))
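# Usage note (illustrative, mirroring the roxygen examples above): passing the raw data,
# e.g. optimParams(2, "logit", list(X=io$X, Y=io$Y)), triggers the computeMoments() call
# above and is intended to be equivalent to supplying a precomputed list(M=M).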
# Encapsulated optimization for p (proportions), β and b (regression parameters)
#
# @field li Link, 'logit' or 'probit'
# @field M1 Estimated first-order moment
# @field M2 Estimated second-order moment (flattened)
# @field M3 Estimated third-order moment (flattened)
# @field weights Vector of weights on the moments
# @field K Number of populations
# @field d Number of dimensions
  Class = "OptimParams",
    li = "character", #link: 'logit' or 'probit'
    M1 = "numeric", #order-1 moment (vector of size d)
    M2 = "numeric", #order-2 moment, flattened (easier to process as a vector)
    M3 = "numeric", #order-3 moment, flattened (easier to process as a vector)
    initialize = function(...)
      "Check args and initialize K, d"
      if (!hasArg("li") || !hasArg("M1") || !hasArg("M2") || !hasArg("M3")
        stop("Missing arguments")
    expArgs = function(x)
      "Expand individual arguments from vector x"
        # p: dimension K-1, needs to be completed
        "p" = c(x[1:(K-1)], 1-sum(x[1:(K-1)])),
        "β" = matrix(x[K:(K+d*K-1)], ncol=K),
        "b" = x[(K+d*K):(K+(d+1)*K-1)])
    linArgs = function(o)
      "Linearize vectors+matrices into a vector x"
      c(o$p[1:(K-1)], as.double(o$β), o$b)
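      # Layout illustration (K=2, d=2): the flat vector produced by linArgs() reads
      #   x = ( p[1], β[1,1], β[2,1], β[1,2], β[2,2], b[1], b[2] ),
      # of length (K-1) + d*K + K = (d+2)*K - 1 = 7, which expArgs() unpacks back.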
      "Sum of squares (Mi - hat_Mi)^2, where the Mi are computed from the current parameters"
      λ <- sqrt(colSums(β^2))
      # Tensorial (outer) products β2 = β∘β and β3 = β∘β∘β must be recomputed from the current β
      β2 <- apply(β, 2, function(col) col %o% col)
      β3 <- apply(β, 2, function(col) col %o% col %o% col)
        weights[1] * sum( ( β %*% (p * .G(li,1,λ,b)) - M1 )^2 ) +
        weights[2] * sum( ( β2 %*% (p * .G(li,2,λ,b)) - M2 )^2 ) +
        weights[3] * sum( ( β3 %*% (p * .G(li,3,λ,b)) - M3 )^2 ) )
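      # In matrix form: f(θ) = Σ_{i=1..3} weights[i] * || β^(∘i) (p ⊙ Gi) - hat_Mi ||²,
      # where β^(∘i) stacks the i-fold outer products of the columns of β (β, β2, β3 above)
      # and Gi[k] = E[ g^(i)(λ[k] z + b[k]) ], z ~ N(0,1), as returned by .G().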
      "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1"
      λ <- sqrt(colSums(β^2))
      μ <- sweep(β, 2, λ, '/')
      # Tensorial (outer) products β2 and β3 must be recomputed from the current β
      β2 <- apply(β, 2, function(col) col %o% col)
      β3 <- apply(β, 2, function(col) col %o% col %o% col)
      # Some precomputations
      # Chain rule: [(Mi - hat_Mi)^2]' = Mi' * 2 (Mi - hat_Mi) = Mi' * Fi, with Fi as below
      F1 <- as.double( 2 * ( β %*% (p * G1) - M1 ) )
      F2 <- as.double( 2 * ( β2 %*% (p * G2) - M2 ) )
      F3 <- as.double( 2 * ( β3 %*% (p * G3) - M3 ) )
      grad <- #gradient on p
        weights[1] * t( sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K] ) %*% F1 +
        weights[2] * t( sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K] ) %*% F2 +
        weights[3] * t( sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ) %*% F3
      grad_β <- matrix(nrow=d, ncol=K)
        # i (row index of β) determines the differentiated matrices dβ, dβ2, dβ3
        dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
        dβ_right <- matrix(0, nrow=d, ncol=K)
        dβ_right[block,] <- dβ_right[block,] + 1
        dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')
        dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
        dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
          sweep(dβ_right, 2, β[j,], '*')
        block <- ((i-1)*d+1):(i*d)
        dβ2_right[block,] <- dβ2_right[block,] + β
        dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')
        dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
        dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
          sweep(dβ2_right, 2, β[j,], '*')
        block <- ((i-1)*d*d+1):(i*d*d)
        dβ3_right[block,] <- dβ3_right[block,] + β2
        dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')
          weights[1] * t(dβ) %*% F1 +
          weights[2] * t(dβ2) %*% F2 +
          weights[3] * t(dβ3) %*% F3
      grad <- c(grad, as.double(grad_β))
      grad <- c(grad, #gradient on b
        weights[1] * t( sweep(β, 2, p * G2, '*') ) %*% F1 +
        weights[2] * t( sweep(β2, 2, p * G3, '*') ) %*% F2 +
        weights[3] * t( sweep(β3, 2, p * G4, '*') ) %*% F3 )
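      # Sanity-check sketch (assumes the numDeriv package, which this code does not require):
      # the analytic gradient can be compared to a finite-difference one at a feasible x, e.g.
      #   max(abs( grad_f(x) - numDeriv::grad(f, x) ))
      # should be close to zero.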
      "Run optimization from x0 with the constrOptim solver"
        stop("At least x0$β must be provided")
      if (!is.matrix(x0$β) || any(is.na(x0$β)) || ncol(x0$β) != K)
        stop("x0$β: matrix, no NA, ncol == K")
      else if (length(x0$p) != K-1 || sum(x0$p) > 1)
        stop("x0$p should contain K-1 non-negative proportions summing to at most 1")
      # Next test is a heuristic to detect a missing b (when the matrix is named "beta")
      if (is.null(x0$b) || all(x0$b == x0$β))
      else if (any(is.na(x0$b)))
        stop("x0$b cannot have missing values")
      op_res <- constrOptim( linArgs(x0), .self$f, .self$grad_f,
          rbind( rep(-1,K-1), diag(K-1) ),
          matrix(0, nrow=K, ncol=(d+1)*K) ),
        ci=c(-1,rep(0,K-1)) )
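      # constrOptim's feasible region is { x : ui %*% x - ci >= 0 }: the first row enforces
      # sum(p[1:(K-1)]) <= 1, the next K-1 rows enforce p[j] >= 0, and the zero block leaves
      # β and b unconstrained.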
# Compute the vector of E[g^{(order)}(<β_k,x> + b_k)], k=1..K, with x ~ N(0,Id) (an integral in R^d)
# = E[g^{(order)}(λ_k z + b_k)] with z ~ N(0,1), i.e. one univariate integral per column of β
# @param link Link, 'logit' or 'probit'
# @param order Order of the derivative
# @param λ Norms of the columns of β
# @param b Vector of intercepts
.G <- function(link, order, λ, b)
  # NOTE: a strange "integral divergent" error occurs on inputs such as:
  #   link="probit"; order=2; λ=c(531.8099,586.8893,523.5816); b=c(-118.512674,-3.488020,2.109969)
  # Switch to the pracma package in that case (but it seems slow...)
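  # Cross-check idea (not used by the code): for link="probit" and order=1 the integral has the
  # closed form dnorm(b[k] / sqrt(λ[k]^2 + 1)) / sqrt(λ[k]^2 + 1) (a standard Gaussian identity),
  # which can be compared with the numerical result below for a few (λ, b) values.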
  exactComp <- FALSE #TODO: make this a global setting, or an argument...
  if (exactComp && link == "probit")
    # Use exact computations
    sapply( seq_along(λ), function(k) {
      .exactProbitIntegral(order, λ[k], b[k])
  # Numerical integration
  sapply( seq_along(λ), function(k) {
      # Fast code, may fail:
      res <- stats::integrate(
        function(z) .deriv[[link]][[order]](λ[k]*z+b[k]) * exp(-z^2/2) / sqrt(2*pi),
        lower=-Inf, upper=Inf )$value
    }, error = function(e) {
      # Robust but slow code; no failures observed so far:
      sink("/dev/null") #the pracma package prints some unneeded output...
      res <- pracma::integral(
        function(z) .deriv[[link]][[order]](λ[k]*z+b[k]) * exp(-z^2/2) / sqrt(2*pi),
        xmin=-Inf, xmax=Inf, method="Kronrod")
# TODO: check these computations (wrong at the moment)
.exactProbitIntegral <- function(order, λ, b)
  c1 <- (1/sqrt(2*pi)) * exp( -.5 * b/((λ^2+1)^2) )
  c2 <- b - λ^2 / (λ^2+1)
    return (c1 * (λ^2 - 1 + c2^2))
    return ( (c1*c2/((λ^2+1)^2)) * (-λ^4*((b+1)^2+1) -
      2*λ^3 + λ^2*(2-2*b*(b-1)) + 6*λ + 3 - b^2) )
  if (order == 5) #only remaining case...
    return ( c1 * (3*λ^4 + c2^4 + 6*c1^2*(λ^2-1) - 6*λ^2 + 6) )
# Derivatives list: g^(k)(x) for the links 'logit' and 'probit'
    # 'probit' derivatives list.
    # TODO: exact values for the integral E[g^(k)(λz+b)]
    function(x) exp(-x^2/2)/(sqrt(2*pi)), #g'
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * -x, #g''
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^2 - 1), #g^(3)
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * (-x^3 + 3*x), #g^(4)
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^4 - 6*x^2 + 3) #g^(5)
    # Sigmoid derivatives list, obtained with http://www.derivative-calculator.net/
    # @seealso http://www.ece.uc.edu/~aminai/papers/minai_sigmoids_NN93.pdf
    function(x) {e=exp(x); .zin(e /(e+1)^2)}, #g'
    function(x) {e=exp(x); .zin(e*(-e + 1) /(e+1)^3)}, #g''
    function(x) {e=exp(x); .zin(e*( e^2 - 4*e + 1) /(e+1)^4)}, #g^(3)
    function(x) {e=exp(x); .zin(e*(-e^3 + 11*e^2 - 11*e + 1) /(e+1)^5)}, #g^(4)
    function(x) {e=exp(x); .zin(e*( e^4 - 26*e^3 + 66*e^2 - 26*e + 1)/(e+1)^6)} #g^(5)
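    # Note: the probit entries above are successive derivatives of dnorm(), i.e.
    # (-1)^(k-1) * He_{k-1}(x) * dnorm(x) with He_n the probabilist's Hermite polynomials.
    # Either list can be cross-checked numerically, e.g. .deriv[["logit"]][[2]](x) against
    # numDeriv::grad(.deriv[["logit"]][[1]], x) (assuming the numDeriv package is available).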
# Utility for integration: return zero if the argument is NaN (from Inf/Inf divisions)
# @param x Ratio of polynomials of exponentials, as in .S[[i]]