Add a number_of_cores parameter for OpenMP // in Compute_Omega
[morpheus.git] / pkg / R / optimParams.R
#' Wrapper function for OptimParams class
#'
#' @param X Data matrix of covariables
#' @param Y Output as a binary vector
#' @param K Number of populations
#' @param link The link type, 'logit' or 'probit'
#' @param M The empirical cross-moments between X and Y (optional)
#' @param nc Number of cores (default: 0 to use all)
#'
#' @return An object 'op' of class OptimParams, initialized so that
#'   \code{op$run(θ0)} outputs the list of optimized parameters
#'   \itemize{
#'     \item p: proportions, size K
#'     \item β: regression matrix, size dxK
#'     \item b: intercepts, size K
#'   }
#'   θ0 is a list containing the initial parameters. Only β is required
#'   (p would be set to (1/K,...,1/K) and b to (0,...,0)).
#'
#' @seealso \code{multiRun} to estimate statistics based on β, and
#'   \code{generateSampleIO} for I/O random generation.
#'
#' @examples
#' # Optimize parameters from an estimated μ
#' io <- generateSampleIO(100,
#'   1/2, matrix(c(1,-2,3,1),ncol=2), c(0,0), "logit")
#' μ <- computeMu(io$X, io$Y, list(K=2))
#' o <- optimParams(io$X, io$Y, 2, "logit")
#' \donttest{
#' θ0 <- list(p=1/2, β=μ, b=c(0,0))
#' par0 <- o$run(θ0)
#' # Compare with another starting point
#' θ1 <- list(p=1/2, β=2*μ, b=c(0,0))
#' par1 <- o$run(θ1)
#' # Look at the function values at par0 and par1:
#' o$f( o$linArgs(par0) )
#' o$f( o$linArgs(par1) )}
#'
#' @export
optimParams <- function(X, Y, K, link=c("logit","probit"), M=NULL, nc=0)
{
  # Check arguments
  if (!is.matrix(X) || any(is.na(X)))
    stop("X: numeric matrix, no NAs")
  if (!is.numeric(Y) || any(is.na(Y)) || any(Y!=0 & Y!=1))
    stop("Y: binary vector with 0 and 1 only")
  link <- match.arg(link)
  if (!is.numeric(K) || K!=floor(K) || K < 2)
    stop("K: integer >= 2")

  if (is.null(M))
  {
    # Precompute empirical moments
    Mtmp <- computeMoments(X, Y)
    M1 <- as.double(Mtmp[[1]])
    M2 <- as.double(Mtmp[[2]])
    M3 <- as.double(Mtmp[[3]])
    M <- c(M1, M2, M3)
  }
  else
    M <- c(M[[1]], M[[2]], M[[3]])

  # Build and return optimization algorithm object
  methods::new("OptimParams", "li"=link, "X"=X,
    "Y"=as.integer(Y), "K"=as.integer(K), "Mhat"=as.double(M), "nc"=as.integer(nc))
}
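
# Usage sketch (not run, illustrative variable names): the empirical moments can
# be precomputed once with computeMoments() and passed through M, so that several
# optimizers can be built on the same data without recomputing them; nc > 0 bounds
# the number of cores used by the OpenMP code in Compute_Omega.
#   Mlist <- computeMoments(X, Y)
#   o2 <- optimParams(X, Y, K=2, "logit", M=Mlist, nc=2)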

# Encapsulated optimization for p (proportions), β and b (regression parameters)
#
# Optimize the parameters of a mixture of logistic regressions model, possibly using
# \code{mu <- computeMu(...)} as a partial starting point.
#
# @field li Link function, 'logit' or 'probit'
# @field X Data matrix of covariables
# @field Y Output as a binary vector
# @field Mhat Vector of empirical moments
# @field K Number of populations
# @field n Number of sample points
# @field d Number of dimensions
# @field nc Number of cores (OpenMP parallelism)
# @field W Weights matrix (initialized at identity)
#
setRefClass(
  Class = "OptimParams",

  fields = list(
    # Inputs
    li = "character", # link function
    X = "matrix",
    Y = "numeric",
    Mhat = "numeric", # vector of empirical moments
    # Dimensions
    K = "integer",
    n = "integer",
    d = "integer",
    nc = "integer",
    # Weights matrix (generalized least squares)
    W = "matrix"
  ),

  methods = list(
    initialize = function(...)
    {
      "Check args and initialize n, d"

      callSuper(...)
      if (!hasArg("X") || !hasArg("Y") || !hasArg("K")
        || !hasArg("li") || !hasArg("Mhat") || !hasArg("nc"))
      {
        stop("Missing arguments")
      }

      n <<- nrow(X)
      d <<- ncol(X)
      # W will be initialized when calling run()
    },

    expArgs = function(v)
    {
      "Expand individual arguments from vector v into a list"

      list(
        # p: dimension K-1 in v; the last proportion is 1 minus the sum of the others
        "p" = c(v[1:(K-1)], 1-sum(v[1:(K-1)])),
        "β" = t(matrix(v[K:(K+d*K-1)], ncol=d)),
        "b" = v[(K+d*K):(K+(d+1)*K-1)])
    },

    linArgs = function(L)
    {
      "Linearize vectors+matrices from list L into a vector"

      # β linearized row by row, to match derivatives order
      c(L$p[1:(K-1)], as.double(t(L$β)), L$b)
    },
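
    # Layout sketch (illustrative values): for K=2 and d=2 the linearized vector
    # has length (K-1) + d*K + K = 7, ordered as p[1:(K-1)], then β row by row,
    # then b; expArgs() and linArgs() are inverse of each other on such vectors:
    #   v <- c(0.5,  1,3, -2,1,  0,0)
    #   o$linArgs(o$expArgs(v))   # gives back v, for an object 'o' of this class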

    # TODO: relocate computeW to utils.R
    computeW = function(θ)
    {
      "Compute the weights matrix from a parameters list"

      dd <- d + d^2 + d^3
      M <- Moments(θ)
      Omega <- matrix( .C("Compute_Omega",
        X=as.double(X), Y=as.integer(Y), M=as.double(M),
        pnc=as.integer(nc), pn=as.integer(n), pd=as.integer(d),
        W=as.double(W), PACKAGE="morpheus")$W, nrow=dd, ncol=dd )
      MASS::ginv(Omega)
    },

    Moments = function(θ)
    {
      "Compute the vector of theoretical moments (size d+d^2+d^3)"

      p <- θ$p
      β <- θ$β
      λ <- sqrt(colSums(β^2))
      b <- θ$b

      # Tensorial products β^2 = β2 and β^3 = β3 must be computed from the current β
      β2 <- apply(β, 2, function(col) col %o% col)
      β3 <- apply(β, 2, function(col) col %o% col %o% col)

      c(
        β %*% (p * .G(li,1,λ,b)),
        β2 %*% (p * .G(li,2,λ,b)),
        β3 %*% (p * .G(li,3,λ,b)))
    },
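
    # Size check sketch (illustrative): the three blocks above have respective
    # lengths d, d^2 and d^3, so for any valid parameter list θ:
    #   length(o$Moments(θ)) == o$d + o$d^2 + o$d^3   # TRUE for an object 'o' of this class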

    f = function(θ)
    {
      "Function to minimize: t(hat_Mi - Mi(θ)) . W . (hat_Mi - Mi(θ))"

      L <- expArgs(θ)
      A <- as.matrix(Mhat - Moments(L))
      t(A) %*% W %*% A
    },

    grad_f = function(θ)
    {
      "Gradient of f: vector of size (K-1) + d*K + K = (d+2)*K - 1"

      L <- expArgs(θ)
      -2 * t(grad_M(L)) %*% W %*% as.matrix(Mhat - Moments(L))
    },
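
    # Gradient check sketch (not run, illustrative names): once W has been set
    # by run(), grad_f can be compared with a central finite difference of f at
    # a linearized point x:
    #   x <- o$linArgs(θ0); i <- 1; h <- 1e-6
    #   e <- rep(0, length(x)); e[i] <- h
    #   (o$f(x + e) - o$f(x - e)) / (2*h)   # ≈ o$grad_f(x)[i]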

    grad_M = function(θ)
    {
      "Gradient of the moments vector: matrix of size (d+d^2+d^3) x ((K-1) + d*K + K)"

      p <- θ$p
      β <- θ$β
      λ <- sqrt(colSums(β^2))
      μ <- sweep(β, 2, λ, '/')
      b <- θ$b

      res <- matrix(nrow=nrow(W), ncol=0)

      # Tensorial products β^2 = β2 and β^3 = β3 must be computed from the current β
      β2 <- apply(β, 2, function(col) col %o% col)
      β3 <- apply(β, 2, function(col) col %o% col %o% col)

      # Some precomputations
      G1 = .G(li,1,λ,b)
      G2 = .G(li,2,λ,b)
      G3 = .G(li,3,λ,b)
      G4 = .G(li,4,λ,b)
      G5 = .G(li,5,λ,b)

      # Gradient on p: K-1 columns, d+d^2+d^3 rows
      km1 = 1:(K-1)
      res <- cbind(res, rbind(
        sweep(as.matrix(β [,km1]), 2, G1[km1], '*') - G1[K] * β [,K],
        sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K],
        sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] ))

      for (i in 1:d)
      {
        # i indexes the coordinate of β against which dβ, dβ2, dβ3 are differentiated

        dβ_left <- sweep(β, 2, p * G3 * β[i,], '*')
        dβ_right <- matrix(0, nrow=d, ncol=K)
        block <- i
        dβ_right[block,] <- dβ_right[block,] + 1
        dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*')

        dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*')
        dβ2_right <- do.call( rbind, lapply(1:d, function(j) {
          sweep(dβ_right, 2, β[j,], '*')
        }) )
        block <- ((i-1)*d+1):(i*d)
        dβ2_right[block,] <- dβ2_right[block,] + β
        dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*')

        dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*')
        dβ3_right <- do.call( rbind, lapply(1:d, function(j) {
          sweep(dβ2_right, 2, β[j,], '*')
        }) )
        block <- ((i-1)*d*d+1):(i*d*d)
        dβ3_right[block,] <- dβ3_right[block,] + β2
        dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*')

        res <- cbind(res, rbind(dβ, dβ2, dβ3))
      }

      # Gradient on b
      res <- cbind(res, rbind(
        sweep(β, 2, p * G2, '*'),
        sweep(β2, 2, p * G3, '*'),
        sweep(β3, 2, p * G4, '*') ))

      res
    },

    run = function(θ0)
    {
      "Run optimization from θ0 with the constrOptim solver"

      if (!is.list(θ0))
        stop("θ0: list")
      if (is.null(θ0$β))
        stop("At least θ0$β must be provided")
      if (!is.matrix(θ0$β) || any(is.na(θ0$β))
        || nrow(θ0$β) != d || ncol(θ0$β) != K)
      {
        stop("θ0$β: matrix, no NA, nrow = d, ncol = K")
      }
      if (is.null(θ0$p))
        θ0$p = rep(1/K, K-1)
      else if (!is.numeric(θ0$p) || length(θ0$p) != K-1
        || any(is.na(θ0$p)) || sum(θ0$p) > 1)
      {
        stop("θ0$p: numeric vector of length K-1, no NA, sum <= 1")
      }
      if (is.null(θ0$b))
        θ0$b = rep(0, K)
      else if (!is.numeric(θ0$b) || length(θ0$b) != K || any(is.na(θ0$b)))
        stop("θ0$b: length K, no NA")

      # (Re)set W to identity, to allow several runs from the same object
      W <<- diag(d+d^2+d^3)

      loopMax <- 2 #TODO: loopMax = 3? It does not seem to improve the results...
      x_init <- linArgs(θ0)
      for (loop in 1:loopMax)
      {
        op_res = constrOptim( x_init, .self$f, .self$grad_f,
          ui=cbind(
            rbind( rep(-1,K-1), diag(K-1) ),
            matrix(0, nrow=K, ncol=(d+1)*K) ),
          ci=c(-1,rep(0,K-1)) )
        if (loop < loopMax) #avoid computing an extra W
          W <<- computeW(expArgs(op_res$par))
        #x_init <- op_res$par #degrades performance (TODO: why?)
      }

      expArgs(op_res$par)
    }
  )
)

# Compute vectorial E[g^{(order)}(<β,x> + b)] with x~N(0,Id) (integral in R^d)
# = E[g^{(order)}(λz + b)] componentwise with z~N(0,1) (one-dimensional integrals),
# by numerically evaluating the integral.
#
# @param link Link, 'logit' or 'probit'
# @param order Order of derivative
# @param λ Norm of columns of β
# @param b Intercept
#
.G <- function(link, order, λ, b)
{
  # NOTE: weird "integral divergent" error on inputs:
  # link="probit"; order=2; λ=c(531.8099,586.8893,523.5816); b=c(-118.512674,-3.488020,2.109969)
  # Switch to the pracma package in that case (but it seems slow...)
  sapply( seq_along(λ), function(k) {
    res <- NULL
    tryCatch({
      # Fast code, may fail:
      res <- stats::integrate(
        function(z) .deriv[[link]][[order]](λ[k]*z+b[k]) * exp(-z^2/2) / sqrt(2*pi),
        lower=-Inf, upper=Inf )$value
    }, error = function(e) {
      # Robust slow code, no failures observed:
      sink("/dev/null") #pracma package has some useless printed outputs...
      res <<- pracma::integral(
        function(z) .deriv[[link]][[order]](λ[k]*z+b[k]) * exp(-z^2/2) / sqrt(2*pi),
        xmin=-Inf, xmax=Inf, method="Kronrod")
      sink()
    })
    res
  })
}
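
# Sanity check sketch (illustrative, not run): .G can be compared against a plain
# Monte-Carlo estimate of the same expectation, e.g. for the logit link:
#   z <- rnorm(1e5)
#   mean(.deriv[["logit"]][[1]](2*z + 0.5))   # ≈ .G("logit", 1, 2, 0.5)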

# Derivatives list: g^(k)(x) for links 'logit' and 'probit'
#
.deriv <- list(
  "probit"=list(
    # 'probit' derivatives list;
    # NOTE: exact values for the integral E[g^(k)(λz+b)] could be computed
    function(x) exp(-x^2/2)/(sqrt(2*pi)), #g'
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * -x, #g''
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^2 - 1), #g^(3)
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * (-x^3 + 3*x), #g^(4)
    function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^4 - 6*x^2 + 3) #g^(5)
  ),
  "logit"=list(
    # Sigmoid derivatives list, obtained with http://www.derivative-calculator.net/
    # @seealso http://www.ece.uc.edu/~aminai/papers/minai_sigmoids_NN93.pdf
    function(x) {e=exp(x); .zin(e /(e+1)^2)}, #g'
    function(x) {e=exp(x); .zin(e*(-e + 1) /(e+1)^3)}, #g''
    function(x) {e=exp(x); .zin(e*( e^2 - 4*e + 1) /(e+1)^4)}, #g^(3)
    function(x) {e=exp(x); .zin(e*(-e^3 + 11*e^2 - 11*e + 1) /(e+1)^5)}, #g^(4)
    function(x) {e=exp(x); .zin(e*( e^4 - 26*e^3 + 66*e^2 - 26*e + 1)/(e+1)^6)} #g^(5)
  )
)
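
# Derivative check sketch (illustrative, not run): each closed form g^(k+1) can be
# compared with a central finite difference of g^(k), e.g. for the logit link:
#   x <- 0.3; h <- 1e-5
#   (.deriv[["logit"]][[1]](x+h) - .deriv[["logit"]][[1]](x-h)) / (2*h)   # ≈ .deriv[["logit"]][[2]](x)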

# Utility for integration: return zero if the argument is NaN (Inf/Inf divisions)
#
# @param x Ratio of polynomials of exponentials, as in .deriv[['logit']]
#
.zin <- function(x)
{
  x[is.nan(x)] <- 0.
  x
}