From: Benjamin Auder Date: Tue, 10 Dec 2019 11:42:29 +0000 (+0100) Subject: Replace tabs by 2spaces X-Git-Url: https://git.auder.net/%7B%7B%20asset%28%27mixstore/css/user/pieces/%24%7BgetWhatsApp%28link%29%7D?a=commitdiff_plain;h=6dd5c2acccd10635449230faa824b7e8906911bf;p=morpheus.git Replace tabs by 2spaces --- diff --git a/pkg/R/computeMu.R b/pkg/R/computeMu.R index 2a62793..f7f82ad 100644 --- a/pkg/R/computeMu.R +++ b/pkg/R/computeMu.R @@ -26,67 +26,67 @@ #' @export computeMu = function(X, Y, optargs=list()) { - if (!is.matrix(X) || !is.numeric(X) || any(is.na(X))) - stop("X: real matrix, no NA") - n = nrow(X) - d = ncol(X) - if (!is.numeric(Y) || length(Y)!=n || any(Y!=0 & Y!=1)) - stop("Y: vector of 0 and 1, size nrow(X), no NA") - if (!is.list(optargs)) - stop("optargs: list") + if (!is.matrix(X) || !is.numeric(X) || any(is.na(X))) + stop("X: real matrix, no NA") + n = nrow(X) + d = ncol(X) + if (!is.numeric(Y) || length(Y)!=n || any(Y!=0 & Y!=1)) + stop("Y: vector of 0 and 1, size nrow(X), no NA") + if (!is.list(optargs)) + stop("optargs: list") - # Step 0: Obtain the empirically estimated moments tensor, estimate also K - M = if (is.null(optargs$M)) computeMoments(X,Y) else optargs$M - K = optargs$K - if (is.null(K)) - { - # TODO: improve this basic heuristic - Σ = svd(M[[2]])$d - large_ratio <- ( abs(Σ[-d] / Σ[-1]) > 3 ) - K <- if (any(large_ratio)) max(2, which.min(large_ratio)) else d - } + # Step 0: Obtain the empirically estimated moments tensor, estimate also K + M = if (is.null(optargs$M)) computeMoments(X,Y) else optargs$M + K = optargs$K + if (is.null(K)) + { + # TODO: improve this basic heuristic + Σ = svd(M[[2]])$d + large_ratio <- ( abs(Σ[-d] / Σ[-1]) > 3 ) + K <- if (any(large_ratio)) max(2, which.min(large_ratio)) else d + } - # Step 1: generate a family of d matrices to joint-diagonalize to increase robustness - d = ncol(X) - fixed_design = FALSE - jd_nvects = ifelse(!is.null(optargs$jd_nvects), optargs$jd_nvects, 0) - if (jd_nvects == 0) - { - jd_nvects = d - fixed_design = TRUE - } - M2_t = array(dim=c(d,d,jd_nvects)) - for (i in seq_len(jd_nvects)) - { - rho = if (fixed_design) c(rep(0,i-1),1,rep(0,d-i)) else normalize( rnorm(d) ) - M2_t[,,i] = .T_I_I_w(M[[3]],rho) - } + # Step 1: generate a family of d matrices to joint-diagonalize to increase robustness + d = ncol(X) + fixed_design = FALSE + jd_nvects = ifelse(!is.null(optargs$jd_nvects), optargs$jd_nvects, 0) + if (jd_nvects == 0) + { + jd_nvects = d + fixed_design = TRUE + } + M2_t = array(dim=c(d,d,jd_nvects)) + for (i in seq_len(jd_nvects)) + { + rho = if (fixed_design) c(rep(0,i-1),1,rep(0,d-i)) else normalize( rnorm(d) ) + M2_t[,,i] = .T_I_I_w(M[[3]],rho) + } - # Step 2: obtain factors u_i (and their inverse) from the joint diagonalisation of M2_t - jd_method = ifelse(!is.null(optargs$jd_method), optargs$jd_method, "uwedge") - V = - if (jd_nvects > 1) { - #NOTE: increasing itermax does not help to converge, thus we suppress warnings - suppressWarnings({jd = jointDiag::ajd(M2_t, method=jd_method)}) -# if (jd_method=="uwedge") jd$B else solve(jd$A) - if (jd_method=="uwedge") jd$B else MASS::ginv(jd$A) - } - else - eigen(M2_t[,,1])$vectors + # Step 2: obtain factors u_i (and their inverse) from the joint diagonalisation of M2_t + jd_method = ifelse(!is.null(optargs$jd_method), optargs$jd_method, "uwedge") + V = + if (jd_nvects > 1) { + #NOTE: increasing itermax does not help to converge, thus we suppress warnings + suppressWarnings({jd = jointDiag::ajd(M2_t, method=jd_method)}) +# if (jd_method=="uwedge") 
jd$B else solve(jd$A) + if (jd_method=="uwedge") jd$B else MASS::ginv(jd$A) + } + else + eigen(M2_t[,,1])$vectors - # Step 3: obtain final factors from joint diagonalisation of T(I,I,u_i) - M2_t = array(dim=c(d,d,K)) - for (i in seq_len(K)) - M2_t[,,i] = .T_I_I_w(M[[3]],V[,i]) - suppressWarnings({jd = jointDiag::ajd(M2_t, method=jd_method)}) -# U = if (jd_method=="uwedge") solve(jd$B) else jd$A - U = if (jd_method=="uwedge") MASS::ginv(jd$B) else jd$A - μ = normalize(U[,1:K]) + # Step 3: obtain final factors from joint diagonalisation of T(I,I,u_i) + M2_t = array(dim=c(d,d,K)) + for (i in seq_len(K)) + M2_t[,,i] = .T_I_I_w(M[[3]],V[,i]) + suppressWarnings({jd = jointDiag::ajd(M2_t, method=jd_method)}) +# U = if (jd_method=="uwedge") solve(jd$B) else jd$A + U = if (jd_method=="uwedge") MASS::ginv(jd$B) else jd$A + μ = normalize(U[,1:K]) - # M1 also writes M1 = sum_k coeff_k * μ_k, where coeff_k >= 0 - # ==> search decomposition of vector M1 onto the (truncated) basis μ (of size dxK) - # This is a linear system μ %*% C = M1 with C of size K ==> C = psinv(μ) %*% M1 - C = MASS::ginv(μ) %*% M[[1]] - μ[,C < 0] = - μ[,C < 0] - μ + # M1 also writes M1 = sum_k coeff_k * μ_k, where coeff_k >= 0 + # ==> search decomposition of vector M1 onto the (truncated) basis μ (of size dxK) + # This is a linear system μ %*% C = M1 with C of size K ==> C = psinv(μ) %*% M1 + C = MASS::ginv(μ) %*% M[[1]] + μ[,C < 0] = - μ[,C < 0] + μ } diff --git a/pkg/R/multiRun.R b/pkg/R/multiRun.R index 8b70d6d..56c3e19 100644 --- a/pkg/R/multiRun.R +++ b/pkg/R/multiRun.R @@ -82,48 +82,48 @@ #' res[[i]] <- alignMatrices(res[[i]], ref=β, ls_mode="exact")} #' @export multiRun <- function(fargs, estimParams, - prepareArgs = function(x,i) x, N=10, ncores=3, agg=lapply, verbose=FALSE) + prepareArgs = function(x,i) x, N=10, ncores=3, agg=lapply, verbose=FALSE) { - if (!is.list(fargs)) - stop("fargs: list") - # No checks on fargs: supposedly done in estimParams[[i]]() - if (!is.list(estimParams)) - estimParams = list(estimParams) - # Verify that the provided parameters estimations are indeed functions - lapply(seq_along(estimParams), function(i) { - if (!is.function(estimParams[[i]])) - stop("estimParams: list of function(fargs)") - }) - if (!is.numeric(N) || N < 1) - stop("N: positive integer") + if (!is.list(fargs)) + stop("fargs: list") + # No checks on fargs: supposedly done in estimParams[[i]]() + if (!is.list(estimParams)) + estimParams = list(estimParams) + # Verify that the provided parameters estimations are indeed functions + lapply(seq_along(estimParams), function(i) { + if (!is.function(estimParams[[i]])) + stop("estimParams: list of function(fargs)") + }) + if (!is.numeric(N) || N < 1) + stop("N: positive integer") - estimParamAtIndex <- function(index) - { - fargs <- prepareArgs(fargs, index) - if (verbose) - cat("Run ",index,"\n") - lapply(seq_along(estimParams), function(i) { - if (verbose) - cat(" Method ",i,"\n") - out <- estimParams[[i]](fargs) - if (is.list(out)) - do.call(rbind, out) - else - out - }) - } + estimParamAtIndex <- function(index) + { + fargs <- prepareArgs(fargs, index) + if (verbose) + cat("Run ",index,"\n") + lapply(seq_along(estimParams), function(i) { + if (verbose) + cat(" Method ",i,"\n") + out <- estimParams[[i]](fargs) + if (is.list(out)) + do.call(rbind, out) + else + out + }) + } - if (ncores > 1) - { - cl = parallel::makeCluster(ncores, outfile="") - parallel::clusterExport(cl, c("fargs","verbose"), environment()) - list_res = parallel::clusterApplyLB(cl, 1:N, estimParamAtIndex) - 
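# --- Example (editor's sketch, not package code) ---------------------------
# Minimal standalone sketch of the parallel pattern used in multiRun() just
# above: export what the workers need, dispatch the N indices with load
# balancing, then shut the cluster down.  The toy function slow_square() and
# the worker count are illustrative assumptions.
slow_square <- function(i) { Sys.sleep(0.1); i^2 }
cl <- parallel::makeCluster(2, outfile = "")
parallel::clusterExport(cl, "slow_square", envir = environment())
res <- parallel::clusterApplyLB(cl, 1:10, slow_square)
parallel::stopCluster(cl)
unlist(res)   # 1 4 9 ... 100
# ---------------------------------------------------------------------------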
parallel::stopCluster(cl) - } - else - list_res = lapply(1:N, estimParamAtIndex) + if (ncores > 1) + { + cl = parallel::makeCluster(ncores, outfile="") + parallel::clusterExport(cl, c("fargs","verbose"), environment()) + list_res = parallel::clusterApplyLB(cl, 1:N, estimParamAtIndex) + parallel::stopCluster(cl) + } + else + list_res = lapply(1:N, estimParamAtIndex) - # De-interlace results: output one list per function - nf <- length(estimParams) - lapply( seq_len(nf), function(i) lapply(seq_len(N), function(j) list_res[[j]][[i]]) ) + # De-interlace results: output one list per function + nf <- length(estimParams) + lapply( seq_len(nf), function(i) lapply(seq_len(N), function(j) list_res[[j]][[i]]) ) } diff --git a/pkg/R/optimParams.R b/pkg/R/optimParams.R index a45f71a..505b665 100644 --- a/pkg/R/optimParams.R +++ b/pkg/R/optimParams.R @@ -33,17 +33,17 @@ #' @export optimParams <- function(X, Y, K, link=c("logit","probit")) { - # Check arguments + # Check arguments if (!is.matrix(X) || any(is.na(X))) stop("X: numeric matrix, no NAs") if (!is.numeric(Y) || any(is.na(Y)) || any(Y!=0 & Y!=1)) stop("Y: binary vector with 0 and 1 only") - link <- match.arg(link) + link <- match.arg(link) if (!is.numeric(K) || K!=floor(K) || K < 2) stop("K: integer >= 2") - # Build and return optimization algorithm object - methods::new("OptimParams", "li"=link, "X"=X, + # Build and return optimization algorithm object + methods::new("OptimParams", "li"=link, "X"=X, "Y"=as.integer(Y), "K"=as.integer(K)) } @@ -60,30 +60,30 @@ optimParams <- function(X, Y, K, link=c("logit","probit")) #' @field W Weights matrix (iteratively refined) #' setRefClass( - Class = "OptimParams", + Class = "OptimParams", - fields = list( - # Inputs - li = "character", #link function - X = "matrix", - Y = "numeric", + fields = list( + # Inputs + li = "character", #link function + X = "matrix", + Y = "numeric", Mhat = "numeric", #vector of empirical moments - # Dimensions - K = "integer", + # Dimensions + K = "integer", n = "integer", - d = "integer", + d = "integer", # Weights matrix (generalized least square) W = "matrix" - ), + ), - methods = list( - initialize = function(...) - { - "Check args and initialize K, d, W" + methods = list( + initialize = function(...) + { + "Check args and initialize K, d, W" callSuper(...) 
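# --- Example (editor's sketch, not package code) ---------------------------
# Minimal sketch of the Reference Class pattern used by OptimParams below:
# callSuper(...) forwards the field values given to methods::new() to the
# default initializer, after which derived fields can be filled in.  The class
# name "Counter" and its fields are illustrative assumptions.
Counter <- setRefClass("Counter",
  fields = list(n = "integer", label = "character"),
  methods = list(
    initialize = function(...) {
      callSuper(...)                    # assign fields passed by name to new()
      if (length(n) == 0) n <<- 0L
      if (length(label) == 0) label <<- "default"
    },
    bump = function() { n <<- n + 1L; invisible(n) }
  ))
cnt <- Counter$new(n = 0L)
cnt$bump()
cnt$n   # 1
# ---------------------------------------------------------------------------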
- if (!hasArg("X") || !hasArg("Y") || !hasArg("K") || !hasArg("li")) - stop("Missing arguments") + if (!hasArg("X") || !hasArg("Y") || !hasArg("K") || !hasArg("li")) + stop("Missing arguments") # Precompute empirical moments M <- computeMoments(X, Y) @@ -92,28 +92,28 @@ setRefClass( M3 <- as.double(M[[3]]) Mhat <<- c(M1, M2, M3) - n <<- nrow(X) - d <<- length(M1) + n <<- nrow(X) + d <<- length(M1) W <<- diag(d+d^2+d^3) #initialize at W = Identity - }, + }, - expArgs = function(v) - { - "Expand individual arguments from vector v into a list" + expArgs = function(v) + { + "Expand individual arguments from vector v into a list" - list( - # p: dimension K-1, need to be completed - "p" = c(v[1:(K-1)], 1-sum(v[1:(K-1)])), - "β" = matrix(v[K:(K+d*K-1)], ncol=K), - "b" = v[(K+d*K):(K+(d+1)*K-1)]) - }, + list( + # p: dimension K-1, need to be completed + "p" = c(v[1:(K-1)], 1-sum(v[1:(K-1)])), + "β" = matrix(v[K:(K+d*K-1)], ncol=K), + "b" = v[(K+d*K):(K+(d+1)*K-1)]) + }, - linArgs = function(L) - { - "Linearize vectors+matrices from list L into a vector" + linArgs = function(L) + { + "Linearize vectors+matrices from list L into a vector" - c(L$p[1:(K-1)], as.double(L$β), L$b) - }, + c(L$p[1:(K-1)], as.double(L$β), L$b) + }, computeW = function(θ) { @@ -130,32 +130,32 @@ setRefClass( "Vector of moments, of size d+d^2+d^3" p <- θ$p - β <- θ$β - λ <- sqrt(colSums(β^2)) - b <- θ$b - - # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1 - β2 <- apply(β, 2, function(col) col %o% col) - β3 <- apply(β, 2, function(col) col %o% col %o% col) - - c( - β %*% (p * .G(li,1,λ,b)), - β2 %*% (p * .G(li,2,λ,b)), - β3 %*% (p * .G(li,3,λ,b))) + β <- θ$β + λ <- sqrt(colSums(β^2)) + b <- θ$b + + # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1 + β2 <- apply(β, 2, function(col) col %o% col) + β3 <- apply(β, 2, function(col) col %o% col %o% col) + + c( + β %*% (p * .G(li,1,λ,b)), + β2 %*% (p * .G(li,2,λ,b)), + β3 %*% (p * .G(li,3,λ,b))) }, f = function(θ) { - "Product t(hat_Mi - Mi) W (hat_Mi - Mi) with Mi(theta)" + "Product t(hat_Mi - Mi) W (hat_Mi - Mi) with Mi(theta)" L <- expArgs(θ) - A <- as.matrix(Mhat - Moments(L)) + A <- as.matrix(Mhat - Moments(L)) t(A) %*% W %*% A }, - grad_f = function(θ) - { - "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1" + grad_f = function(θ) + { + "Gradient of f, dimension (K-1) + d*K + K = (d+2)*K - 1" L <- expArgs(θ) -2 * t(grad_M(L)) %*% W %*% as.matrix((Mhat - Moments(L))) @@ -165,80 +165,80 @@ setRefClass( { "Gradient of the vector of moments, size (dim=)d+d^2+d^3 x K-1+K+d*K" - p <- θ$p - β <- θ$β - λ <- sqrt(colSums(β^2)) - μ <- sweep(β, 2, λ, '/') - b <- θ$b + p <- θ$p + β <- θ$β + λ <- sqrt(colSums(β^2)) + μ <- sweep(β, 2, λ, '/') + b <- θ$b res <- matrix(nrow=nrow(W), ncol=0) - # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1 - β2 <- apply(β, 2, function(col) col %o% col) - β3 <- apply(β, 2, function(col) col %o% col %o% col) + # Tensorial products β^2 = β2 and β^3 = β3 must be computed from current β1 + β2 <- apply(β, 2, function(col) col %o% col) + β3 <- apply(β, 2, function(col) col %o% col %o% col) - # Some precomputations - G1 = .G(li,1,λ,b) - G2 = .G(li,2,λ,b) - G3 = .G(li,3,λ,b) - G4 = .G(li,4,λ,b) - G5 = .G(li,5,λ,b) + # Some precomputations + G1 = .G(li,1,λ,b) + G2 = .G(li,2,λ,b) + G3 = .G(li,3,λ,b) + G4 = .G(li,4,λ,b) + G5 = .G(li,5,λ,b) # Gradient on p: K-1 columns, dim rows - km1 = 1:(K-1) - res <- cbind(res, rbind( + km1 = 1:(K-1) + res <- cbind(res, rbind( sweep(as.matrix(β 
[,km1]), 2, G1[km1], '*') - G1[K] * β [,K], sweep(as.matrix(β2[,km1]), 2, G2[km1], '*') - G2[K] * β2[,K], sweep(as.matrix(β3[,km1]), 2, G3[km1], '*') - G3[K] * β3[,K] )) - for (i in 1:d) - { - # i determines the derivated matrix dβ[2,3] - - dβ_left <- sweep(β, 2, p * G3 * β[i,], '*') - dβ_right <- matrix(0, nrow=d, ncol=K) - block <- i - dβ_right[block,] <- dβ_right[block,] + 1 - dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*') - - dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*') - dβ2_right <- do.call( rbind, lapply(1:d, function(j) { - sweep(dβ_right, 2, β[j,], '*') - }) ) - block <- ((i-1)*d+1):(i*d) - dβ2_right[block,] <- dβ2_right[block,] + β - dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*') - - dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*') - dβ3_right <- do.call( rbind, lapply(1:d, function(j) { - sweep(dβ2_right, 2, β[j,], '*') - }) ) - block <- ((i-1)*d*d+1):(i*d*d) - dβ3_right[block,] <- dβ3_right[block,] + β2 - dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*') - - res <- cbind(res, rbind(dβ, dβ2, dβ3)) - } + for (i in 1:d) + { + # i determines the derivated matrix dβ[2,3] + + dβ_left <- sweep(β, 2, p * G3 * β[i,], '*') + dβ_right <- matrix(0, nrow=d, ncol=K) + block <- i + dβ_right[block,] <- dβ_right[block,] + 1 + dβ <- dβ_left + sweep(dβ_right, 2, p * G1, '*') + + dβ2_left <- sweep(β2, 2, p * G4 * β[i,], '*') + dβ2_right <- do.call( rbind, lapply(1:d, function(j) { + sweep(dβ_right, 2, β[j,], '*') + }) ) + block <- ((i-1)*d+1):(i*d) + dβ2_right[block,] <- dβ2_right[block,] + β + dβ2 <- dβ2_left + sweep(dβ2_right, 2, p * G2, '*') + + dβ3_left <- sweep(β3, 2, p * G5 * β[i,], '*') + dβ3_right <- do.call( rbind, lapply(1:d, function(j) { + sweep(dβ2_right, 2, β[j,], '*') + }) ) + block <- ((i-1)*d*d+1):(i*d*d) + dβ3_right[block,] <- dβ3_right[block,] + β2 + dβ3 <- dβ3_left + sweep(dβ3_right, 2, p * G3, '*') + + res <- cbind(res, rbind(dβ, dβ2, dβ3)) + } # Gradient on b - res <- cbind(res, rbind( - sweep(β, 2, p * G2, '*'), - sweep(β2, 2, p * G3, '*'), - sweep(β3, 2, p * G4, '*') )) + res <- cbind(res, rbind( + sweep(β, 2, p * G2, '*'), + sweep(β2, 2, p * G3, '*'), + sweep(β3, 2, p * G4, '*') )) - res - }, + res + }, - run = function(θ0) - { - "Run optimization from θ0 with solver..." + run = function(θ0) + { + "Run optimization from θ0 with solver..." - if (!is.list(θ0)) - stop("θ0: list") + if (!is.list(θ0)) + stop("θ0: list") if (is.null(θ0$β)) stop("At least θ0$β must be provided") - if (!is.matrix(θ0$β) || any(is.na(θ0$β)) || ncol(θ0$β) != K) - stop("θ0$β: matrix, no NA, ncol == K") + if (!is.matrix(θ0$β) || any(is.na(θ0$β)) || ncol(θ0$β) != K) + stop("θ0$β: matrix, no NA, ncol == K") if (is.null(θ0$p)) θ0$p = rep(1/K, K-1) else if (length(θ0$p) != K-1 || sum(θ0$p) > 1) @@ -265,9 +265,9 @@ setRefClass( print(expArgs(op_res$par)) } - expArgs(op_res$par) - } - ) + expArgs(op_res$par) + } + ) ) # Compute vectorial E[g^{(order)}(<β,x> + b)] with x~N(0,Id) (integral in R^d) @@ -281,9 +281,9 @@ setRefClass( # .G <- function(link, order, λ, b) { - # NOTE: weird "integral divergent" error on inputs: - # link="probit"; order=2; λ=c(531.8099,586.8893,523.5816); b=c(-118.512674,-3.488020,2.109969) - # Switch to pracma package for that (but it seems slow...) + # NOTE: weird "integral divergent" error on inputs: + # link="probit"; order=2; λ=c(531.8099,586.8893,523.5816); b=c(-118.512674,-3.488020,2.109969) + # Switch to pracma package for that (but it seems slow...) 
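# --- Example (editor's sketch, not package code) ---------------------------
# Hedged numerical sketch of the quantity .G() evaluates: since <β,x> with
# x ~ N(0, I_d) is N(0, λ^2) where λ = ||β||, the expectation
# E[g^(k)(<β,x> + b)] reduces to the 1-D integral E[g^(k)(λ z + b)], z ~ N(0,1).
# Below, k = 1 for the logit link; the helper name g1_logit and the values of
# λ and b are illustrative assumptions.
g1_logit <- function(x) { e <- exp(x); v <- e / (e + 1)^2; v[is.nan(v)] <- 0; v }  # g', NaN-guarded as in .zin
λ <- 2; b <- 0.5
stats::integrate(function(z) g1_logit(λ * z + b) * dnorm(z), -Inf, Inf)$value
mean(g1_logit(λ * rnorm(1e5) + b))   # Monte-Carlo cross-check, should roughly agree
# ---------------------------------------------------------------------------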
sapply( seq_along(λ), function(k) { res <- NULL tryCatch({ @@ -306,24 +306,24 @@ setRefClass( # Derivatives list: g^(k)(x) for links 'logit' and 'probit' # .deriv <- list( - "probit"=list( - # 'probit' derivatives list; - # NOTE: exact values for the integral E[g^(k)(λz+b)] could be computed - function(x) exp(-x^2/2)/(sqrt(2*pi)), #g' - function(x) exp(-x^2/2)/(sqrt(2*pi)) * -x, #g'' - function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^2 - 1), #g^(3) - function(x) exp(-x^2/2)/(sqrt(2*pi)) * (-x^3 + 3*x), #g^(4) - function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^4 - 6*x^2 + 3) #g^(5) - ), - "logit"=list( - # Sigmoid derivatives list, obtained with http://www.derivative-calculator.net/ - # @seealso http://www.ece.uc.edu/~aminai/papers/minai_sigmoids_NN93.pdf - function(x) {e=exp(x); .zin(e /(e+1)^2)}, #g' - function(x) {e=exp(x); .zin(e*(-e + 1) /(e+1)^3)}, #g'' - function(x) {e=exp(x); .zin(e*( e^2 - 4*e + 1) /(e+1)^4)}, #g^(3) - function(x) {e=exp(x); .zin(e*(-e^3 + 11*e^2 - 11*e + 1) /(e+1)^5)}, #g^(4) - function(x) {e=exp(x); .zin(e*( e^4 - 26*e^3 + 66*e^2 - 26*e + 1)/(e+1)^6)} #g^(5) - ) + "probit"=list( + # 'probit' derivatives list; + # NOTE: exact values for the integral E[g^(k)(λz+b)] could be computed + function(x) exp(-x^2/2)/(sqrt(2*pi)), #g' + function(x) exp(-x^2/2)/(sqrt(2*pi)) * -x, #g'' + function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^2 - 1), #g^(3) + function(x) exp(-x^2/2)/(sqrt(2*pi)) * (-x^3 + 3*x), #g^(4) + function(x) exp(-x^2/2)/(sqrt(2*pi)) * ( x^4 - 6*x^2 + 3) #g^(5) + ), + "logit"=list( + # Sigmoid derivatives list, obtained with http://www.derivative-calculator.net/ + # @seealso http://www.ece.uc.edu/~aminai/papers/minai_sigmoids_NN93.pdf + function(x) {e=exp(x); .zin(e /(e+1)^2)}, #g' + function(x) {e=exp(x); .zin(e*(-e + 1) /(e+1)^3)}, #g'' + function(x) {e=exp(x); .zin(e*( e^2 - 4*e + 1) /(e+1)^4)}, #g^(3) + function(x) {e=exp(x); .zin(e*(-e^3 + 11*e^2 - 11*e + 1) /(e+1)^5)}, #g^(4) + function(x) {e=exp(x); .zin(e*( e^4 - 26*e^3 + 66*e^2 - 26*e + 1)/(e+1)^6)} #g^(5) + ) ) # Utility for integration: "[return] zero if [argument is] NaN" (Inf / Inf divs) @@ -332,6 +332,6 @@ setRefClass( # .zin <- function(x) { - x[is.nan(x)] <- 0. - x + x[is.nan(x)] <- 0. 
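# --- Example (editor's sketch, not package code) ---------------------------
# Hedged sanity check that the closed-form logit derivatives listed above
# agree with simple central finite differences; the step size h and the test
# point x0 are arbitrary choices.
sigmoid <- function(x) 1 / (1 + exp(-x))
g1 <- function(x) { e <- exp(x); e / (e + 1)^2 }             # g''   from the list above
g2 <- function(x) { e <- exp(x); e * (-e + 1) / (e + 1)^3 }  # g''  from the list above
h <- 1e-5; x0 <- 0.3
c(analytic = g1(x0), numeric = (sigmoid(x0 + h) - sigmoid(x0 - h)) / (2 * h))
c(analytic = g2(x0), numeric = (g1(x0 + h) - g1(x0 - h)) / (2 * h))
# ---------------------------------------------------------------------------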
+ x } diff --git a/pkg/R/plot.R b/pkg/R/plot.R index 29a254e..9727493 100644 --- a/pkg/R/plot.R +++ b/pkg/R/plot.R @@ -6,10 +6,10 @@ # extractParam <- function(mr, x=1, y=1) { - # Obtain L vectors where L = number of res lists in mr - lapply( mr, function(mr_list) { - sapply(mr_list, function(m) m[x,y]) - } ) + # Obtain L vectors where L = number of res lists in mr + lapply( mr, function(mr_list) { + sapply(mr_list, function(m) m[x,y]) + } ) } #' plotHist @@ -31,12 +31,12 @@ extractParam <- function(mr, x=1, y=1) #' @export plotHist <- function(mr, x, y) { - params <- extractParam(mr, x, y) - L = length(params) - # Plot histograms side by side - par(mfrow=c(1,L), cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) - for (i in 1:L) - hist(params[[i]], breaks=40, freq=FALSE, xlab="Parameter value", ylab="Density") + params <- extractParam(mr, x, y) + L = length(params) + # Plot histograms side by side + par(mfrow=c(1,L), cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) + for (i in 1:L) + hist(params[[i]], breaks=40, freq=FALSE, xlab="Parameter value", ylab="Density") } #' plotBox @@ -50,12 +50,12 @@ plotHist <- function(mr, x, y) #' @export plotBox <- function(mr, x, y, xtitle="") { - params <- extractParam(mr, x, y) - L = length(params) - # Plot boxplots side by side - par(mfrow=c(1,L), cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) - for (i in 1:L) - boxplot(params[[i]], xlab=xtitle, ylab="Parameter value") + params <- extractParam(mr, x, y) + L = length(params) + # Plot boxplots side by side + par(mfrow=c(1,L), cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) + for (i in 1:L) + boxplot(params[[i]], xlab=xtitle, ylab="Parameter value") } #' plotCoefs @@ -71,32 +71,32 @@ plotBox <- function(mr, x, y, xtitle="") #' @export plotCoefs <- function(mr, params, idx, xtitle="Parameter") { - L <- nrow(mr[[1]][[1]]) - K <- ncol(mr[[1]][[1]]) + L <- nrow(mr[[1]][[1]]) + K <- ncol(mr[[1]][[1]]) - params_hat <- matrix(nrow=L, ncol=K) - stdev <- matrix(nrow=L, ncol=K) - for (x in 1:L) - { - for (y in 1:K) - { - estims <- extractParam(mr, x, y) - params_hat[x,y] <- mean(estims[[idx]]) -# stdev[x,y] <- sqrt( mean( (estims[[idx]] - params[x,y])^2 ) ) - # HACK remove extreme quantile in estims[[i]] before computing sd() - stdev[x,y] <- sd( estims[[idx]] ) #[ estims[[idx]] < max(estims[[idx]]) & estims[[idx]] > min(estims[[idx]]) ] ) - } - } + params_hat <- matrix(nrow=L, ncol=K) + stdev <- matrix(nrow=L, ncol=K) + for (x in 1:L) + { + for (y in 1:K) + { + estims <- extractParam(mr, x, y) + params_hat[x,y] <- mean(estims[[idx]]) +# stdev[x,y] <- sqrt( mean( (estims[[idx]] - params[x,y])^2 ) ) + # HACK remove extreme quantile in estims[[i]] before computing sd() + stdev[x,y] <- sd( estims[[idx]] ) #[ estims[[idx]] < max(estims[[idx]]) & estims[[idx]] > min(estims[[idx]]) ] ) + } + } - par(cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) - params <- as.double(params) - o <- order(params) - avg_param <- as.double(params_hat) - std_param <- as.double(stdev) - matplot(cbind(params[o],avg_param[o],avg_param[o]+std_param[o],avg_param[o]-std_param[o]), - col=1, lty=c(1,5,3,3), type="l", lwd=2, xlab=xtitle, ylab="") + par(cex.axis=1.5, cex.lab=1.5, mar=c(4.7,5,1,1)) + params <- as.double(params) + o <- order(params) + avg_param <- as.double(params_hat) + std_param <- as.double(stdev) + matplot(cbind(params[o],avg_param[o],avg_param[o]+std_param[o],avg_param[o]-std_param[o]), + col=1, lty=c(1,5,3,3), type="l", lwd=2, xlab=xtitle, ylab="") - #print(o) #not returning o to avoid weird Jupyter issue... 
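# --- Example (editor's sketch, not package code) ---------------------------
# Hedged end-to-end sketch (all numeric values are illustrative assumptions):
# repeat an estimation with multiRun(), undo label switching with
# alignMatrices(), then inspect the runs with the plotting helpers above.
β   <- matrix(c(1, -2, 3, 1), ncol = 2)                     # d = 2, K = 2
res <- multiRun(
  list(n = 500, p = c(.5), β = β, b = c(0, 0), link = "logit"),
  list(function(fargs) {
    io <- generateSampleIO(fargs$n, fargs$p, fargs$β, fargs$b, fargs$link)
    computeMu(io$X, io$Y, list(K = 2))
  }),
  N = 20, ncores = 1)
res[[1]] <- alignMatrices(res[[1]], ref = normalize(β), ls_mode = "exact")
plotHist(res, x = 1, y = 1)   # spread of the (1,1) coefficient across the 20 runs
plotBox(res, x = 1, y = 1)
# ---------------------------------------------------------------------------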
(TODO:) + #print(o) #not returning o to avoid weird Jupyter issue... (TODO:) } #' plotQn @@ -113,19 +113,19 @@ plotCoefs <- function(mr, params, idx, xtitle="Parameter") #' @export plotQn <- function(N, n, p, β, b, link) { - d <- nrow(β) - K <- ncol(β) - io <- generateSampleIO(n, p, β, b, link) - op <- optimParams(K, link, list(X=io$X, Y=io$Y)) - # N random starting points gaussian (TODO: around true β?) - res <- matrix(nrow=d*K+1, ncol=N) - for (i in seq_len(N)) - { - β_init <- rnorm(d*K) - par <- op$run( c(rep(1/K,K-1), β_init, rep(0,K)) ) - par <- op$linArgs(par) - Qn <- op$f(par) - res[,i] = c(Qn, par[K:(K+d*K-1)]) - } - res #TODO: plot this, not just return it... + d <- nrow(β) + K <- ncol(β) + io <- generateSampleIO(n, p, β, b, link) + op <- optimParams(K, link, list(X=io$X, Y=io$Y)) + # N random starting points gaussian (TODO: around true β?) + res <- matrix(nrow=d*K+1, ncol=N) + for (i in seq_len(N)) + { + β_init <- rnorm(d*K) + par <- op$run( c(rep(1/K,K-1), β_init, rep(0,K)) ) + par <- op$linArgs(par) + Qn <- op$f(par) + res[,i] = c(Qn, par[K:(K+d*K-1)]) + } + res #TODO: plot this, not just return it... } diff --git a/pkg/R/sampleIO.R b/pkg/R/sampleIO.R index 6fa38ae..3d46092 100644 --- a/pkg/R/sampleIO.R +++ b/pkg/R/sampleIO.R @@ -20,54 +20,54 @@ #' @export generateSampleIO = function(n, p, β, b, link) { - # Check arguments - tryCatch({n = as.integer(n)}, error=function(e) stop("Cannot convert n to integer")) - if (length(n) > 1) - warning("n is a vector but should be scalar: only first element used") - if (n <= 0) - stop("n: positive integer") - if (!is.matrix(β) || !is.numeric(β) || any(is.na(β))) - stop("β: real matrix, no NAs") - K = ncol(β) - if (!is.numeric(p) || length(p)!=K-1 || any(is.na(p)) || any(p<0) || sum(p) > 1) - stop("p: positive vector of size K-1, no NA, sum<=1") - p <- c(p, 1-sum(p)) - if (!is.numeric(b) || length(b)!=K || any(is.na(b))) - stop("b: real vector of size K, no NA") + # Check arguments + tryCatch({n = as.integer(n)}, error=function(e) stop("Cannot convert n to integer")) + if (length(n) > 1) + warning("n is a vector but should be scalar: only first element used") + if (n <= 0) + stop("n: positive integer") + if (!is.matrix(β) || !is.numeric(β) || any(is.na(β))) + stop("β: real matrix, no NAs") + K = ncol(β) + if (!is.numeric(p) || length(p)!=K-1 || any(is.na(p)) || any(p<0) || sum(p) > 1) + stop("p: positive vector of size K-1, no NA, sum<=1") + p <- c(p, 1-sum(p)) + if (!is.numeric(b) || length(b)!=K || any(is.na(b))) + stop("b: real vector of size K, no NA") - #random generation of the size of each population in X~Y (unordered) - classes = rmultinom(1, n, p) + #random generation of the size of each population in X~Y (unordered) + classes = rmultinom(1, n, p) - d = nrow(β) - zero_mean = rep(0,d) - id_sigma = diag(rep(1,d)) - # Always consider an intercept (use b=0 for none) - d = d + 1 - β = rbind(β, b) - X = matrix(nrow=0, ncol=d) - Y = c() - index = c() - for (i in 1:ncol(β)) - { - index = c(index, rep(i, classes[i])) - newXblock = cbind( MASS::mvrnorm(classes[i], zero_mean, id_sigma), 1 ) - arg_link = newXblock %*% β[,i] #β - probas = - if (link == "logit") - { - e_arg_link = exp(arg_link) - e_arg_link / (1 + e_arg_link) - } - else #"probit" - pnorm(arg_link) - probas[is.nan(probas)] = 1 #overflow of exp(x) - #probas = rowSums(p * probas) - X = rbind(X, newXblock) - #Y = c( Y, vapply(probas, function(p) (ifelse(p >= .5, 1, 0)), 1) ) - Y = c( Y, vapply(probas, function(p) (rbinom(1,1,p)), 1) ) - } - shuffle = sample(n) - # Returned X should 
not contain an intercept column (it's an argument of estimation - # methods) - list("X"=X[shuffle,-d], "Y"=Y[shuffle], "index"=index[shuffle]) + d = nrow(β) + zero_mean = rep(0,d) + id_sigma = diag(rep(1,d)) + # Always consider an intercept (use b=0 for none) + d = d + 1 + β = rbind(β, b) + X = matrix(nrow=0, ncol=d) + Y = c() + index = c() + for (i in 1:ncol(β)) + { + index = c(index, rep(i, classes[i])) + newXblock = cbind( MASS::mvrnorm(classes[i], zero_mean, id_sigma), 1 ) + arg_link = newXblock %*% β[,i] #β + probas = + if (link == "logit") + { + e_arg_link = exp(arg_link) + e_arg_link / (1 + e_arg_link) + } + else #"probit" + pnorm(arg_link) + probas[is.nan(probas)] = 1 #overflow of exp(x) + #probas = rowSums(p * probas) + X = rbind(X, newXblock) + #Y = c( Y, vapply(probas, function(p) (ifelse(p >= .5, 1, 0)), 1) ) + Y = c( Y, vapply(probas, function(p) (rbinom(1,1,p)), 1) ) + } + shuffle = sample(n) + # Returned X should not contain an intercept column (it's an argument of estimation + # methods) + list("X"=X[shuffle,-d], "Y"=Y[shuffle], "index"=index[shuffle]) } diff --git a/pkg/R/utils.R b/pkg/R/utils.R index 6d1c361..ff988a0 100644 --- a/pkg/R/utils.R +++ b/pkg/R/utils.R @@ -9,9 +9,9 @@ #' @export normalize = function(X) { - X = as.matrix(X) - norm2 = sqrt( colSums(X^2) ) - sweep(X, 2, norm2, '/') + X = as.matrix(X) + norm2 = sqrt( colSums(X^2) ) + sweep(X, 2, norm2, '/') } # Computes a tensor-vector product @@ -23,11 +23,11 @@ normalize = function(X) # .T_I_I_w = function(Te, w) { - d = length(w) - Ma = matrix(0,nrow=d,ncol=d) - for (j in 1:d) - Ma = Ma + w[j] * Te[,,j] - Ma + d = length(w) + Ma = matrix(0,nrow=d,ncol=d) + for (j in 1:d) + Ma = Ma + w[j] * Te[,,j] + Ma } # Computes the second-order empirical moment between input X and output Y @@ -39,11 +39,11 @@ normalize = function(X) # .Moments_M2 = function(X, Y) { - n = nrow(X) - d = ncol(X) - M2 = matrix(0,nrow=d,ncol=d) - matrix( .C("Moments_M2", X=as.double(X), Y=as.double(Y), pn=as.integer(n), - pd=as.integer(d), M2=as.double(M2), PACKAGE="morpheus")$M2, nrow=d, ncol=d) + n = nrow(X) + d = ncol(X) + M2 = matrix(0,nrow=d,ncol=d) + matrix( .C("Moments_M2", X=as.double(X), Y=as.double(Y), pn=as.integer(n), + pd=as.integer(d), M2=as.double(M2), PACKAGE="morpheus")$M2, nrow=d, ncol=d) } # Computes the third-order empirical moment between input X and output Y @@ -55,11 +55,11 @@ normalize = function(X) # .Moments_M3 = function(X, Y) { - n = nrow(X) - d = ncol(X) - M3 = array(0,dim=c(d,d,d)) - array( .C("Moments_M3", X=as.double(X), Y=as.double(Y), pn=as.integer(n), - pd=as.integer(d), M3=as.double(M3), PACKAGE="morpheus")$M3, dim=c(d,d,d) ) + n = nrow(X) + d = ncol(X) + M3 = array(0,dim=c(d,d,d)) + array( .C("Moments_M3", X=as.double(X), Y=as.double(Y), pn=as.integer(n), + pd=as.integer(d), M3=as.double(M3), PACKAGE="morpheus")$M3, dim=c(d,d,d) ) } #' computeMoments @@ -72,7 +72,7 @@ normalize = function(X) #' #' @export computeMoments = function(X, Y) - list( colMeans(Y * X), .Moments_M2(X,Y), .Moments_M3(X,Y) ) + list( colMeans(Y * X), .Moments_M2(X,Y), .Moments_M3(X,Y) ) # Find the optimal assignment (permutation) between two sets (minimize cost) # @@ -83,9 +83,9 @@ computeMoments = function(X, Y) # .hungarianAlgorithm = function(distances) { - n = nrow(distances) - .C("hungarianAlgorithm", distances=as.double(distances), pn=as.integer(n), - assignment=integer(n), PACKAGE="morpheus")$assignment + n = nrow(distances) + .C("hungarianAlgorithm", distances=as.double(distances), pn=as.integer(n), + assignment=integer(n), 
PACKAGE="morpheus")$assignment } #' alignMatrices @@ -105,65 +105,65 @@ computeMoments = function(X, Y) #' @export alignMatrices = function(Ms, ref, ls_mode) { - if (!is.matrix(ref) && ref != "mean") - stop("ref: matrix or 'mean'") - if (!ls_mode %in% c("exact","approx1","approx2")) - stop("ls_mode in {'exact','approx1','approx2'}") + if (!is.matrix(ref) && ref != "mean") + stop("ref: matrix or 'mean'") + if (!ls_mode %in% c("exact","approx1","approx2")) + stop("ls_mode in {'exact','approx1','approx2'}") - K <- ncol(Ms[[1]]) - if (is.character(ref)) #ref=="mean" - m_sum = Ms[[1]] - L <- length(Ms) - for (i in ifelse(is.character(ref),2,1):L) - { - m_ref = if (is.character(ref)) m_sum / (i-1) else ref - m = Ms[[i]] #shorthand + K <- ncol(Ms[[1]]) + if (is.character(ref)) #ref=="mean" + m_sum = Ms[[1]] + L <- length(Ms) + for (i in ifelse(is.character(ref),2,1):L) + { + m_ref = if (is.character(ref)) m_sum / (i-1) else ref + m = Ms[[i]] #shorthand - if (ls_mode == "exact") - { - #distances[i,j] = distance between m column i and ref column j - distances = apply( m_ref, 2, function(col) ( sqrt(colSums((m-col)^2)) ) ) - assignment = .hungarianAlgorithm(distances) - col <- m[,assignment] - if (is.list(Ms)) Ms[[i]] <- col else Ms[,,i] <- col - } - else - { - # Greedy matching: - # approx1: li[[i]][,j] is assigned to m[,k] minimizing dist(li[[i]][,j],m[,k']) - # approx2: m[,j] is assigned to li[[i]][,k] minimizing dist(m[,j],li[[i]][,k']) - available_indices = 1:K - for (j in 1:K) - { - distances = - if (ls_mode == "approx1") - { - apply(as.matrix(m[,available_indices]), 2, - function(col) ( sqrt(sum((col - m_ref[,j])^2)) ) ) - } - else #approx2 - { - apply(as.matrix(m_ref[,available_indices]), 2, - function(col) ( sqrt(sum((col - m[,j])^2)) ) ) - } - indMin = which.min(distances) - if (ls_mode == "approx1") - { - col <- m[ , available_indices[indMin] ] - if (is.list(Ms)) Ms[[i]][,j] <- col else Ms[,j,i] <- col - } - else #approx2 - { - col <- available_indices[indMin] - if (is.list(Ms)) Ms[[i]][,col] <- m[,j] else Ms[,col,i] <- m[,j] - } - available_indices = available_indices[-indMin] - } - } + if (ls_mode == "exact") + { + #distances[i,j] = distance between m column i and ref column j + distances = apply( m_ref, 2, function(col) ( sqrt(colSums((m-col)^2)) ) ) + assignment = .hungarianAlgorithm(distances) + col <- m[,assignment] + if (is.list(Ms)) Ms[[i]] <- col else Ms[,,i] <- col + } + else + { + # Greedy matching: + # approx1: li[[i]][,j] is assigned to m[,k] minimizing dist(li[[i]][,j],m[,k']) + # approx2: m[,j] is assigned to li[[i]][,k] minimizing dist(m[,j],li[[i]][,k']) + available_indices = 1:K + for (j in 1:K) + { + distances = + if (ls_mode == "approx1") + { + apply(as.matrix(m[,available_indices]), 2, + function(col) ( sqrt(sum((col - m_ref[,j])^2)) ) ) + } + else #approx2 + { + apply(as.matrix(m_ref[,available_indices]), 2, + function(col) ( sqrt(sum((col - m[,j])^2)) ) ) + } + indMin = which.min(distances) + if (ls_mode == "approx1") + { + col <- m[ , available_indices[indMin] ] + if (is.list(Ms)) Ms[[i]][,j] <- col else Ms[,j,i] <- col + } + else #approx2 + { + col <- available_indices[indMin] + if (is.list(Ms)) Ms[[i]][,col] <- m[,j] else Ms[,col,i] <- m[,j] + } + available_indices = available_indices[-indMin] + } + } - # Update current sum with "label-switched" li[[i]] - if (is.character(ref)) #ref=="mean" - m_sum = m_sum + Ms[[i]] - } - Ms + # Update current sum with "label-switched" li[[i]] + if (is.character(ref)) #ref=="mean" + m_sum = m_sum + Ms[[i]] + } + Ms } diff --git 
a/pkg/src/functions.c b/pkg/src/functions.c index f311a34..e534c57 100644 --- a/pkg/src/functions.c +++ b/pkg/src/functions.c @@ -3,62 +3,62 @@ // Index matrix (by columns) int mi(int i, int j, int d1, int d2) { - return j*d1 + i; + return j*d1 + i; } // Index 3-tensor (by columns, matrices ordered by last dim) int ti(int i, int j, int k, int d1, int d2, int d3) { - return k*d1*d2 + j*d1 + i; + return k*d1*d2 + j*d1 + i; } // Empirical cross-moment of order 2 between X size nxd and Y size n void Moments_M2(double* X, double* Y, int* pn, int* pd, double* M2) { - int n=*pn, d=*pd; - //double* M2 = (double*)calloc(d*d,sizeof(double)); + int n=*pn, d=*pd; + //double* M2 = (double*)calloc(d*d,sizeof(double)); - // M2 = E[Y*X^*2] - E[Y*e^*2] = E[Y (X^*2 - I)] - for (int j=0; jnum_rows = rows; - p->num_cols = cols; - - p->cost = (double**)calloc(rows,sizeof(double*)); - p->assignment = (int**)calloc(rows,sizeof(int*)); - - for(i=0; inum_rows; i++) { - p->cost[i] = (double*)calloc(cols,sizeof(double)); - p->assignment[i] = (int*)calloc(cols,sizeof(int)); - for(j=0; jnum_cols; j++) { - p->cost[i][j] = (i < org_rows && j < org_cols) ? cost_matrix[i][j] : 0.; - p->assignment[i][j] = 0; - - if (max_cost < p->cost[i][j]) - max_cost = p->cost[i][j]; - } - } - - if (mode == HUNGARIAN_MODE_MAXIMIZE_UTIL) { - for(i=0; inum_rows; i++) { - for(j=0; jnum_cols; j++) - p->cost[i][j] = max_cost - p->cost[i][j]; - } - } - else if (mode == HUNGARIAN_MODE_MINIMIZE_COST) { - // nothing to do - } -// else -// fprintf(stderr,"%s: unknown mode. Mode was set to HUNGARIAN_MODE_MINIMIZE_COST !\n", __FUNCTION__); - - return rows; + int i,j; + double max_cost = 0.; + int org_cols = cols, + org_rows = rows; + + // is the number of cols not equal to number of rows ? + // if yes, expand with 0-cols / 0-cols + rows = hungarian_imax(cols, rows); + cols = rows; + + p->num_rows = rows; + p->num_cols = cols; + + p->cost = (double**)calloc(rows,sizeof(double*)); + p->assignment = (int**)calloc(rows,sizeof(int*)); + + for(i=0; inum_rows; i++) { + p->cost[i] = (double*)calloc(cols,sizeof(double)); + p->assignment[i] = (int*)calloc(cols,sizeof(int)); + for(j=0; jnum_cols; j++) { + p->cost[i][j] = (i < org_rows && j < org_cols) ? cost_matrix[i][j] : 0.; + p->assignment[i][j] = 0; + + if (max_cost < p->cost[i][j]) + max_cost = p->cost[i][j]; + } + } + + if (mode == HUNGARIAN_MODE_MAXIMIZE_UTIL) { + for(i=0; inum_rows; i++) { + for(j=0; jnum_cols; j++) + p->cost[i][j] = max_cost - p->cost[i][j]; + } + } + else if (mode == HUNGARIAN_MODE_MINIMIZE_COST) { + // nothing to do + } +// else +// fprintf(stderr,"%s: unknown mode. Mode was set to HUNGARIAN_MODE_MINIMIZE_COST !\n", __FUNCTION__); + + return rows; } /** Free the memory allocated by init. **/ void hungarian_free(hungarian_problem_t* p) { - int i; - for(i=0; inum_rows; i++) { - free(p->cost[i]); - free(p->assignment[i]); - } - free(p->cost); - free(p->assignment); - p->cost = NULL; - p->assignment = NULL; + int i; + for(i=0; inum_rows; i++) { + free(p->cost[i]); + free(p->assignment[i]); + } + free(p->cost); + free(p->assignment); + p->cost = NULL; + p->assignment = NULL; } /** This method computes the optimal assignment. 
**/ void hungarian_solve(hungarian_problem_t* p) { - int i, j, m, n, k, l, t, q, unmatched; - double cost, s; - int* col_mate; - int* row_mate; - int* parent_row; - int* unchosen_row; - double* row_dec; - double* col_inc; - double* slack; - int* slack_row; - - cost = 0.; - m =p->num_rows; - n =p->num_cols; - - col_mate = (int*)calloc(p->num_rows,sizeof(int)); - unchosen_row = (int*)calloc(p->num_rows,sizeof(int)); - row_dec = (double*)calloc(p->num_rows,sizeof(double)); - slack_row = (int*)calloc(p->num_rows,sizeof(int)); - - row_mate = (int*)calloc(p->num_cols,sizeof(int)); - parent_row = (int*)calloc(p->num_cols,sizeof(int)); - col_inc = (double*)calloc(p->num_cols,sizeof(double)); - slack = (double*)calloc(p->num_cols,sizeof(double)); - - for (i=0; inum_rows; i++) { - col_mate[i]=0; - unchosen_row[i]=0; - row_dec[i]=0.; - slack_row[i]=0; - } - for (j=0; jnum_cols; j++) { - row_mate[j]=0; - parent_row[j] = 0; - col_inc[j]=0.; - slack[j]=0.; - } - - for (i=0; inum_rows; ++i) - for (j=0; jnum_cols; ++j) - p->assignment[i][j]=HUNGARIAN_NOT_ASSIGNED; - - // Begin subtract column minima in order to start with lots of zeroes 12 - for (l=0; lcost[0][l]; - for (k=1; kcost[k][l]cost[k][l]; - cost+=s; - if (s!=0.) - for (k=0; kcost[k][l]-=s; - } - // End subtract column minima in order to start with lots of zeroes 12 - - // Begin initial state 16 - t=0; - for (l=0; lcost[k][0]; - for (l=1; lcost[k][l]cost[k][l]; - row_dec[k]=s; - for (l=0; lcost[k][l] && row_mate[l]<0) - { - col_mate[k]=l; - row_mate[l]=k; - goto row_done; - } - col_mate[k]= -1; - unchosen_row[t++]=k; + int i, j, m, n, k, l, t, q, unmatched; + double cost, s; + int* col_mate; + int* row_mate; + int* parent_row; + int* unchosen_row; + double* row_dec; + double* col_inc; + double* slack; + int* slack_row; + + cost = 0.; + m =p->num_rows; + n =p->num_cols; + + col_mate = (int*)calloc(p->num_rows,sizeof(int)); + unchosen_row = (int*)calloc(p->num_rows,sizeof(int)); + row_dec = (double*)calloc(p->num_rows,sizeof(double)); + slack_row = (int*)calloc(p->num_rows,sizeof(int)); + + row_mate = (int*)calloc(p->num_cols,sizeof(int)); + parent_row = (int*)calloc(p->num_cols,sizeof(int)); + col_inc = (double*)calloc(p->num_cols,sizeof(double)); + slack = (double*)calloc(p->num_cols,sizeof(double)); + + for (i=0; inum_rows; i++) { + col_mate[i]=0; + unchosen_row[i]=0; + row_dec[i]=0.; + slack_row[i]=0; + } + for (j=0; jnum_cols; j++) { + row_mate[j]=0; + parent_row[j] = 0; + col_inc[j]=0.; + slack[j]=0.; + } + + for (i=0; inum_rows; ++i) + for (j=0; jnum_cols; ++j) + p->assignment[i][j]=HUNGARIAN_NOT_ASSIGNED; + + // Begin subtract column minima in order to start with lots of zeroes 12 + for (l=0; lcost[0][l]; + for (k=1; kcost[k][l]cost[k][l]; + cost+=s; + if (s!=0.) + for (k=0; kcost[k][l]-=s; + } + // End subtract column minima in order to start with lots of zeroes 12 + + // Begin initial state 16 + t=0; + for (l=0; lcost[k][0]; + for (l=1; lcost[k][l]cost[k][l]; + row_dec[k]=s; + for (l=0; lcost[k][l] && row_mate[l]<0) + { + col_mate[k]=l; + row_mate[l]=k; + goto row_done; + } + col_mate[k]= -1; + unchosen_row[t++]=k; row_done: - ; - } - // End initial state 16 - - // Begin Hungarian algorithm 18 - if (t==0) - goto done; - unmatched=t; - while (1) - { - q=0; - while (1) - { - while (q 0.) - { - double del=p->cost[k][l]-s+col_inc[l]; - if (del0. && slack[l]0.) - { - slack[l]-=s; - if (slack[l]==0.) - { - // Begin look at a new zero 22 - k=slack_row[l]; - if (row_mate[l]<0) - { - for (j=l+1; j 0.) 
+ { + double del=p->cost[k][l]-s+col_inc[l]; + if (del0. && slack[l]0.) + { + slack[l]-=s; + if (slack[l]==0.) + { + // Begin look at a new zero 22 + k=slack_row[l]; + if (row_mate[l]<0) + { + for (j=l+1; jcost[k][l]cost[k][l]!=row_dec[k]-col_inc[l]) - exit(0); - } - k=0; - for (l=0; lm) - exit(0); - // End doublecheck the solution 23 -*/ // End Hungarian algorithm 18 - - for (i=0; iassignment[i][col_mate[i]]=HUNGARIAN_ASSIGNED; - /*TRACE("%d - %d\n", i, col_mate[i]);*/ - } - for (k=0; kcost[k][l]-row_dec[k]+col_inc[l]);*/ - p->cost[k][l]=p->cost[k][l]-row_dec[k]+col_inc[l]; - } - /*TRACE("\n");*/ - } - for (i=0; icost[k][l]cost[k][l]!=row_dec[k]-col_inc[l]) + exit(0); + } + k=0; + for (l=0; lm) + exit(0); + // End doublecheck the solution 23 +*/ // End Hungarian algorithm 18 + + for (i=0; iassignment[i][col_mate[i]]=HUNGARIAN_ASSIGNED; + /*TRACE("%d - %d\n", i, col_mate[i]);*/ + } + for (k=0; kcost[k][l]-row_dec[k]+col_inc[l]);*/ + p->cost[k][l]=p->cost[k][l]-row_dec[k]+col_inc[l]; + } + /*TRACE("\n");*/ + } + for (i=0; i
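# --- Example (editor's sketch, not package code) ---------------------------
# Hedged sketch of the full estimation pipeline assembled from the R files in
# this patch (all numeric values are illustrative assumptions): simulate a
# two-component logit mixture, take the directions returned by computeMu() as
# a starting point, then refine (p, β, b) by moment matching with optimParams().
set.seed(1)
β  <- matrix(c(1, -2, 3, 1), ncol = 2)          # d = 2, K = 2
io <- generateSampleIO(n = 5000, p = c(.5), β = β, b = c(0, 0), link = "logit")
μ  <- computeMu(io$X, io$Y, list(K = 2))        # unit-norm column directions
o  <- optimParams(io$X, io$Y, K = 2, link = "logit")
θ  <- o$run(list(β = μ))                        # p and b get default values inside run()
θ$β                                             # compare with β up to scaling and label switching
# ---------------------------------------------------------------------------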