#' standardCV_core
#'
#' Standard cross-validation method, included here as an example.
#' Parameters are described in ?agghoo and ?AgghooCV
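#'
#' A minimal usage sketch (hypothetical values: it assumes checkTask,
#' checkModPar, checkLoss and checkCV accept NULL and fill in defaults,
#' mirroring how standardCV_run builds these arguments):
#'
#' @examples
#' \dontrun{
#'   # Select a model on iris by standard V-fold cross-validation:
#'   task <- checkTask(NULL, iris[,5])
#'   mp <- checkModPar(NULL, NULL)
#'   loss <- checkLoss(NULL, task)
#'   CV <- checkCV(list(type="vfold", V=10, shuffle=TRUE))
#'   s <- standardCV_core(iris[,-5], iris[,5], task,
#'                        mp$gmodel, mp$params, loss, CV)
#'   s$param # selected hyperparameter
#' }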
standardCV_core <- function(data, target, task, gmodel, params, loss, CV) {
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  list_testinds <- list()
  for (v in seq_len(CV$V))
    list_testinds[[v]] <- get_testIndices(n, CV, v, shuffle_inds)
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  best_error <- Inf
  best_p <- NULL
  for (p in seq_len(gmodel$nmodels)) {
    # Total error of parameter p, accumulated over the V folds:
    error <- Reduce('+', lapply(seq_len(CV$V), function(v) {
      testIdx <- list_testinds[[v]]
      d <- splitTrainTest(data, target, testIdx)
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      loss(prediction, d$targetTest)
    }) )
    if (error <= best_error) {
      if (error == best_error)
        # Tie with the current best: keep p as an additional candidate
        best_p[[length(best_p)+1]] <- p
      else {
        # Strictly better: restart the list of candidates
        best_p <- list(p)
        best_error <- error
      }
    }
  }
  # Break ties uniformly at random among the best parameters:
  chosenP <- best_p[[ sample(length(best_p), 1) ]]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}

#' CVvoting_core
#'
#' "Voting" cross-validation method, added here as an example: each fold
#' votes for its best parameter(s), and the most-voted parameter wins.
#' Parameters are described in ?agghoo and ?AgghooCV
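#'
#' Called exactly like standardCV_core (see the usage sketch there); only
#' the aggregation of the per-fold results differs.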
CVvoting_core <- function(data, target, task, gmodel, params, loss, CV) {
  CV <- checkCV(CV) # harmless if the caller already checked its CV argument
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  bestP <- rep(0, gmodel$nmodels)
  for (v in seq_len(CV$V)) {
    test_indices <- get_testIndices(n, CV, v, shuffle_inds)
    d <- splitTrainTest(data, target, test_indices)
    best_p <- NULL
    best_error <- Inf
    for (p in seq_len(gmodel$nmodels)) {
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      error <- loss(prediction, d$targetTest)
      if (error <= best_error) {
        if (error == best_error)
          # Tie with the current best: keep p as an additional candidate
          best_p[[length(best_p)+1]] <- p
        else {
          # Strictly better: restart the list of candidates
          best_p <- list(p)
          best_error <- error
        }
      }
    }
    # Each parameter tied for the best on this fold receives one vote:
    for (p in best_p)
      bestP[p] <- bestP[p] + 1
  }
  # Choose a parameter at random in case of ties. Note the two-step indexing:
  # sample(x, 1) on a length-1 numeric x would draw from 1:x instead of x.
  candidates <- which(bestP == max(bestP))
  chosenP <- candidates[ sample(length(candidates), 1) ]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}

#' standardCV_run
#'
#' Run and evaluate the standard cross-validation procedure.
#' Parameters are rather explicit, except "floss", which is the "final" loss
#' function applied to compute the error on the test dataset.
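#'
#' A usage sketch (hypothetical split; in practice this function is meant to
#' be passed to compareTo, which builds the train/test split itself):
#'
#' @examples
#' \dontrun{
#'   idx <- sample(nrow(iris), 30)
#'   standardCV_run(iris[-idx,-5], iris[idx,-5], iris[-idx,5], iris[idx,5],
#'                  floss=function(y1, y2) mean(y1 != y2), verbose=TRUE,
#'                  CV=list(type="vfold", V=10, shuffle=TRUE))
#' }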
standardCV_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- standardCV_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}

#' CVvoting_run
#'
#' Run and evaluate the "voting" cross-validation procedure.
#' Parameters are rather explicit, except "floss", which is the "final" loss
#' function applied to compute the error on the test dataset.
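#'
#' Called exactly like standardCV_run; see the usage sketch there.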
CVvoting_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- CVvoting_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}

#' agghoo_run
#'
#' Run and evaluate the agghoo procedure.
#' Parameters are rather explicit, except "floss", which is the "final" loss
#' function applied to compute the error on the test dataset.
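#'
#' A usage sketch (hypothetical split, as for standardCV_run above):
#'
#' @examples
#' \dontrun{
#'   idx <- sample(nrow(iris), 30)
#'   agghoo_run(iris[-idx,-5], iris[idx,-5], iris[-idx,5], iris[idx,5],
#'              floss=function(y1, y2) mean(y1 != y2), verbose=TRUE,
#'              CV=list(type="vfold", V=10, shuffle=TRUE))
#' }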
agghoo_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  CV <- checkCV(args$CV)
  # Remove the CV argument, otherwise agghoo() would stop on "unused argument":
  args$CV <- NULL
  a <- do.call(agghoo, c(list(data=dataTrain, target=targetTrain), args))
  a$fit(CV)
  if (verbose) {
    print("Parameters:")
    print(unlist(a$getParams()))
  }
  pa <- a$predict(dataTest)
  err <- floss(pa, targetTest)
  if (verbose)
    print(paste("error agghoo:", err))
  invisible(err)
}

#' compareTo
#'
#' Compare a list of learning methods (or run only one) on data/target.
#'
#' @param data Data matrix or data.frame
#' @param target Target vector (generally; one value per row of data)
#' @param method_s Either a single function or a list of functions
#'   (examples: agghoo_run, standardCV_run)
#' @param rseed Seed of the random generator (-1 means "random seed")
#' @param floss Loss function to compute the error on the test dataset.
#' @param verbose TRUE to request methods to be verbose.
#' @param ... arguments passed to method_s function(s)
#'
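#' @examples
#' \dontrun{
#'   # A sketch: compare agghoo to standard CV on iris, with 10-fold splits
#'   # (both methods use their default model family and parameter grid):
#'   compareTo(iris[,-5], iris[,5], list(agghoo_run, standardCV_run),
#'             rseed=42, CV=list(type="vfold", V=10, shuffle=TRUE))
#' }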
#' @export
compareTo <- function(
  data, target, method_s, rseed=-1, floss=NULL, verbose=TRUE, ...
) {
  if (rseed >= 0)
    set.seed(rseed)
  n <- nrow(data)
  # Hold out 10% of the rows when n >= 500, 20% otherwise:
  test_indices <- sample(n, round(n / (if (n >= 500) 10 else 5)))
  d <- splitTrainTest(data, target, test_indices)

  # Set error function to be used on model outputs (not in core methods)
  task <- checkTask(list(...)$task, target)
  if (is.null(floss)) {
    # Default: misclassification rate for classification, MAE for regression
    floss <- function(y1, y2) {
      if (task == "classification") mean(y1 != y2) else mean(abs(y1 - y2))
    }
  }

  # Run (and compare) all methods:
  runOne <- function(o) {
    o(d$dataTrain, d$dataTest, d$targetTrain, d$targetTest, floss, verbose, ...)
  }
  errors <- c()
  if (is.list(method_s))
    errors <- sapply(method_s, runOne)
  else if (is.function(method_s))
    errors <- runOne(method_s)
  invisible(errors)
}

#' compareMulti
#'
#' Run compareTo N times in parallel.
#'
#' @inheritParams compareTo
#' @param N Number of calls to method(s)
#' @param nc Number of cores; defaults to parallel::detectCores() when NA.
#'   Set it to any value <= 1 to disable parallelism.
#' @param verbose TRUE to print run numbers, and "Errors:" at the end.
#'
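#' @examples
#' \dontrun{
#'   # A sketch: average error of standard CV over 50 random train/test
#'   # splits of iris, run sequentially (nc=1):
#'   compareMulti(iris[,-5], iris[,5], standardCV_run, N=50, nc=1,
#'                CV=list(type="vfold", V=10, shuffle=TRUE))
#' }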
#' @export
compareMulti <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, verbose=TRUE, ...
) {
  if (is.na(nc))
    nc <- parallel::detectCores()

  # "One" comparison for each method in method_s (list); n is also the seed
  compareOne <- function(n) {
    if (verbose)
      print(n)
    compareTo(data, target, method_s, n, floss, verbose=FALSE, ...)
  }

  errors <- if (nc >= 2) {
    # mclapply relies on forking: mc.cores > 1 is unsupported on Windows,
    # so use nc=1 there
    parallel::mclapply(1:N, compareOne, mc.cores = nc)
  } else {
    lapply(1:N, compareOne)
  }
  if (verbose)
    print("Errors:")
  # Return the error(s) averaged over the N runs:
  Reduce('+', errors) / N
}

#' compareRange
#'
#' Run compareMulti on several values of the parameter V.
#'
#' @inheritParams compareMulti
#' @param V_range Values of V to be tested.
#'
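#' @examples
#' \dontrun{
#'   # A sketch: error of agghoo for several numbers of folds on iris:
#'   compareRange(iris[,-5], iris[,5], agghoo_run, N=20, nc=1,
#'                V_range=c(5, 10))
#' }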
#' @export
compareRange <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, V_range=c(10,15,20), ...
) {
  args <- list(...)
  # Check the CV argument once, quietly, so that later checkCV calls do not
  # warn when V was left unspecified; V is overwritten below anyway:
  args$CV <- suppressWarnings( checkCV(args$CV) )
  errors <- lapply(V_range, function(V) {
    args$CV$V <- V
    do.call(compareMulti, c(list(data=data, target=target, method_s=method_s,
      N=N, nc=nc, floss=floss, verbose=FALSE), args))
  })
  # Print the average error(s) obtained for each value of V:
  names(errors) <- paste("V =", V_range)
  print(errors)
}