#' Cross-validation method, added here as an example.
#' Parameters are described in ?agghoo and ?AgghooCV
standardCV_core <- function(data, target, task, gmodel, params, loss, CV) {
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  list_testinds <- list()
  for (v in seq_len(CV$V))
    list_testinds[[v]] <- get_testIndices(n, CV, v, shuffle_inds)
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  best_error <- Inf
  best_p <- NULL
  for (p in seq_len(gmodel$nmodels)) {
    # Total prediction error of parameter p over the V folds:
    error <- Reduce('+', lapply(seq_len(CV$V), function(v) {
      testIdx <- list_testinds[[v]]
      d <- splitTrainTest(data, target, testIdx)
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      loss(prediction, d$targetTest)
    }))
    if (error <= best_error) {
      if (error == best_error)
        best_p[[length(best_p)+1]] <- p
      else {
        best_p <- list(p)
        best_error <- error
      }
    }
  }
  # Break ties at random among the best parameters:
  chosenP <- best_p[[ sample(length(best_p), 1) ]]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}
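
# Usage sketch (not run): standardCV_core expects already-checked arguments and
# is normally reached through standardCV_run() below. A direct call, relying on
# the same agghoo helpers as above, could look like:
#   task <- checkTask(NULL, target)     # infer the task from the target
#   modPar <- checkModPar(NULL, NULL)   # default generic model and parameters
#   loss <- checkLoss(NULL, task)
#   CV <- checkCV(list(type="vfold", V=10, shuffle=TRUE))
#   standardCV_core(data, target, task, modPar$gmodel, modPar$params, loss, CV)
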
39 #' "voting" cross-validation method, added here as an example.
40 #' Parameters are described in ?agghoo and ?AgghooCV
CVvoting_core <- function(data, target, task, gmodel, params, loss, CV) {
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  bestP <- rep(0, gmodel$nmodels)
  for (v in seq_len(CV$V)) {
    test_indices <- get_testIndices(n, CV, v, shuffle_inds)
    d <- splitTrainTest(data, target, test_indices)
    best_error <- Inf
    best_p <- NULL
    for (p in seq_len(gmodel$nmodels)) {
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      error <- loss(prediction, d$targetTest)
      if (error <= best_error) {
        if (error == best_error)
          best_p[[length(best_p)+1]] <- p
        else {
          best_p <- list(p)
          best_error <- error
        }
      }
    }
    # Each fold votes for its best parameter(s):
    for (p in best_p)
      bestP[p] <- bestP[p] + 1
  }
  # Choose a param at random in case of ex-aequos
  # (index through length() to avoid sample()'s scalar-expansion behavior):
  candidates <- which(bestP == max(bestP))
  chosenP <- candidates[ sample(length(candidates), 1) ]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}
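
# Toy illustration (not run) of the voting tally above: with 3 candidate
# parameters and 4 folds whose per-fold winners were 2, 2, 3, 2, bestP ends up
# as c(0, 3, 1), so parameter 2 is selected:
#   bestP <- rep(0, 3)
#   for (p in c(2, 2, 3, 2)) bestP[p] <- bestP[p] + 1
#   which(bestP == max(bestP))   # 2
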
#' Run and eval the standard cross-validation procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on the testing dataset.
standardCV_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- standardCV_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}
#' Run and eval the voting cross-validation procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on the testing dataset.
CVvoting_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- CVvoting_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}
#' Run and eval the agghoo procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on the testing dataset.
agghoo_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  CV <- checkCV(args$CV)
  # Must remove the CV arg, or agghoo() will complain about an unused argument:
  args$CV <- NULL
  a <- do.call(agghoo, c(list(data=dataTrain, target=targetTrain), args))
  a$fit(CV)
  if (verbose)
    print(unlist(a$getParams()))
  pa <- a$predict(dataTest)
  err <- floss(pa, targetTest)
  if (verbose)
    print(paste("error agghoo:", err))
  invisible(err)
}
#' Compare a list of learning methods (or run only one), on data/target.
#'
#' @param data Data matrix or data.frame
#' @param target Target vector (generally)
#' @param method_s Either a single function, or a list of functions
#'        (examples: agghoo_run, standardCV_run)
#' @param rseed Seed of the random generator (-1 means "random seed")
#' @param floss Loss function to compute the error on the testing dataset.
#' @param verbose TRUE to request methods to be verbose.
#' @param ... arguments passed to method_s function(s)
compareTo <- function(
  data, target, method_s, rseed=-1, floss=NULL, verbose=TRUE, ...
) {
  if (rseed >= 0)
    set.seed(rseed)
  # Hold out a random test set: 10% of the data if n >= 500, else 20%.
  n <- nrow(data)
  test_indices <- sample(n, round(n / ifelse(n >= 500, 10, 5)))
  d <- splitTrainTest(data, target, test_indices)

  # Set the error function applied to model outputs (not used in core methods):
  task <- checkTask(list(...)$task, target)
  if (is.null(floss)) {
    floss <- function(y1, y2) {
      # if/else rather than ifelse(), so only the relevant branch is evaluated:
      if (task == "classification") mean(y1 != y2) else mean(abs(y1 - y2))
    }
  }

  # Run (and compare) all methods:
  runOne <- function(o) {
    o(d$dataTrain, d$dataTest, d$targetTrain, d$targetTest, floss, verbose, ...)
  }
  errors <- c()
  if (is.list(method_s))
    errors <- sapply(method_s, runOne)
  else if (is.function(method_s))
    errors <- runOne(method_s)
  invisible(errors)
}
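
# Usage sketch (not run): compare agghoo and standard CV on iris, with a fixed
# seed so both methods see the same train/test split:
#   compareTo(iris[,-5], iris[,5], list(agghoo_run, standardCV_run), rseed=42)
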
#' Run compareTo N times in parallel.
#'
#' @inheritParams compareTo
#' @param N Number of calls to method(s)
#' @param nc Number of cores. Set to parallel::detectCores() if undefined.
#'           Set it to any value <= 1 to disable parallelism.
#' @param verbose TRUE to print task numbers and "Errors:" at the end.
compareMulti <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, verbose=TRUE, ...
) {
  if (is.na(nc))
    nc <- parallel::detectCores()
  # "One" comparison per task; the task number n also serves as random seed:
  compareOne <- function(n) {
    if (verbose)
      print(n)
    compareTo(data, target, method_s, n, floss, verbose=FALSE, ...)
  }
  errors <- if (nc >= 2) {
    parallel::mclapply(1:N, compareOne, mc.cores = nc)
  } else {
    lapply(1:N, compareOne)
  }
  if (verbose)
    print("Errors:")
  Reduce('+', errors) / N   # average error(s) over the N runs
}
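
# Usage sketch (not run): average the comparison over 50 random splits, on at
# most 4 cores (leaving nc=NA would use parallel::detectCores()):
#   compareMulti(iris[,-5], iris[,5], list(agghoo_run, standardCV_run),
#                N=50, nc=4)
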
#' Run compareMulti on several values of the parameter V.
#'
#' @inheritParams compareMulti
#' @param V_range Values of V to be tested.
compareRange <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, V_range=c(10,15,20), ...
) {
  args <- list(...)
  # Avoid warnings if V is left unspecified:
  CV <- suppressWarnings( checkCV(args$CV) )
  errors <- lapply(V_range, function(V) {
    CV$V <- V
    args$CV <- CV
    do.call(compareMulti, c(list(data=data, target=target, method_s=method_s,
                                 N=N, nc=nc, floss=floss, verbose=FALSE), args))
  })
  print(paste(V_range, errors))
}
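
# Usage sketch (not run): repeat the multi-split comparison for several values
# of V (the number of CV folds/runs):
#   compareRange(iris[,-5], iris[,5], list(agghoo_run, standardCV_run),
#                N=50, nc=4, V_range=c(5, 10, 20))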