Add CV-voting, remove random forests
[agghoo.git] / R / compareTo.R
#' CVvoting_core
#'
#' "Voting" cross-validation method, added here as an example: within each
#' fold, every candidate parameter is evaluated and the best one(s) receive a
#' vote; the parameter with the most votes over all folds is finally selected.
#' Parameters are described in ?agghoo and ?AgghooCV.
CVvoting_core <- function(data, target, task, gmodel, params, loss, CV) {
  CV <- checkCV(CV)
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  # Build the Model object first, so that gmodel$nmodels is well defined:
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  bestP <- rep(0, gmodel$nmodels)
  for (v in seq_len(CV$V)) {
    test_indices <- get_testIndices(n, CV, v, shuffle_inds)
    d <- splitTrainTest(data, target, test_indices)
    # Find the parameter(s) minimizing the loss on this fold:
    best_p <- list()
    best_error <- Inf
    for (p in seq_len(gmodel$nmodels)) {
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      error <- loss(prediction, d$targetTest)
      if (error <= best_error) {
        if (error == best_error)
          best_p[[length(best_p)+1]] <- p
        else {
          best_p <- list(p)
          best_error <- error
        }
      }
    }
    # Each fold winner receives one vote:
    for (p in best_p)
      bestP[p] <- bestP[p] + 1
  }
  # Choose a param at random in case of ex-aequos
  # (indexing into 'candidates' avoids the sample(x, 1) pitfall when a single
  #  parameter reaches the maximal number of votes):
  maxP <- max(bestP)
  candidates <- which(bestP == maxP)
  chosenP <- candidates[sample(length(candidates), 1)]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}
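# Toy illustration (not package code) of the voting rule implemented above:
# each fold votes for its best parameter(s), ties over folds broken at random.
# Wrapped in `if (FALSE)` so nothing runs when the file is sourced.
if (FALSE) {
  votes <- c(0, 0, 0)                   # vote counters for 3 candidate params
  fold_winners <- list(2, 2, c(1, 3))   # per-fold winners (3rd fold is a tie)
  for (w in fold_winners)
    votes[w] <- votes[w] + 1
  # votes == c(1, 2, 1): parameter 2 collects the most votes and is selected
  candidates <- which(votes == max(votes))
  chosen <- candidates[sample(length(candidates), 1)]
}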
#' standardCV_core
#'
#' Standard cross-validation method, added here as an example: the parameter
#' minimizing the error summed over all folds is selected.
#' Parameters are described in ?agghoo and ?AgghooCV.
standardCV_core <- function(data, target, task, gmodel, params, loss, CV) {
  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  list_testinds <- list()
  for (v in seq_len(CV$V))
    list_testinds[[v]] <- get_testIndices(n, CV, v, shuffle_inds)
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  best_error <- Inf
  best_p <- list()
  for (p in seq_len(gmodel$nmodels)) {
    # Total error of parameter p over all folds:
    error <- Reduce('+', lapply(seq_len(CV$V), function(v) {
      testIdx <- list_testinds[[v]]
      d <- splitTrainTest(data, target, testIdx)
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      prediction <- model_pred(d$dataTest)
      loss(prediction, d$targetTest)
    }) )
    if (error <= best_error) {
      if (error == best_error)
        best_p[[length(best_p)+1]] <- p
      else {
        best_p <- list(p)
        best_error <- error
      }
    }
  }
  # Break ties at random among the minimizers:
  chosenP <- best_p[[ sample(length(best_p), 1) ]]
  list(model=gmodel$get(data, target, chosenP), param=gmodel$getParam(chosenP))
}
#' standardCV_run
#'
#' Run and evaluate the standard cross-validation procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on the testing dataset.
#'
#' @export
standardCV_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- standardCV_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}
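# Sketch of a wrapper making CVvoting_core usable with compareTo(), mirroring
# standardCV_run above. This wrapper is an assumption (it is not part of the
# package API); it only swaps the core function being called.
CVvoting_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  task <- checkTask(args$task, targetTrain)
  modPar <- checkModPar(args$gmodel, args$params)
  loss <- checkLoss(args$loss, task)
  CV <- checkCV(args$CV)
  s <- CVvoting_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste("Parameter:", s$param))
  p <- s$model(dataTest)
  err <- floss(p, targetTest)
  if (verbose)
    print(paste("error CV-voting:", err))
  invisible(err)
}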
#' agghoo_run
#'
#' Run and evaluate the agghoo procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on the testing dataset.
#'
#' @export
agghoo_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  args <- list(...)
  CV <- checkCV(args$CV)
  # Remove the CV arg, or agghoo() will complain about an unused argument:
  args$CV <- NULL
  a <- do.call(agghoo, c(list(data=dataTrain, target=targetTrain), args))
  a$fit(CV)
  if (verbose) {
    print("Parameters:")
    print(unlist(a$getParams()))
  }
  pa <- a$predict(dataTest)
  err <- floss(pa, targetTest)
  if (verbose)
    print(paste("error agghoo:", err))
  invisible(err)
}
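# Usage sketch for agghoo_run on a manual split (wrapped in `if (FALSE)` so it
# is not executed when the file is sourced). The iris dataset, gmodel = "knn"
# and the CV list are illustrative assumptions; see ?agghoo for valid values.
if (FALSE) {
  n <- nrow(iris)
  test <- sample(n, n %/% 5)
  agghoo_run(iris[-test, -5], iris[test, -5], iris[-test, 5], iris[test, 5],
    floss = function(y1, y2) mean(y1 != y2), verbose = TRUE,
    gmodel = "knn", CV = list(type = "vfold", V = 10, shuffle = TRUE))
}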
#' compareTo
#'
#' Compare a list of learning methods (or run only one) on data/target.
#'
#' @param data Data matrix or data.frame
#' @param target Target vector (generally: labels for classification,
#'   numeric values for regression)
#' @param method_s Either a single function or a list of functions
#'   (examples: agghoo_run, standardCV_run)
#' @param rseed Seed of the random generator (-1 means "random seed")
#' @param floss Loss function to compute the error on the testing dataset.
#' @param verbose TRUE to request methods to be verbose.
#' @param ... Arguments passed to the method_s function(s)
#'
#' @export
compareTo <- function(
  data, target, method_s, rseed=-1, floss=NULL, verbose=TRUE, ...
) {
  if (rseed >= 0)
    set.seed(rseed)
  n <- nrow(data)
  # Hold out 10% of the data (20% for small datasets) as the testing set:
  test_indices <- sample(n, round(n / ifelse(n >= 500, 10, 5)))
  d <- splitTrainTest(data, target, test_indices)

  # Set the error function applied to model outputs (not used in core methods):
  task <- checkTask(list(...)$task, target)
  if (is.null(floss)) {
    floss <- function(y1, y2) {
      # Misclassification rate for classification, mean absolute error otherwise
      if (task == "classification") mean(y1 != y2) else mean(abs(y1 - y2))
    }
  }

  # Run (and compare) all methods:
  runOne <- function(o) {
    o(d$dataTrain, d$dataTest, d$targetTrain, d$targetTest, floss, verbose, ...)
  }
  errors <- c()
  if (is.list(method_s))
    errors <- sapply(method_s, runOne)
  else if (is.function(method_s))
    errors <- runOne(method_s)
  invisible(errors)
}
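# Usage sketch (not executed): compare agghoo to standard cross-validation on
# the iris dataset. gmodel = "knn" is an illustrative assumption; any model
# family accepted by agghoo() can be passed through '...'.
if (FALSE) {
  compareTo(iris[, -5], iris[, 5], list(agghoo_run, standardCV_run),
    rseed = 42, gmodel = "knn")
}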
#' compareMulti
#'
#' Run compareTo N times in parallel.
#'
#' @inheritParams compareTo
#' @param N Number of calls to method(s)
#' @param nc Number of cores. Set to parallel::detectCores() if undefined.
#'   Set it to any value <= 1 to disable parallelism.
#' @param verbose TRUE to print the task numbers and the final "Errors:" line.
#'
#' @export
compareMulti <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, verbose=TRUE, ...
) {
  require(parallel)
  if (is.na(nc))
    nc <- parallel::detectCores()

  # "One" comparison for each method in method_s (list); the run index n is
  # also used as the random seed, so runs are reproducible:
  compareOne <- function(n) {
    if (verbose)
      print(n)
    compareTo(data, target, method_s, n, floss, verbose=FALSE, ...)
  }

  errors <- if (nc >= 2) {
    parallel::mclapply(seq_len(N), compareOne, mc.cores = nc)
  } else {
    lapply(seq_len(N), compareOne)
  }
  if (verbose)
    print("Errors:")
  # Average the error(s) over the N runs:
  Reduce('+', errors) / N
}
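# Usage sketch (not executed): average both methods' test errors over N = 10
# random splits, on 2 cores. Same assumptions as above (iris, gmodel = "knn").
if (FALSE) {
  compareMulti(iris[, -5], iris[, 5], list(agghoo_run, standardCV_run),
    N = 10, nc = 2, gmodel = "knn")
}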
#' compareRange
#'
#' Run compareMulti on several values of the parameter V.
#'
#' @inheritParams compareMulti
#' @param V_range Values of V to be tested.
#'
#' @export
compareRange <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, V_range=c(10,15,20), ...
) {
  args <- list(...)
  # Avoid warnings if V is left unspecified:
  args$CV <- suppressWarnings( checkCV(args$CV) )
  errors <- lapply(V_range, function(V) {
    args$CV$V <- V
    do.call(compareMulti, c(list(data=data, target=target, method_s=method_s,
      N=N, nc=nc, floss=floss, verbose=FALSE), args))
  })
  print(paste(V_range, errors))
}
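# Usage sketch (not executed): run compareMulti for several numbers of folds V.
# Assumptions as above (iris dataset, gmodel = "knn").
if (FALSE) {
  compareRange(iris[, -5], iris[, 5], list(agghoo_run, standardCV_run),
    N = 10, gmodel = "knn", V_range = c(5, 10, 20))
}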