Some fixes + refactoring
[agghoo.git] / R / compareTo.R
#' standardCV_core
#'
#' Cross-validation method, added here as an example.
#' Parameters are described in ?agghoo and ?AgghooCV
standardCV_core <- function(data, target, task, gmodel, params, loss, CV) {
  n <- nrow(data)
  # A row shuffle is only meaningful for shuffled V-fold CV
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  # Fix the V test-index sets once, so every parameter value is
  # evaluated on exactly the same folds.
  list_testinds <- lapply(seq_len(CV$V),
                          function(v) get_testIndices(n, CV, v, shuffle_inds))
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  best_error <- Inf
  best_model <- NULL
  for (p in seq_len(gmodel$nmodels)) {
    # Total CV error of parameter p, accumulated over the V folds
    cv_error <- Reduce('+', lapply(seq_len(CV$V), function(v) {
      testIdx <- list_testinds[[v]]
      d <- splitTrainTest(data, target, testIdx)
      model_pred <- gmodel$get(d$dataTrain, d$targetTrain, p)
      loss(model_pred(d$dataTest), d$targetTest)
    }))
    if (cv_error > best_error)
      next
    # Refit on the full data for the retained parameter
    candidate <- list(model = gmodel$get(data, target, p),
                      param = gmodel$getParam(p))
    if (cv_error == best_error) {
      # Tie with the current best: keep all ex-aequo models
      best_model[[length(best_model) + 1]] <- candidate
    } else {
      best_model <- list(candidate)
      best_error <- cv_error
    }
  }
  # Break any remaining tie uniformly at random
  best_model[[ sample(length(best_model), 1) ]]
}
37
#' standardCV_run
#'
#' Run and eval the standard cross-validation procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on testing dataset.
#'
#' @export
standardCV_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  # Validate and normalize the extra arguments before running CV
  extra <- list(...)
  task <- checkTask(extra$task, targetTrain)
  modPar <- checkModPar(extra$gmodel, extra$params)
  loss <- checkLoss(extra$loss, task)
  CV <- checkCV(extra$CV)
  chosen <- standardCV_core(
    dataTrain, targetTrain, task, modPar$gmodel, modPar$params, loss, CV)
  if (verbose)
    print(paste( "Parameter:", chosen$param ))
  # Evaluate the selected model on the held-out test set
  prediction <- chosen$model(dataTest)
  err <- floss(prediction, targetTest)
  if (verbose)
    print(paste("error CV:", err))
  invisible(err)
}
63
#' agghoo_run
#'
#' Run and eval the agghoo procedure.
#' Parameters are rather explicit except "floss", which corresponds to the
#' "final" loss function, applied to compute the error on testing dataset.
#'
#' @export
agghoo_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, floss, verbose, ...
) {
  extra <- list(...)
  CV <- checkCV(extra$CV)
  # Must remove CV arg, or agghoo will complain "error: unused arg"
  extra$CV <- NULL
  a <- do.call(agghoo, c(list(data = dataTrain, target = targetTrain), extra))
  a$fit(CV)
  if (verbose) {
    print("Parameters:")
    print(unlist(a$getParams()))
  }
  # Aggregated prediction on the held-out test set
  prediction <- a$predict(dataTest)
  err <- floss(prediction, targetTest)
  if (verbose)
    print(paste("error agghoo:", err))
  invisible(err)
}
90
#' compareTo
#'
#' Compare a list of learning methods (or run only one), on data/target.
#'
#' @param data Data matrix or data.frame
#' @param target Target vector (generally)
#' @param method_s Either a single function, or a list
#'        (examples: agghoo_run, standardCV_run)
#' @param rseed Seed of the random generator (-1 means "random seed")
#' @param floss Loss function to compute the error on testing dataset.
#' @param verbose TRUE to request methods to be verbose.
#' @param ... arguments passed to method_s function(s)
#'
#' @export
compareTo <- function(
  data, target, method_s, rseed=-1, floss=NULL, verbose=TRUE, ...
) {
  if (rseed >= 0)
    set.seed(rseed)
  n <- nrow(data)
  # Hold out 10% of rows for large datasets (n >= 500), 20% otherwise.
  # Scalar condition: plain if/else, not ifelse().
  test_ratio <- if (n >= 500) 10 else 5
  test_indices <- sample(n, round(n / test_ratio))
  d <- splitTrainTest(data, target, test_indices)

  # Set error function to be used on model outputs (not in core method)
  task <- checkTask(list(...)$task, target)
  if (is.null(floss)) {
    floss <- function(y1, y2) {
      # BUGFIX: the previous ifelse() evaluated BOTH branches, so
      # mean(abs(y1 - y2)) crashed on factor/character targets in
      # classification. A scalar if/else evaluates only the needed one.
      if (task == "classification")
        mean(y1 != y2)
      else
        mean(abs(y1 - y2))
    }
  }

  # Run (and compare) all methods on the same train/test split:
  runOne <- function(o) {
    o(d$dataTrain, d$dataTest, d$targetTrain, d$targetTest, floss, verbose, ...)
  }
  errors <- c()
  if (is.list(method_s))
    errors <- sapply(method_s, runOne)
  else if (is.function(method_s))
    errors <- runOne(method_s)
  invisible(errors)
}
133
#' compareMulti
#'
#' Run compareTo N times in parallel.
#'
#' @inheritParams compareTo
#' @param N Number of calls to method(s)
#' @param nc Number of cores. Set to parallel::detectCores() if undefined.
#'           Set it to any value <=1 to say "no parallelism".
#' @param verbose TRUE to print task numbers and "Errors:" in the end.
#'
#' @export
compareMulti <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, verbose=TRUE, ...
) {
  # requireNamespace() instead of require(): require() merely returns
  # FALSE when the package is missing, letting the code fail later with
  # a confusing error; fail early and explicitly instead.
  if (!requireNamespace("parallel", quietly = TRUE))
    stop("compareMulti requires the 'parallel' package", call. = FALSE)
  if (is.na(nc))
    nc <- parallel::detectCores()

  # "One" comparison for each method in method_s (list); the task number
  # n doubles as the RNG seed so replications are reproducible.
  compareOne <- function(n) {
    if (verbose)
      print(n)
    compareTo(data, target, method_s, n, floss, verbose=FALSE, ...)
  }

  errors <- if (nc >= 2) {
    parallel::mclapply(seq_len(N), compareOne, mc.cores = nc)
  } else {
    lapply(seq_len(N), compareOne)
  }
  if (verbose)
    print("Errors:")
  # Average the per-replication errors over the N runs
  Reduce('+', errors) / N
}
168
#' compareRange
#'
#' Run compareMulti on several values of the parameter V.
#'
#' @inheritParams compareMulti
#' @param V_range Values of V to be tested.
#'
#' @export
compareRange <- function(
  data, target, method_s, N=100, nc=NA, floss=NULL, V_range=c(10,15,20), ...
) {
  # BUGFIX: the default was c(10,15,20,) — the trailing comma is an empty
  # argument, so using the default raised "argument 4 is empty" at runtime.
  args <- list(...)
  # Validate CV arguments early (result unused on purpose: V is filled in
  # below); warnings about an unspecified V are expected and suppressed.
  suppressWarnings( checkCV(args$CV) )
  errors <- lapply(V_range, function(V) {
    args$CV$V <- V
    # verbose=FALSE (was the discouraged shorthand F, which is reassignable)
    do.call(compareMulti, c(list(data=data, target=target, method_s=method_s,
                                 N=N, nc=nc, floss=floss, verbose=FALSE), args))
  })
  print(paste(V_range, errors))
}