standardCV_core <- function(data, target, task = NULL, gmodel = NULL, params = NULL,
  loss = NULL, CV = list(type = "MC", V = 10, test_size = 0.2, shuffle = TRUE)
) {
  if (!is.null(task))
    task <- match.arg(task, c("classification", "regression"))
  if (is.character(gmodel))
    gmodel <- match.arg(gmodel, c("knn", "ppr", "rf", "tree"))
  if (is.numeric(params) || is.character(params))
    params <- as.list(params)
  if (is.null(task)) {
    if (is.numeric(target))
      task <- "regression"
    else
      task <- "classification"
  }

  # Default loss: misclassification rate (or L1 distance on score matrices)
  # for classification, mean absolute error for regression.
  if (is.null(loss)) {
    loss <- function(y1, y2) {
      if (task == "classification") {
        if (is.null(dim(y1)))
          mean(y1 != y2)
        else {
          if (!is.null(dim(y2)))
            mean(rowSums(abs(y1 - y2)))
          else {
            # y1: matrix of per-class scores; y2: vector of true labels
            y2 <- as.character(y2)
            names <- colnames(y1)
            positions <- list()
            for (idx in seq_along(names))
              positions[[ names[idx] ]] <- idx
            mean(vapply(
              seq_along(y2),
              function(idx) sum(abs(y1[idx,] - positions[[ y2[idx] ]])),
              0))
          }
        }
      }
      else
        mean(abs(y1 - y2))
    }
  }

  n <- nrow(data)
  shuffle_inds <- NULL
  if (CV$type == "vfold" && CV$shuffle)
    shuffle_inds <- sample(n, n)
  # Test indices for fold v: a contiguous (optionally shuffled) block for
  # V-fold CV, or a fresh random subsample of size n * test_size for Monte-Carlo CV.
  get_testIndices <- function(v, shuffle_inds) {
    if (CV$type == "vfold") {
      first_index <- round((v-1) * n / CV$V) + 1
      last_index <- round(v * n / CV$V)
      test_indices <- first_index:last_index
      if (!is.null(shuffle_inds))
        test_indices <- shuffle_inds[test_indices]
    }
    else
      test_indices <- sample(n, round(n * CV$test_size))
    test_indices
  }
  list_testinds <- list()
  for (v in seq_len(CV$V))
    list_testinds[[v]] <- get_testIndices(v, shuffle_inds)

  # Loop over the parameter grid; keep the parameter(s) reaching the minimal
  # summed CV error, breaking ties at random at the end.
  gmodel <- agghoo::Model$new(data, target, task, gmodel, params)
  best_error <- Inf
  best_model <- NULL
  for (p in seq_len(gmodel$nmodels)) {
    error <- 0
    for (v in seq_len(CV$V)) {
      testIdx <- list_testinds[[v]]
      dataHO <- data[-testIdx,]
      testX <- data[testIdx,]
      targetHO <- target[-testIdx]
      testY <- target[testIdx]
      if (!is.matrix(dataHO) && !is.data.frame(dataHO))
        dataHO <- as.matrix(dataHO)
      if (!is.matrix(testX) && !is.data.frame(testX))
        testX <- as.matrix(testX)
      model_pred <- gmodel$get(dataHO, targetHO, p)
      prediction <- model_pred(testX)
      error <- error + loss(prediction, testY)
    }
    if (error <= best_error) {
      newModel <- list(model=model_pred, param=gmodel$getParam(p))
      if (error == best_error)
        best_model[[length(best_model)+1]] <- newModel
      else {
        best_model <- list(newModel)
        best_error <- error
      }
    }
  }
  best_model[[ sample(length(best_model), 1) ]]
}
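
# Hedged usage sketch (illustrative, not part of the package API): the dataset
# and the "knn" family are arbitrary choices; standardCV_core() returns a list
# holding the retained fold-trained model and its parameter.
#
#   res <- standardCV_core(iris[, 1:4], iris[, 5], task = "classification",
#                          gmodel = "knn",
#                          CV = list(type = "vfold", V = 5, shuffle = TRUE))
#   res$param                                 # selected parameter value
#   head(res$model(as.matrix(iris[, 1:4])))   # predictions of the retained model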

standardCV_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, verbose, CV, floss, ...
) {
  s <- standardCV_core(dataTrain, targetTrain, CV = CV, ...)
  if (verbose)
    print(paste("Parameter:", s$param))
  ps <- s$model(dataTest)
  err_s <- floss(ps, targetTest)
  if (verbose)
    print(paste("error CV:", err_s))
  invisible(err_s)
}

agghoo_run <- function(
  dataTrain, dataTest, targetTrain, targetTest, verbose, CV, floss, ...
) {
  a <- agghoo(dataTrain, targetTrain, ...)
  a$fit(CV)
  if (verbose) {
    print("Parameters:")
    print(unlist(a$getParams()))
  }
  pa <- a$predict(dataTest)
  err <- floss(pa, targetTest)
  if (verbose)
    print(paste("error agghoo:", err))
  invisible(err)
}
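
# Both runners above follow the signature that compareTo() expects for entries
# of 'method_s': (dataTrain, dataTest, targetTrain, targetTest, verbose, CV,
# floss, ...). A custom baseline can be plugged in the same way; minimal sketch
# for a numeric target (the constant predictor is purely illustrative):
#
#   constant_run <- function(dataTrain, dataTest, targetTrain, targetTest,
#                            verbose, CV, floss, ...) {
#     pc <- rep(mean(targetTrain), nrow(dataTest))  # always predict the train mean
#     err <- floss(pc, targetTest)
#     if (verbose)
#       print(paste("error constant:", err))
#     invisible(err)
#   }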

# ... arguments passed to agghoo or any other procedure
compareTo <- function(
  data, target, rseed=-1, verbose=TRUE, floss=NULL,
  CV = list(type = "MC",
            V = 10,
            test_size = 0.2,
            shuffle = TRUE),
  method_s=NULL, ...
) {
  if (rseed >= 0)
    set.seed(rseed)
  n <- nrow(data)
  test_indices <- sample( n, round(n / ifelse(n >= 500, 10, 5)) )
  dataTrain <- as.matrix(data[-test_indices,])
  targetTrain <- target[-test_indices]
  dataTest <- as.matrix(data[test_indices,])
  targetTest <- target[test_indices]

  # Set error function to be used on model outputs (not in core method)
  if (is.null(floss)) {
    task <- ifelse(is.numeric(target), "regression", "classification")
    floss <- function(y1, y2) {
      if (task == "classification") mean(y1 != y2) else mean(abs(y1 - y2))
    }
  }

  # Run (and compare) all methods:
  runOne <- function(o) {
    o(dataTrain, dataTest, targetTrain, targetTest, verbose, CV, floss, ...)
  }
  if (is.list(method_s))
    errors <- sapply(method_s, runOne)
  else if (is.function(method_s))
    errors <- runOne(method_s)
  else
    errors <- c()
  invisible(errors)
}
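
# Hedged usage sketch: compare agghoo against standard CV on a single random
# train/test split (dataset, seed and model family are illustrative only).
#
#   X <- as.matrix(iris[, 1:4]); y <- iris[, 5]
#   errs <- compareTo(X, y, rseed = 42, verbose = TRUE,
#                     method_s = list(a = agghoo_run, s = standardCV_run),
#                     gmodel = "knn")
#   errs   # named vector: one test error per compared method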

# Run compareTo N times in parallel
compareMulti <- function(
  data, target, N = 100, nc = NA,
  CV = list(type = "MC",
            V = 10,
            test_size = 0.2,
            shuffle = TRUE),
  method_s=NULL, ...
) {
  if (is.na(nc))
    nc <- parallel::detectCores()
  # One replication: the replication index serves as random seed, so splits
  # differ across runs while the whole experiment stays reproducible.
  compareOne <- function(n) {
    print(n)
    compareTo(data, target, rseed=n, verbose=FALSE, CV=CV, method_s=method_s, ...)
  }
  errors <- if (nc >= 2) {
    parallel::mclapply(1:N, compareOne, mc.cores = nc)
  } else {
    lapply(1:N, compareOne)
  }
  print("Errors:")
  Reduce('+', errors) / N
}
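
# Hedged usage sketch: average each method's test error over N random splits
# (values are illustrative; nc = 2 forks two workers, use nc = 1 on Windows).
#
#   X <- as.matrix(iris[, 1:4]); y <- iris[, 5]
#   compareMulti(X, y, N = 20, nc = 2,
#                method_s = list(a = agghoo_run, s = standardCV_run),
#                gmodel = "knn")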

# TODO: unfinished!