- if (mode == "agghoo") {
- vperfs <- list()
- for (v in 1:CV$V) {
- test_indices <- private$get_testIndices(CV, v, n, shuffle_inds)
- vperf <- private$get_modelPerf(test_indices)
- vperfs[[v]] <- vperf
- }
- private$run_res <- vperfs
- }
- else {
- # Standard cross-validation
- best_index = 0
- best_perf <- -1
- for (p in 1:private$gmodel$nmodels) {
- tot_perf <- 0
- for (v in 1:CV$V) {
- test_indices <- private$get_testIndices(CV, v, n, shuffle_inds)
- perf <- private$get_modelPerf(test_indices, p)
- tot_perf <- tot_perf + perf / CV$V
- }
- if (tot_perf > best_perf) {
- # TODO: if ex-aequos: models list + choose at random
- best_index <- p
- best_perf <- tot_perf
+ # Result: a list of V predictive models, each stored together with its
+ # hyper-parameter (kept for informational purposes)
+ private$pmodels <- list()
+ for (v in seq_len(CV$V)) {
+ # Prepare train / test data and targets from the full dataset.
+ # dataHO / targetHO ("Hold-Out"): the training rows, i.e. all rows
+ # outside the current test fold; testX / testY: the test-fold rows.
+ test_indices <- private$get_testIndices(CV, v, n, shuffle_inds)
+ dataHO <- private$data[-test_indices,]
+ testX <- private$data[test_indices,]
+ targetHO <- private$target[-test_indices]
+ testY <- private$target[test_indices]
+ # [HACK] R drops dimensions when subsetting leaves a single column,
+ # silently turning a 1-column matrix into a plain vector; coerce back:
+ if (!is.matrix(dataHO) && !is.data.frame(dataHO))
+ dataHO <- as.matrix(dataHO)
+ if (!is.matrix(testX) && !is.data.frame(testX))
+ testX <- as.matrix(testX)
+ best_model <- NULL
+ best_error <- Inf
+ for (p in seq_len(private$gmodel$nmodels)) {
+ model_pred <- private$gmodel$get(dataHO, targetHO, p)
+ prediction <- model_pred(testX)
+ error <- private$loss(prediction, testY)
+ if (error <= best_error) {
+ newModel <- list(model=model_pred, param=private$gmodel$getParam(p))
+ if (error == best_error)
+ best_model[[length(best_model)+1]] <- newModel
+ else {
+ best_model <- list(newModel)
+ best_error <- error
+ }