From b76a24cd3444299e154dda153fa9392f13adf0ed Mon Sep 17 00:00:00 2001
From: Benjamin Auder <benjamin.auder@somewhere>
Date: Mon, 22 Jan 2018 20:43:56 +0100
Subject: [PATCH 1/1] First commit

---
 .gitignore                    |  10 +++
 README.md                     |  18 +++++
 TODO                          |   2 +
 pkg/DESCRIPTION               |  39 +++++++++
 pkg/LICENSE                   |  22 +++++
 pkg/NAMESPACE                 |  13 +++
 pkg/R/A_NAMESPACE.R           |   3 +
 pkg/R/b_Algorithm.R           | 111 +++++++++++++++++++++++++
 pkg/R/b_LinearAlgorithm.R     |  65 +++++++++++++++
 pkg/R/d_dataset.R             |  28 +++++++
 pkg/R/m_ExponentialWeights.R  |  51 ++++++++++++
 pkg/R/m_GeneralizedAdditive.R |  42 ++++++++++
 pkg/R/m_KnearestNeighbors.R   |  48 +++++++++++
 pkg/R/m_MLPoly.R              |  51 ++++++++++++
 pkg/R/m_RegressionTree.R      |  36 +++++++++
 pkg/R/m_RidgeRegression.R     |  49 +++++++++++
 pkg/R/m_SVMclassif.R          |  47 +++++++++++
 pkg/R/z_getData.R             |  28 +++++++
 pkg/R/z_plot.R                | 148 ++++++++++++++++++++++++++++++++++
 pkg/R/z_plotHelper.R          | 100 +++++++++++++++++++++++
 pkg/R/z_runAlgorithm.R        |  72 +++++++++++++++++
 pkg/R/z_util.R                |  49 +++++++++++
 pkg/data/stations.RData       | Bin 0 -> 6874 bytes
 pkg/man/aggexp-package.Rd     |  38 +++++++++
 pkg/src/ew.predict_noNA.c     |  69 ++++++++++++++++
 pkg/src/ml.predict_noNA.c     |  64 +++++++++++++++
 26 files changed, 1203 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 TODO
 create mode 100644 pkg/DESCRIPTION
 create mode 100644 pkg/LICENSE
 create mode 100644 pkg/NAMESPACE
 create mode 100644 pkg/R/A_NAMESPACE.R
 create mode 100644 pkg/R/b_Algorithm.R
 create mode 100644 pkg/R/b_LinearAlgorithm.R
 create mode 100644 pkg/R/d_dataset.R
 create mode 100644 pkg/R/m_ExponentialWeights.R
 create mode 100644 pkg/R/m_GeneralizedAdditive.R
 create mode 100644 pkg/R/m_KnearestNeighbors.R
 create mode 100644 pkg/R/m_MLPoly.R
 create mode 100644 pkg/R/m_RegressionTree.R
 create mode 100644 pkg/R/m_RidgeRegression.R
 create mode 100644 pkg/R/m_SVMclassif.R
 create mode 100644 pkg/R/z_getData.R
 create mode 100644 pkg/R/z_plot.R
 create mode 100644 pkg/R/z_plotHelper.R
 create mode 100644 pkg/R/z_runAlgorithm.R
 create mode 100644 pkg/R/z_util.R
 create mode 100644 pkg/data/stations.RData
 create mode 100644 pkg/man/aggexp-package.Rd
 create mode 100644 pkg/src/ew.predict_noNA.c
 create mode 100644 pkg/src/ml.predict_noNA.c

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8cfbb70
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+.RData
+!/pkg/data/*.RData
+.Rhistory
+.ipynb_checkpoints/
+*.so
+*.o
+*.swp
+*~
+/pkg/man/*
+!/pkg/man/aggexp-package.Rd
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b15de94
--- /dev/null
+++ b/README.md
@@ -0,0 +1,18 @@
+# Experts aggregation for air quality forecasting
+
+Joint work with [Jean-Michel Poggi](http://www.math.u-psud.fr/~poggi/) and [Bruno Portier](http://lmi2.insa-rouen.fr/~bportier/)
+
+---
+
+This project gathers the public material of a contract with [AirNormand](http://www.airnormand.fr/), an institute located in Normandie (France)
+which is in charge of monitoring and forecasting the air quality in its region.
+Private parts (intermediate reports, custom code) were stripped out.
+
+Several forecasting models are available, but it is difficult to choose one and discard the others, because 
+their performance varies significantly over time. 
+Therefore, the main goal of our study is to experiment with several rules of (sequential) expert aggregation, and to 
+compare their performance against individual forecasters and some oracles.
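+
+A minimal usage sketch with the bundled sample data (three artificial "stations" with experts P, MA3 and MA10):
+
+```r
+library(aggexp)
+data(stations) # loads 'st', a list of 3 station data frames
+r = runAlgorithm("ew", st, c("P", "MA3", "MA10"))
+plotCurves(r)  # measures versus experts forecasts at the first station
+```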
+
+---
+
+The final report may be found at [this location](http://www.airnormand.fr/Publications/Publications-telechargeables/Rapports-d-etudes).
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..196c62a
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+Clarify what ridge method is really doing.
+Improve / expand the documentation.
diff --git a/pkg/DESCRIPTION b/pkg/DESCRIPTION
new file mode 100644
index 0000000..f38407a
--- /dev/null
+++ b/pkg/DESCRIPTION
@@ -0,0 +1,39 @@
+Package: aggexp
+Title: aggexp: AGGregation of EXPerts to forecast time-series
+Version: 0.2-3
+Description: As the title suggests, past predictions of a set of given experts
+    are aggregated until time t to predict at time t+1, (generally) as a weighted
+    sum of values at time t. Several weight optimization algorithms are compared:
+    exponential weights, MLPoly, and some classical statistical learning procedures
+    (Ridge, SVM...).
+Author: Benjamin Auder <Benjamin.Auder@math.u-psud.fr> [aut,cre],
+    Jean-Michel Poggi <Jean-Michel.Poggi@parisdescartes.fr> [ctb],
+    Bruno Portier <Bruno.Portier@insa-rouen.fr> [ctb]
+Maintainer: Benjamin Auder <Benjamin.Auder@math.u-psud.fr>
+Depends:
+    R (>= 3.0)
+Suggests:
+    gam,
+    tree,
+    kernlab
+LazyData: yes
+URL: http://git.auder.net/?p=aggexp.git
+License: MIT + file LICENSE
+Collate:
+    'A_NAMESPACE.R'
+    'z_util.R'
+    'b_Algorithm.R'
+    'b_LinearAlgorithm.R'
+    'd_dataset.R'
+    'm_ExponentialWeights.R'
+    'm_GeneralizedAdditive.R'
+    'm_KnearestNeighbors.R'
+    'm_MLPoly.R'
+    'm_RegressionTree.R'
+    'm_RidgeRegression.R'
+    'm_SVMclassif.R'
+    'z_getData.R'
+    'z_runAlgorithm.R'
+    'z_plotHelper.R'
+    'z_plot.R'
+RoxygenNote: 5.0.1
diff --git a/pkg/LICENSE b/pkg/LICENSE
new file mode 100644
index 0000000..f02a780
--- /dev/null
+++ b/pkg/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2014-2016, Benjamin AUDER
+              2014-2016, Jean-Michel Poggi
+              2014-2016, Bruno Portier
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pkg/NAMESPACE b/pkg/NAMESPACE
new file mode 100644
index 0000000..766b75b
--- /dev/null
+++ b/pkg/NAMESPACE
@@ -0,0 +1,13 @@
+# Generated by roxygen2: do not edit by hand
+
+export(getBestConvexCombination)
+export(getBestExpert)
+export(getBestLinearCombination)
+export(getData)
+export(getIndicators)
+export(plotCloud)
+export(plotCurves)
+export(plotError)
+export(plotRegret)
+export(runAlgorithm)
+useDynLib(aggexp)
diff --git a/pkg/R/A_NAMESPACE.R b/pkg/R/A_NAMESPACE.R
new file mode 100644
index 0000000..4651887
--- /dev/null
+++ b/pkg/R/A_NAMESPACE.R
@@ -0,0 +1,3 @@
+#' @useDynLib aggexp
+#'
+NULL
diff --git a/pkg/R/b_Algorithm.R b/pkg/R/b_Algorithm.R
new file mode 100644
index 0000000..3ff9cc9
--- /dev/null
+++ b/pkg/R/b_Algorithm.R
@@ -0,0 +1,111 @@
+#' @include z_util.R
+
+#' @title Algorithm
+#'
+#' @description Generic class to represent an algorithm
+#'
+#' @field H The window [t-H+1, t] considered for prediction at time step t+1
+#' @field data Data frame of the last H expert forecasts + observations.
+#'
+Algorithm = setRefClass(
+	Class = "Algorithm",
+
+	fields = list(
+		H = "numeric",
+		data = "data.frame"
+	),
+
+	methods = list(
+		initialize = function(...)
+		{
+			"Initialize (generic) Algorithm object"
+
+			callSuper(...)
+			if (length(H) == 0 || H < 1)
+				H <<- Inf
+		},
+		inputNextForecasts = function(x)
+		{
+			"Obtain a new series of vectors of experts forecasts (1 to K)"
+
+			nd = nrow(data)
+			nx = nrow(x)
+			indices = (nd+1):(nd+nx)
+
+			appendedData = as.data.frame(matrix(nrow=nx, ncol=ncol(data), NA))
+			names(appendedData) = names(data)
+			data <<- rbind(data, appendedData)
+			data[indices,names(x)] <<- x
+		},
+		inputNextObservations = function(y)
+		{
+			"Obtain the observations corresponding to the last input forecasts"
+
+			#if all experts made a large unilateral error and prediction is very bad, remove data
+			n = nrow(data)
+			lastTime = data[n,"Date"]
+			xy = subset(data, subset=(Date == lastTime))
+			xy[,"Measure"] = y
+			x = xy[,names(xy) != "Measure"]
+			y = xy[,"Measure"]
+			ranges = apply(x-y, 1, range)
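+			#keep a row unless every expert under-predicts, or every expert over-predicts, by more than MAX_ERROR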
+			predictableIndices = (ranges[2,] > -MAX_ERROR & ranges[1,] < MAX_ERROR)
+#			predictableIndices = 1:length(y)
+			data <<- data[1:(n-nrow(xy)),]
+			data <<- rbind(data, xy[predictableIndices,])
+
+			#oldest rows are removed to prevent infinitely growing memory usage,
+			#or to allow a window effect (parameter H)
+			delta = nrow(data) - min(H, MAX_HISTORY)
+			if (delta > 0)
+				data <<- data[-(1:delta),]
+		},
+		predict_withNA = function()
+		{
+			"Predict observations corresponding to the last input forecasts. Potential NAs"
+
+			n = nrow(data)
+			if (data[n,"Date"] == 1)
+			{
+				#no measures added so far
+				return (rep(NA, n))
+			}
+
+			nx = n - nrow(subset(data, subset = (Date == data[n,"Date"])))
+			x = data[(nx+1):n, !names(data) %in% c("Date","Measure","Station")]
+			experts = names(x)
+			prediction = c()
+
+			#extract a maximal submatrix of data without NAs
+
+			iy = getNoNAindices(x, 2)
+			if (!any(iy))
+			{
+				#all columns of x have at least one NA
+				return (rep(NA, n-nx))
+			}
+
+			data_noNA = data[1:nx,c(experts[iy], "Measure")]
+			ix = getNoNAindices(data_noNA)
+			if (!any(ix))
+			{
+				#no full line with NA-pattern similar to x[,iy]
+				return (rep(NA, n-nx))
+			}
+
+			data_noNA = data_noNA[ix,]
+			xiy = as.data.frame(x[,iy])
+			names(xiy) = names(x)[iy]
+			res = predict_noNA(data_noNA, xiy)
+			#basic sanitization: force all values >=0
+			res[res < 0.] = 0.
+			return (res)
+		},
+		predict_noNA = function(XY, x)
+		{
+			"Predict observations corresponding to x. No NAs"
+
+			#empty default implementation: to implement in inherited classes
+		}
+	)
+)
diff --git a/pkg/R/b_LinearAlgorithm.R b/pkg/R/b_LinearAlgorithm.R
new file mode 100644
index 0000000..960b067
--- /dev/null
+++ b/pkg/R/b_LinearAlgorithm.R
@@ -0,0 +1,65 @@
+#' @include b_Algorithm.R
+
+#' @title Linear Algorithm
+#'
+#' @description Generic class to represent a linear algorithm. 
+#' TODO: the weights matrix grows indefinitely; not needed in a production environment. 
+#' Inherits \code{\link{Algorithm}}
+#'
+#' @field weights The matrix of weights (in rows) associated to each expert (in columns)
+#'
+LinearAlgorithm = setRefClass(
+	Class = "LinearAlgorithm",
+
+	fields = c(
+		weights = "matrix"
+	),
+
+	contains = "Algorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			weights <<- matrix(nrow=0, ncol=ncol(data)-3)
+		},
+
+		appendWeight = function(weight)
+		{
+			"Append the last computed weights to the weights matrix, for further plotting"
+
+			n = nrow(data)
+			nx = n - nrow(subset(data, subset = (Date == data[n,"Date"])))
+			x = data[(nx+1):n, !names(data) %in% c("Date","Measure","Station")]
+			iy = getNoNAindices(x, 2)
+
+			completedWeight = rep(NA, ncol(x))
+			completedWeight[iy] = weight
+			weights <<- rbind(weights, completedWeight)
+		},
+
+		plotWeights = function(station=1, start=1, ...)
+		{
+			"Plot the weights of each expert over time"
+
+			if (is.character(station))
+				station = match(station, stations)
+
+			#keep only full weights (1 to K)
+			weights_ = weights[getNoNAindices(weights),]
+			weights_ = weights_[start:nrow(weights_),]
+
+			yRange = range(weights_, na.rm=TRUE)
+			K = ncol(weights_)
+			cols = rainbow(K)
+			par(mar=c(5,4.5,1,1), cex=1.5)
+			for (i in 1:K)
+			{
+				plot(weights_[,i], type="l", xaxt="n", ylim=yRange, col=cols[i], xlab="", ylab="",cex.axis=1.5, ...)
+				par(new=TRUE)
+			}
+			axis(side=1, at=seq(from=1,to=nrow(weights_),by=30), labels=seq(from=0,to=nrow(weights_),by=30) + start, cex.axis=1.5)
+			title(xlab="Time",ylab="Weight", cex.lab=1.6)
+		}
+	)
+)
diff --git a/pkg/R/d_dataset.R b/pkg/R/d_dataset.R
new file mode 100644
index 0000000..6300284
--- /dev/null
+++ b/pkg/R/d_dataset.R
@@ -0,0 +1,28 @@
+#' Sample data built from DataMarket Rhine River time-series
+#'
+#' 3 "stations": original series, reversed series, average of both.\cr
+#' "Experts": persistence (P), moving averages with windows of 3 (MA3) and 10 (MA10).\cr
+#' -----\cr
+#' Generating R code:\cr
+#' library(rdatamarket)\cr
+#' serie = dmseries("https://datamarket.com/data/set/22wp/rhine-river-near-basle-switzerland-1807-1957")\cr
+#' dates = seq(as.Date("1807-07-01"),as.Date("1956-07-01"),"years")\cr
+#' serie = list(serie, rev(serie), (serie+rev(serie))/2)\cr
+#' st = list()\cr
+#' for (i in 1:3) {\cr
+#'	st[[i]] = data.frame(\cr
+#'		Date=dates,\cr
+#'		P=c(NA,serie[[i]][1:149]),\cr
+#'		MA3=c(rep(NA,3),sapply(4:150, function(j) mean(serie[[i]][(j-3):(j-1)]) )),\cr
+#'		MA10=c(rep(NA,10),sapply(11:150, function(j) mean(serie[[i]][(j-10):(j-1)]) )),\cr
+#'		Measure=as.double(serie[[i]])
+#'	)\cr
+#' }\cr
+#' save(st, file="stations.RData")
+#'
+#' @name stations
+#' @docType data
+#' @usage data(stations)
+#' @references \url{https://datamarket.com/data/set/22wp/rhine-river-near-basle-switzerland-1807-1957}
+#' @format A list of 3 dataframes with 150 rows and 5 columns: Date,P,MA3,MA10,Measure
+NULL
diff --git a/pkg/R/m_ExponentialWeights.R b/pkg/R/m_ExponentialWeights.R
new file mode 100644
index 0000000..0916287
--- /dev/null
+++ b/pkg/R/m_ExponentialWeights.R
@@ -0,0 +1,51 @@
+#' @include b_LinearAlgorithm.R
+
+#' @title Exponential Weights Algorithm
+#'
+#' @description Exponential Weights Algorithm.
+#' Inherits \code{\link{LinearAlgorithm}}
+#'
+#' @field alpha Importance of weights redistribution, in [0,1]. Default: 0
+#' @field grad Whether or not to use the (sub)gradient trick. Default: FALSE
+#'
+ExponentialWeights = setRefClass(
+	Class = "ExponentialWeights",
+
+	fields = c(
+		alpha = "numeric",
+		grad = "logical"
+	),
+
+	contains = "LinearAlgorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			if (length(alpha) == 0 || alpha < 0. || alpha > 1.)
+				alpha <<- 0. #no redistribution
+			if (length(grad) == 0)
+				grad <<- FALSE
+		},
+		predict_noNA = function(XY, x)
+		{
+			K = ncol(XY) - 1
+			if (K == 1)
+			{
+				#shortcut: nothing to combine
+				finalWeight = 1.
+			}
+
+			else
+			{
+				X = XY[,names(XY) != "Measure"]
+				Y = XY[,"Measure"]
+				finalWeight = .C("ew_predict_noNA", X = as.double(t(X)), Y = as.double(Y), n = as.integer(nrow(XY)), 
+					K = as.integer(K), alpha=as.double(alpha), grad = as.integer(grad), weight=double(K))$weight
+			}
+
+			appendWeight(finalWeight)
+			return (matricize(x) %*% finalWeight)
+		}
+	)
+)
diff --git a/pkg/R/m_GeneralizedAdditive.R b/pkg/R/m_GeneralizedAdditive.R
new file mode 100644
index 0000000..5baf60b
--- /dev/null
+++ b/pkg/R/m_GeneralizedAdditive.R
@@ -0,0 +1,42 @@
+#' @include b_Algorithm.R
+
+#' @title Generalized Additive Model
+#'
+#' @description Generalized Additive Model using the \code{gam} package.
+#' Inherits \code{\link{Algorithm}}
+#'
+#' @field family Family of the distribution to be used. Default: gaussian().
+#'
+GeneralizedAdditive = setRefClass(
+	Class = "GeneralizedAdditive",
+
+	fields = c(
+		"family" #class "family"
+	),
+
+	contains = "Algorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			if (class(family) == "uninitializedField")
+				family <<- gaussian()
+		},
+		predict_noNA = function(XY, x)
+		{
+			#GAM needs enough data to provide reliable results; fall back to a ridge fit below 30 rows
+			if (nrow(XY) < 30)
+			{
+				X = XY[,names(XY) != "Measure"]
+				Y = XY[,"Measure"]
+				weight = ridgeSolve(X, Y, LAMBDA)
+				return (matricize(x) %*% weight)
+			}
+
+			suppressPackageStartupMessages( require(gam) )
+			g = gam(Measure ~ ., data=XY, family=family)
+			return (stats::predict(g, x))
+		}
+	)
+)
diff --git a/pkg/R/m_KnearestNeighbors.R b/pkg/R/m_KnearestNeighbors.R
new file mode 100644
index 0000000..926b22b
--- /dev/null
+++ b/pkg/R/m_KnearestNeighbors.R
@@ -0,0 +1,48 @@
+#' @include b_Algorithm.R
+
+#' @title K Nearest Neighbors Algorithm
+#'
+#' @description K Nearest Neighbors Algorithm.
+#' Inherits \code{\link{Algorithm}}
+#'
+#' @field k Number of neighbors to consider. Default: \code{n^(2/3)}
+#'
+KnearestNeighbors = setRefClass(
+	Class = "KnearestNeighbors",
+
+	fields = c(
+		k = "numeric"
+	),
+
+	contains = "Algorithm",
+
+	methods = list(
+		predictOne = function(X, Y, x)
+		{
+			"Find the k nearest neighbors of one row, and solve a ridge-regularized linear system on them to obtain weights"
+
+			distances = sqrt(apply(X, 1, function(z)(return (sum((z-x)^2)))))
+			rankedHistory = sort(distances, index.return=TRUE)
+			n = length(Y)
+			k_ = ifelse(length(k) == 0 || k <= 0. || k > n, getKnn(n), as.integer(k))
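+			#fit a small ridge regression on the k_ nearest rows; its weights are then applied to x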
+			weight = ridgeSolve(matricize(X[rankedHistory$ix[1:k_],]), Y[rankedHistory$ix[1:k_]], LAMBDA)
+
+			return (sum(x * weight))
+		},
+		predict_noNA = function(XY, x)
+		{
+			X = XY[,names(XY) != "Measure"]
+			K = ncol(XY) - 1
+			if (K == 1)
+				X = as.matrix(X)
+			else if (length(XY[["Measure"]]) == 1)
+				X = t(as.matrix(X))
+			Y = XY[,"Measure"]
+			x = matricize(x)
+			res = c()
+			for (i in 1:nrow(x))
+				res = c(res, predictOne(X, Y, x[i,]))
+			return (res)
+		}
+	)
+)
diff --git a/pkg/R/m_MLPoly.R b/pkg/R/m_MLPoly.R
new file mode 100644
index 0000000..a19a2c9
--- /dev/null
+++ b/pkg/R/m_MLPoly.R
@@ -0,0 +1,51 @@
+#' @include b_LinearAlgorithm.R
+
+#' @title MLpoly Algorithm
+#'
+#' @description MLpoly Algorithm.
+#' Inherits \code{\link{LinearAlgorithm}}
+#'
+#' @field alpha Importance of weights redistribution, in [0,1]. Default: 0
+#' @field grad Whether or not to use the (sub)gradient trick. Default: FALSE
+#'
+MLpoly = setRefClass(
+	Class = "MLpoly",
+
+	fields = c(
+		alpha = "numeric",
+		grad = "logical"
+	),
+
+	contains = "LinearAlgorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			if (length(alpha) == 0 || alpha < 0. || alpha > 1.)
+				alpha <<- 0. #no redistribution
+			if (length(grad) == 0)
+				grad <<- FALSE
+		},
+		predict_noNA = function(XY, x)
+		{
+			K = ncol(XY) - 1
+			if (K == 1)
+			{
+				#shortcut: nothing to combine
+				finalWeight = 1.
+			}
+
+			else
+			{
+				X = XY[,names(XY) != "Measure"]
+				Y = XY[,"Measure"]
+				finalWeight = .C("ml_predict_noNA", X = as.double(t(X)), Y = as.double(Y), n = as.integer(nrow(XY)), 
+					K = as.integer(K), alpha=as.double(alpha), grad = as.integer(grad), weight=double(K))$weight
+			}
+
+			appendWeight(finalWeight)
+			return (matricize(x) %*% finalWeight)
+		}
+	)
+)
diff --git a/pkg/R/m_RegressionTree.R b/pkg/R/m_RegressionTree.R
new file mode 100644
index 0000000..d51e408
--- /dev/null
+++ b/pkg/R/m_RegressionTree.R
@@ -0,0 +1,36 @@
+#' @include b_Algorithm.R
+
+#' @title Regression Tree
+#'
+#' @description Regression Tree using the \code{tree} package.
+#' Inherits \code{\link{Algorithm}}
+#'
+#' @field nleaf Number of leaf nodes after pruning. Default: Inf (no pruning)
+#'
+RegressionTree = setRefClass(
+	Class = "RegressionTree",
+
+	fields = c(
+		nleaf = "numeric"
+	),
+
+	contains = "Algorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			if (length(nleaf) == 0 || nleaf < 1)
+				nleaf <<- Inf
+		},
+		predict_noNA = function(XY, x)
+		{
+			require(tree, quietly=TRUE)
+			rt = tree(Measure ~ ., data=XY)
+			treeSize = sum( rt$frame[["var"]] == "<leaf>" )
+			if (treeSize > nleaf)
+				rt = prune.tree(rt, best = nleaf)
+			return (stats::predict(rt, as.data.frame(x)))
+		}
+	)
+)
diff --git a/pkg/R/m_RidgeRegression.R b/pkg/R/m_RidgeRegression.R
new file mode 100644
index 0000000..020894d
--- /dev/null
+++ b/pkg/R/m_RidgeRegression.R
@@ -0,0 +1,49 @@
+#' @include b_LinearAlgorithm.R
+
+#' @title Ridge Regression Algorithm
+#'
+#' @description Ridge Regression Algorithm.
+#' Inherits \code{\link{LinearAlgorithm}}
+#'
+#' @field lambda Value of lambda (leave undefined to select it by cross-validation). Default: undefined
+#' @field lambdas Vector of "optimal" lambda values over time. TODO: remove for production
+#'
+RidgeRegression = setRefClass(
+	Class = "RidgeRegression",
+
+	fields = c(
+		lambda = "numeric",
+		lambdas = "numeric"
+	),
+
+	contains = "LinearAlgorithm",
+	
+	methods = list(
+		predict_noNA = function(XY, x)
+		{
+			if (length(lambda) > 0 || nrow(XY) < 30) #TODO: magic number
+			{
+				#simple ridge regression with fixed lambda (not enough history for CV)
+				X = matricize(XY[,names(XY) != "Measure"])
+				Y = XY[,"Measure"]
+				lambda_ = ifelse(length(lambda) > 0, lambda, LAMBDA)
+				weight = ridgeSolve(X, Y, lambda_)
+			}
+
+			else
+			{
+				#enough data for cross-validation
+				require(MASS, quietly=TRUE)
+				gridLambda = seq(0.05,5.05,0.1)
+				res_lmr = lm.ridge(Measure ~ . + 0, data=XY, lambda = gridLambda)
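+				#pick the lambda minimizing the generalized cross-validation (GCV) criterion, with its coefficients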
+				lambda_ = res_lmr$lambda[which.min(res_lmr$GCV)]
+				weight = as.matrix(coef(res_lmr))[which.min(res_lmr$GCV),]
+			}
+
+			lambdas <<- c(lambdas, lambda_)
+
+			appendWeight(weight)
+			return (matricize(x) %*% weight)
+		}
+	)
+)
diff --git a/pkg/R/m_SVMclassif.R b/pkg/R/m_SVMclassif.R
new file mode 100644
index 0000000..30e9a2b
--- /dev/null
+++ b/pkg/R/m_SVMclassif.R
@@ -0,0 +1,47 @@
+#' @include b_Algorithm.R
+
+#' @title SVM Algorithm
+#'
+#' @description SVM classifier.
+#' Inherits \code{\link{Algorithm}}
+#'
+#' @field kernel TODO
+#' @field someParam TODO
+#'
+SVMclassif = setRefClass(
+	Class = "SVMclassif",
+
+	fields = c(
+		kernel = "numeric",
+		someParam = "logical"
+	),
+
+	contains = "Algorithm",
+
+	methods = list(
+		initialize = function(...)
+		{
+			callSuper(...)
+			#TODO
+		},
+		predict_noNA = function(XY, x)
+		{
+			if (nrow(XY) <= 5)
+				return (10) #TODO
+
+			require(kernlab, quietly=TRUE)
+			XY[,"alert"] = XY[,"Measure"] > 30
+			alertsIndices = XY[,"alert"]
+			XY[alertsIndices,"alert"] = "alert"
+			XY[!alertsIndices,"alert"] = "noalert"
+			XY[,"alert"] = as.factor(XY[,"alert"])
+			XY[,"Measure"] = NULL
+
+			ks = ksvm(alert ~ ., data=XY)
+			pred = as.character(predict(ks, as.data.frame(x)))
+			pred[pred == "alert"] = 70
+			pred[pred == "noalert"] = 10
+			return (as.numeric(pred))
+		}
+	)
+)
diff --git a/pkg/R/z_getData.R b/pkg/R/z_getData.R
new file mode 100644
index 0000000..43c458b
--- /dev/null
+++ b/pkg/R/z_getData.R
@@ -0,0 +1,28 @@
+#' @title Get forecasts + observations
+#'
+#' @description Get forecasts of all specified experts for all specified stations, along with (ordered) dates and (unordered) station indices.
+#'
+#' @param stations List of station dataframes (as in the sample)
+#' @param experts Names of the experts (as in dataframe header)
+#'
+#' @export
+getData = function(stations, experts)
+{
+	data = as.data.frame(matrix(nrow=0, ncol=1 + length(experts) + 2))
+	names(data) = c("Date", experts, "Measure", "Station")
+	for (i in 1:length(stations))
+	{
+		#date index is sufficient; also add station index
+		stationInfo = cbind(
+			Date = 1:nrow(stations[[i]]),
+			stations[[i]] [,names(stations[[i]]) %in% experts],
+			Measure = stations[[i]][,"Measure"],
+			Station = i)
+		data = rbind(data, stationInfo)
+	}
+
+	#extra step: order by date (would be a DB request)
+	data = data[order(data[,"Date"]),]
+
+	return (data)
+}
diff --git a/pkg/R/z_plot.R b/pkg/R/z_plot.R
new file mode 100644
index 0000000..9e94913
--- /dev/null
+++ b/pkg/R/z_plot.R
@@ -0,0 +1,148 @@
+#' @include z_plotHelper.R
+
+#' @title Plot forecasts/observations
+#'
+#' @description Plot the measures at one station versus all experts forecasts.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}.
+#' @param station Name or index of the station to consider. Default: the first one
+#' @param interval Time interval for the plot. Default: all time range.
+#' @param experts Subset of experts for the plot. Default: all experts.
+#' @param cols Colors of the experts curves. Default: \code{rainbow(length(experts))}.
+#' @param ... Additional arguments to be passed to graphics::plot method.
+#'
+#' @export
+plotCurves = function(r, station=1, interval=1:(nrow(r$data)/length(r$stations)), experts=r$experts, cols=rainbow(length(experts)), ...)
+{
+	if (is.character(station))
+		station = match(station, r$stations)
+	if (is.numeric(experts))
+		experts = r$experts[experts]
+
+	XY = subset(r$data[interval,], subset = (Station == station), select = c(experts,"Measure"))
+	indices = getNoNAindices(XY)
+	XY = XY[indices,]
+	X = as.matrix(XY[,names(XY) %in% experts])
+	Y = XY[,"Measure"]
+
+	yRange = range(XY)
+	par(mar=c(5,4.5,1,1), cex=1.5)
+	for (i in 1:length(experts))
+	{
+		plot(X[,i],ylim=yRange,type="l",lty="dotted",col=cols[i],xlab="",ylab="",xaxt="n",yaxt="n", lwd=2, ...)
+		par(new=TRUE)
+	}
+	plot(Y, type="l", ylim=yRange, xlab="", ylab="", lwd=2, cex.axis=1.5, ...)
+	title(xlab="Time",ylab="Forecasts / Measures", cex.lab=1.6)
+	legend("topright", lwd=c(2,1),lty=c("solid","dotted"),horiz=TRUE,legend=c("Measures","Forecasts"))
+}
+
+#' @title Plot error
+#'
+#' @description Plot the absolute error over time at one station.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}.
+#' @param station Name or index of the station to consider. Default: the first one
+#' @param start First index to consider (too much variability in early errors)
+#' @param noNA TRUE to show only errors associated with full lines (old behavior)
+#' @param ... Additional arguments to be passed to graphics::plot method.
+#'
+#' @export
+plotError = function(r, station=1, start=1, noNA=TRUE, ...)
+{
+	if (is.character(station))
+		station = match(station, r$stations)
+
+	XY = subset(r$data, subset = (Station == station), select = c(r$experts,"Measure","Prediction"))
+	Y = XY[,"Measure"]
+	hatY = XY[,"Prediction"]
+	indices = !is.na(Y) & !is.na(hatY)
+	if (noNA)
+	{
+		X = XY[,names(XY) %in% r$experts]
+		indices = indices & getNoNAindices(X)
+	}
+	Y = Y[indices]
+	hatY = hatY[indices]
+
+	error = abs(Y - hatY)
+	par(mar=c(5,4.5,1,1), cex=1.5)
+	plot(error, type="l", xaxt="n", xlab="Time",ylab="L1 error", cex.lab=1.6, cex.axis=1.5, ...)
+	axis(side=1, at=(seq(from=start,to=length(Y),by=30) - start), labels=seq(from=start,to=length(Y),by=30), cex.axis=1.5)
+}
+
+#' @title Plot regret
+#'
+#' @description Plot the regret over time at one station.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}.
+#' @param vs Linear weights to compare with. Can be obtained by the \code{getBestXXX} methods, or by any other mean.
+#' @param station Name or index of the station to consider. Default: the first one
+#' @param start First index to consider (too much variability in early errors)
+#' @param ... Additional arguments to be passed to graphics::plot method.
+#'
+#' @export
+plotRegret = function(r, vs, station=1, start=1, ...)
+{
+	if (is.character(station))
+		station = match(station, r$stations)
+
+	XY = subset(r$data, subset = (Station == station), select = c(r$experts,"Measure","Prediction"))
+	X = XY[,names(XY) %in% r$experts]
+	Y = XY[,"Measure"]
+	hatY = XY[,"Prediction"]
+
+	indices = !is.na(Y) & !is.na(hatY) & getNoNAindices(X)
+	X = as.matrix(X[indices,])
+	Y = Y[indices]
+	hatY = hatY[indices]
+
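+	#regret = cumulative mean quadratic error of the algorithm minus that of the fixed combination vs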
+	error2 = abs(Y - hatY)^2
+	vsError2 = abs(Y - X %*% vs)^2
+	cumErr2 = cumsum(error2) / seq_along(error2)
+	cumVsErr2 = cumsum(vsError2) / seq_along(vsError2)
+	regret = cumErr2 - cumVsErr2
+
+	par(mar=c(5,4.5,1,1), cex=1.5)
+	plot(regret, type="l", xaxt="n", xlab="Time", ylab="Regret", cex.lab=1.6, cex.axis=1.5, ...)
+	abline(a=0., b=0., col=2)
+	axis(side=1, at=(seq(from=start,to=length(Y),by=30) - start), labels=seq(from=start,to=length(Y),by=30), cex.axis=1.5)
+}
+
+#' @title Plot predicted/expected cloud
+#'
+#' @description Plot the cloud of forecasts/observations + statistical indicators.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}.
+#' @param thresh Threshold to consider for alerts (usually 30 or 50)
+#' @param hintThresh Thresholds to draw on the plot to help visualization. Often \code{c(30,50,80)}
+#' @param station Name or index of the station to consider. Default: the first one
+#' @param noNA TRUE to show only errors associated with full lines (old behavior)
+#' @param ... Additional arguments to be passed to graphics::plot method.
+#'
+#' @export
+plotCloud = function(r, thresh=30, hintThresh=c(30,50,80), station=1, noNA=TRUE, ...)
+{
+	if (is.character(station))
+		station = match(station, r$stations)
+
+	XY = subset(r$data, subset = (Station == station), select = c(r$experts,"Measure","Prediction"))
+	Y = XY[,"Measure"]
+	hatY = XY[,"Prediction"]
+	indices = !is.na(Y) & !is.na(hatY)
+	if (noNA)
+	{
+		X = XY[,names(XY) %in% r$experts]
+		indices = indices & getNoNAindices(X)
+	}
+	Y = Y[indices]
+	hatY = hatY[indices]
+
+	indics = getIndicators(r, thresh, station, noNA)
+
+	par(mar=c(5,5,3,2), cex=1.5)
+	plot(Y, hatY, xlab="Measured PM10", ylab="Predicted PM10",
+		cex.lab=1.6, cex.axis=1.5, xlim=c(0,120), ylim=c(0,120), ...)
+	abline(0,1,h=hintThresh,v=hintThresh,col=2,lwd=2)
+	legend("topleft",legend=paste("RMSE ",indics$RMSE))
+	legend("bottomright",legend=c(paste("TS ",indics$TS)))
+}
diff --git a/pkg/R/z_plotHelper.R b/pkg/R/z_plotHelper.R
new file mode 100644
index 0000000..f522f0f
--- /dev/null
+++ b/pkg/R/z_plotHelper.R
@@ -0,0 +1,100 @@
+#' @include z_runAlgorithm.R
+
+#' @title Get best expert index
+#'
+#' @description Return the weights corresponding to the best expert (...0,1,0...)
+#'
+#' @param r Output of \code{\link{runAlgorithm}}
+#'
+#' @export
+getBestExpert = function(r)
+{
+	X = as.matrix(r$data[,names(r$data) %in% r$experts])
+	Y = r$data[,"Measure"]
+
+	bestIndex = which.min(colMeans(abs(X - Y)^2, na.rm=TRUE))
+	res = rep(0.0, length(r$experts))
+	res[bestIndex] = 1.0
+	return (res)
+}
+
+#' @title Get best convex combination
+#'
+#' @description Return the weights p minimizing the quadratic error ||X*p-Y||^2 under the convexity constraint.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}
+#'
+#' @export
+getBestConvexCombination = function(r)
+{
+	X = r$data[,r$experts]
+	Y = as.double(r$data[,"Measure"])
+	indices = getNoNAindices(X) & !is.na(Y)
+	X = as.matrix(X[indices,])
+	Y = Y[indices]
+
+	K = length(r$experts)
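+	#simplex constraint (p_i >= 0, sum(p) = 1), relaxed to 0.99999 <= sum(p) <= 1.00001 since constrOptim needs an interior starting point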
+	return (constrOptim(theta=rep(1.0/K,K),
+		method="Nelder-Mead", #TODO: others not better... why?
+		f=function(p){return(sum((X%*%p-Y)^2))}, 
+		grad=NULL, #function(p){return(2.*t(X)%*%(X%*%p-Y))}, 
+		ui=rbind(rep(1.,K),rep(-1.,K),diag(K)), ci=c(0.99999,-1.00001, rep(0.,K)), 
+		control=list(ndeps=1e-3,maxit=10000))$par)
+}
+
+#' @title Get best linear combination
+#'
+#' @description Return the weights u minimizing the quadratic error ||r$X*u-r$Y||^2
+#'
+#' @param r Output of \code{\link{runAlgorithm}}
+#'
+#' @export
+getBestLinearCombination = function(r)
+{
+	X = r$data[,r$experts]
+	Y = r$data[,"Measure"]
+	indices = getNoNAindices(X) & !is.na(Y)
+	X = as.matrix(X[indices,])
+	Y = Y[indices]
+
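+	#least squares solution: u = X^+ Y, with X^+ the Moore-Penrose pseudoinverse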
+	return (mpPsInv(X) %*% Y)
+}
+
+#' @title Get statistical indicators
+#'
+#' @description Return respectively the TS, FA, MA, RMSE, EV indicators in a list.
+#'
+#' @param r Output of \code{\link{runAlgorithm}}
+#' @param thresh Threshold to compute alerts indicators.
+#' @param station Name or index of the station to consider. Default: the first one
+#' @param noNA TRUE to show only errors associated with full lines (old behavior)
+#'
+#' @export
+getIndicators = function(r, thresh, station=1, noNA=TRUE)
+{
+	if (is.character(station))
+		station = match(station, r$stations)
+
+	#TODO: duplicated block (same in plotCloud())
+	XY = subset(r$data, subset = (Station == station), select = c(r$experts,"Measure","Prediction"))
+	Y = XY[,"Measure"]
+	hatY = XY[,"Prediction"]
+	indices = !is.na(Y) & !is.na(hatY)
+	if (noNA)
+	{
+		X = XY[,names(XY) %in% r$experts]
+		indices = indices & getNoNAindices(X)
+	}
+	Y = Y[indices]
+	hatY = hatY[indices]
+
+	RMSE = round(sqrt(sum((Y - hatY)^2) / length(Y)),2)
+	EV = round(1 - var(Y-hatY) / var(Y), 2)
+	A = sum(hatY >= thresh & Y >= thresh, na.rm=TRUE) #right alarm
+	B = sum(hatY >= thresh & Y < thresh, na.rm=TRUE) #false alarm
+	C = sum(hatY < thresh & Y >= thresh, na.rm=TRUE) #missed alert
+	TS = round(A/(A+B+C),2)
+	FA = B/(A+B)
+	MA = C/(A+C)
+	return (list("TS"=TS, "FA"=FA, "MA"=MA, "RMSE"=RMSE, "EV"=EV))
+}
diff --git a/pkg/R/z_runAlgorithm.R b/pkg/R/z_runAlgorithm.R
new file mode 100644
index 0000000..ed75454
--- /dev/null
+++ b/pkg/R/z_runAlgorithm.R
@@ -0,0 +1,72 @@
+#' @include b_Algorithm.R
+
+algoNameDictionary = list(
+	ew = "ExponentialWeights",
+	kn = "KnearestNeighbors",
+	ga = "GeneralizedAdditive",
+	ml = "MLpoly",
+	rt = "RegressionTree",
+	rr = "RidgeRegression",
+	sv = "SVMclassif"
+)
+
+#' @title Simulate real-time prediction
+#'
+#' @description Run the algorithm coded by \code{shortAlgoName} on data specified by the \code{stations} argument.
+#'
+#' @param shortAlgoName Short name of the algorithm.
+#' \itemize{
+#'   \item ew : Exponential Weights
+#'   \item ga : Generalized Additive Model
+#'   \item kn : K Nearest Neighbors
+#'   \item ml : MLpoly
+#'   \item rt : Regression Tree
+#'   \item rr : Ridge Regression
+#'   \item sv : SVM classification
+#' }
+#' @param stations List of stations dataframes to consider.
+#' @param experts Vector of experts to consider (names).
+#' @param ... Additional arguments to be passed to the Algorithm object.
+#'
+#' @return A list with the following slots
+#' \itemize{
+#'   \item{data : data frame of all forecasts + measures (may contain NAs) + predictions, with date and station indices.}
+#'   \item{algo : object of class \code{Algorithm} (or sub-class).}
+#'   \item{stations : list of dataframes of stations for this run.}
+#'   \item{experts : character vector of experts for this run.}
+#' }
+#'
+#' @examples
+#' data(stations)
+#' r = runAlgorithm("ew", list(st[[1]]), c("P","MA3"))
+#' plotCurves(r)
+#' r2 = runAlgorithm("ml", st[c(1,2)], c("MA3","MA10"))
+#' plotError(r2)
+#' @export
+runAlgorithm = function(shortAlgoName, stations, experts, ...)
+{
+	#very basic input checks
+	if (! shortAlgoName %in% names(algoNameDictionary))
+		stop("Unknown algorithm: ", shortAlgoName)
+	experts = unique(experts)
+
+	#get data == ordered date indices + forecasts + measures + stations indices (would be DB in prod)
+	oracleData = getData(stations, experts)
+
+	#simulate incremental forecasts acquisition + prediction + get measure
+	algoData = as.data.frame(matrix(nrow=0, ncol=ncol(oracleData)))
+	names(algoData) = names(oracleData)
+	algorithm = new(algoNameDictionary[[shortAlgoName]], data=algoData, ...)
+	predictions = c()
+	T = oracleData[nrow(oracleData),"Date"]
+	for (t in 1:T)
+	{
+		#NOTE: relies on subset() extracting rows in the order they appear
+		tData = subset(oracleData, subset = (Date==t))
+		algorithm$inputNextForecasts(tData[,names(tData) != "Measure"])
+		predictions = c(predictions, algorithm$predict_withNA())
+		algorithm$inputNextObservations(tData[,"Measure"])
+	}
+
+	oracleData = cbind(oracleData, Prediction = predictions)
+	return (list(data = oracleData, algo = algorithm, experts = experts, stations = stations))
+}
diff --git a/pkg/R/z_util.R b/pkg/R/z_util.R
new file mode 100644
index 0000000..996a5f8
--- /dev/null
+++ b/pkg/R/z_util.R
@@ -0,0 +1,49 @@
+#Maximum size of stored data to predict next PM10
+MAX_HISTORY = 10000
+
+#Default lambda value (when too few data)
+LAMBDA = 2.
+
+#Maximum error to keep a line in (incremental) data
+MAX_ERROR = 20.
+
+#Turn a "vector" into a 1-row matrix if needed (because R automatically drops matrix dimensions)
+matricize = function(x)
+{
+	if (!is.null(dim(x)))
+		return (as.matrix(x))
+	return (t(as.matrix(x)))
+}
+
+#Moore-Penrose pseudo inverse
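+#Computed from the SVD M = U D V^T as M^+ = V D^+ U^T, where singular values below epsilon are treated as zero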
+mpPsInv = function(M)
+{
+	epsilon = 1e-10
+	s = svd(M)
+	sd = s$d ; sd[sd < epsilon] = Inf
+	sd = diag(1.0 / sd, min(nrow(M),ncol(M)))
+	return (s$v %*% sd %*% t(s$u))
+}
+
+#Heuristic for k in knn algorithms
+getKnn = function(n)
+{
+	return ( max(1, min(50, ceiling(n^(2./3.)))) )
+}
+
+#Minimize lambda*||u||^2 + ||Xu - Y||^2
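+#Closed form via the SVD X = U D V^T: u = V diag(d_i / (d_i^2 + lambda)) U^T Y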
+ridgeSolve = function(X, Y, lambda)
+{
+	s = svd(X)
+	deltaDiag = s$d / (s$d^2 + lambda)
+	deltaDiag[!is.finite(deltaDiag)] = 0.0
+	if (length(deltaDiag) > 1)
+		deltaDiag = diag(deltaDiag)
+	return (s$v %*% deltaDiag %*% t(s$u) %*% Y)
+}
+
+#Return the indices (of rows, by default) without any NA
+getNoNAindices = function(M, margin=1)
+{
+	return (apply(M, margin, function(z)(!any(is.na(z)))))
+}
diff --git a/pkg/data/stations.RData b/pkg/data/stations.RData
new file mode 100644
index 0000000000000000000000000000000000000000..00cc6d129540abdcedc2483c27b935ec0d4d7ef3
GIT binary patch
literal 6874
zcmV<08YSf)iwFP!000001MORRSWRo&PXo#j$vkBYk<3Hlc9c0&=CMK%)lnJCaF9}a
zul20GOXMKKF=dWKhE8OrBtj`g8dQcjP6^+$_Ve89eD8U$?|QH6{r)+B<ht^-?!DJq
z&u|aF`}bp#{q&wD(@pgB^z`)%3=Q=34d}0i`t+}AdWL$&vfbB@{@sB7(2oq{(TI+9
z>9{SS_$$y+FQNGU1~%?_$wnV;w~u0DBW8bF$n0<9nf<Mx$3|yv@5${(%wCl4%*G?k
zUKGIX<G9@-K{pnbF?(SWvlm8iyN3lE2XT8{W-rKL_JSBT?}9*XpU&-Fm_5Ie%{xDt
z+4J{uyDzs7XY<Z)!t8H~nElOVW`85HdB1Vt_TJoHo!RrAv3ci3Gke}zZlA#IZP~nE
ze`5C6H<|tQ4sKt<=KXpwx7TC#SGmmo>KvQ*s~~Ql!R=j{J@-4CcWw%^=kDkB<=j4k
z%{#Xlv*#2ud(IVR&ym=?bLMk<A8xP7>@T0QdA~fy>@U}I`$TSU&*q)YpPS8}m))1y
zv!1hgW%2v6`1fDjV)hrF?7kOf%$^y`?3uHeJyVa}mvMmEGe$A{^HOGiu4VT<@5k)V
zGMW9^FU<a|1H13(9rpQCZ}$09bM|?99Q!<d4zs80Gy9W+%>HByyYKOPW`7*U?2iX9
zds-H|FKr{Ur&%)lqkGK$Xc@cjQA1`=y~ymT^O!x=fZg}-Fta~&WcHK~%$~B1-Irp^
z?8#Tzb+R$LelUq$-`~l`dwI-$&zfD|UB#~N{LV)cX1_g|UEkWpu5Z5KqYbm)Sk11J
zuCeQ++HAZ&h1sv|Vb_TT>^iXr8-Mp__Jr%~I-xGRzH)+%mxr=3K8=kR-Pssd&c^d9
zHpaGQ<JmK8jIq~afBq%28O>kn;_FL)srSE(hW0Lg3+ecs{^*k2`uFmUHTFSh4KuF@
z^#ma8iK}DkN7$O~`nJWeZXiDCH|EBn_aLqA-fYj6l_2%<SXZ9W4uoy3LyYyaKsvK8
zCC~XM2(yk~Ftf!m*j7Mq|8Nw9cKtT=_PhaGcfHeFl%WFk!}hgS8H@nQEc}=1?O%Xe
zT(fFv+eJWnmuIw1$pb0-#-Vq+&Vb}BOf_txh0t$~9*tU!2XZg<!;!D~KpZ3YTv>4&
zLa#kr?;3jo$i%Y)rq^=?ZKix%H`qERBlOs^I-ok8UHy-+;UJhq{9)x$03ohL`H$U_
zAvA3meQ^iSbW7M|=CvJ!^jigoTGHpPxw7Kc*#XM?B+F*H_o#LZ2`G^kfyR6Bs<dyP
zKw3LeexWiC?UaFY`wUqN8sE)Lw0@T$6t<}2F5U!n;WgcR$csVpMuBGP?Thq_KuQi4
zu1fn3+Bx2Ix^*Gs*Lk{mYTSG4)`9yIypFDRX($Ai*3LIL8K*o~YcrZo+*^4+-me-=
z|MuOetT!liT#b3&sUYaPZiMq2KpP{!ZYqer7j^R$!^GL0K3{|2r)5nlCyWGf{`hR$
z1v8cPilyl$c0V70kTyfN_8W5(LVTyVB@g`w;kD&sSq_>W@^LwXlsrdv2$1fw@2M`D
zn;Biw2hHf!y7LU}Ke6e98uLKApREt)892ay9r+QV_)kdar}t0oQ$RWq+@oXRY9Kr5
zZhEW-u_t|8wf)Mx$y`I-K5w;>m+c3^A2d3D#raA5<kyUY@Xqyg{wplVX#nGX1NE~R
zje@N7L4CNH{_+mPK}>q^j2e`4$M2z^<9<j><)I}Az4rdF9$Bc|zcu{A=NU065bny(
zU06PvF2DiE*`9Qp4S_^|D~om52VwKLU-Nj;oNl07m-LCg=G|^k+qoN@JYNWsn)$Ib
zn|{2m0yZ^}7WtfN0~)=ZzrAw$6Qm(*{o3m_b^8;X(@B+Ya0)a_#_86f$zt<X2RV4o
zt~d^AgOC1az9#{(V9yJ`&psEfcGlK<nGLG9lXUTf`$+PZ(MwMVdvHtV-`d4jnIE({
zv?a0QFCf+Rr|xL4JV!I>q%IGr5=RBT_g<{TU1;mme?}2&(2gw9t(OGV)x~kpg%fM1
zF8u`LGxM9xOGm33j@=Hy4t?wQHY`%&M+iLUTNFd%?Es4(^<3NSH*+^aSgQ$n&O^5X
ziKA)dNaf%2*55}^xdu`9w*Ylz!T0rn(I8}hTkU**h7ynD&@1YA20-GCPwyT40i+A7
zzc)JC8p4Kw)v4rQ(7G1*dp2%B<MmK?=b&Yv+S26NY_BX(XPTcJUuQFjUAO;Ik^BIN
zQPbeo$G!m>wSGwAV|zJojGUc*0f>jwo5{s%fYd!c*)BE|M7KJd9jgrlanhTY@mVK9
z)Jt9XJfa$qQ%xMz#$SL8zczk{(-{!$4E#s88we`>8{Hy1EC$uQyJIJ<oD8al#Sg7U
zd;-BH&+5@N7Z8tk@7grl9LVVvYfK4^qwN_9&HBCqF=0wimw*-^jHOZE+Y89vS|6uh
z?Fi&@QhC>TT96i9x2JzVIyX<}*P0r>H8#$V2C1E1m}*vU5N9=Cwp?f<-~Y}1O^&?Z
zav;!rEE?XN8Uva>pO<vI+!RRg(i8o>E`n5jxbH9NnIJZxkJsD)VV?UsBb%PEb#d8&
zyXUMSSTtXgGvWlOyT?s@8`v7iJlEWLW3zzRGrv<?ZrW{E=?ki{2LkW=69}EVJHz4n
zRS5muq{|MQEAn&4J1zMEBw2`Udz{9%*|zStUB7{Pj5&SrQ&2nXr1NbHsu5NJE{O&p
z?7v6<+y~XGmNV~q)4XVvOmFuCQEW8n@}Ryne~%iO6ypF|!_7CRp2-BM^wFqVk#zqL
z<?8AjNhw}^`|xYIE{vITkFLAh$3FA~Xuc#S#l3Pt9ai>Bdd4RRIT@JsIp8pav<h_U
z*f<e{$F)b%2nF%n0`uPG;dI|?_lITMLHxw>pLln8=A8M#puSa-KA}T92;CLhvw0Ye
zn?QQ(r}OE2TXo&IWe-SGE89nmSOn6K!Htf#qVsEFY8l_X7}Q^u(Fa$6;Mwr&tX&;J
zT(NQ(^<4RVBllfh0^-F?uNTYpLF|2>)-PJWr-w#<v1kOu>{i`g^#DZa(c0C{4?yK_
zIcc)<Q6N*2Y5q?HvP!?Zopw7&Nw=Zqr9Xi5`gN=Mv{I0Y;*L1Bw+2l^i|aH4f#&zM
z_GL9Tg7mSZ;7<JnkQ$vTymILjkaLpNh_Xr`8$VfQwYvf8eS4dKvo8bnJ6jzNpz)$R
zf6df8^{=MUIv&vC!@F0JK(fo5<sIw{L_4=#5}63njJQ$OLJE*t9~yeA`h#F!%VO`L
z`yj1RQAhIz>AJ^z&74*s^|fU2I6@AC2C^S4cAsSi<l2yt>+fnnYs~VlX1i}?x5<M+
znm*H2d+`8h+IOwYY#abu(-lqMe;oiCD}y+{?71M=K3V#ybsGpZ9dUxr6oknutUU6c
z09o!p)0#bqw?fX}XkkIW_c;0J{oN2^l-F#=lj{)D+dZn>cnO5|q*bfI5fI2hP2)av
z-+ZZ?eh326b8t-A&vQXaiT7-5RsmAiu&#r4m;9%^_1}&_iqkT(MT)D;_iwUvcLZ{E
z#M01V+kkxRa-y?u1W2Z_FOHScc)pWlZu`ZO<_#wgFpURNw(xBGd9#4LwedT&>^6|G
zH!Cl++XtfavzU|sn&+ZwAD?!K*5P!ME$OZM0x{h;GVJ<DAk+A@#qyi?x6=HU6FWNl
z&$b|r-}QU`wg3=TFASrJQm%_XJbM{~5dX4p+$${z!lg9>Hc~%r5YvB|b2tdUp4#kE
zPU}+ribFduW`f|h`C!|@O+f6IViy_y6Ns^WDs9KR0O=ApeykVGXXiY>9LPTg<dS>w
z_niSigk`!qN!<N(_<@`%8h_`flQdt9+ync9<b2Uiy+0A8F7xS7U)~q*E;EPII%NEP
z%cem<HYI&NzU?50w+r6XtU=><=#gM?OEVC@ZA}Yr(gj36)tT_ZTo7wdXmF&94G5DS
z-m6!438;sA#?N?>2<qK1AiAtSs3xXczdu3q^weu<OFAZiI&I0HD|TN4;g*eVePZA5
zJ)_+0XguugG{Ddr)R%wWy*)Dn)bE{MP{anpxbrO{Oank&6qdE_w?9GXmb=I}Eu7+z
z37z(4UjVUD=OLX<;z8Vy=`-7|6vTYLhx2=H0@A^i!jW2Z{VymWJ`ZGS|H&acQ)C=+
z?~g0}<$XBR^36^lss)`KD`|cBXmNBwB7OcJ9ZS#aJA?2p{9Wr8#UPw{wmIr`eGop@
z-sCpX3k0tV6%jeTK=_*X%IQlos5OZpEx(3=dT{?OGe&;~_4R6-%3Cdv<7cnS>n|WC
zA4=)8_a%rEymOxW*8-tpXiTTrry!(eHnknN7=(=a)8i~@{(70xf3`dQp4$%Ffs4-2
zcn>x;OQdyj^u@31Bt0Mw3t4@e9I~=SL_PUD5^YyIfsnRgaFGRFhgp*}8sR{yIe1sr
zp9`dhVV}ibw61qeQl*+y0NI_9G<^ZZVM*Qt%^zH+xaRpY507dfS?5~>o(=@5v*aAU
zv<^u7ePa%WHUnvpPZ#eYPC$+(uGJ4|Mf(iBL1T=IfIPo3{KHgQ=k8j!nBIZL$HXzO
z))o$=^PKpoWYRhyF0t0e-)WpLbdQ;I*d9n@ugI=$1jKhot995x^FWUqD=bRYKuTAY
zpYGvC>r1`d-@7~oG0Ssr+lq90|5%!Ktm%AxXj!6udCV=b<Az;865bwp_=i47`c@?s
z!@h&0^$9P`IStal1z#3^3kAudY)<>#bbTMScLz_K3{tIM>GTXh8ktz5mWt-#jP47n
z51{$0>1*BhiQV@6X&M2<78mJuv;uXv2fDsM-JA6dYL`t*#GMU52wU@&0&)=Uzw6MV
zsVxY9?i#SIViSn^E8^$^FmF}$hgJPyRexC3AO6$(Lm;9qj{i=EdAWElL^90iU&t`_
zd$kGt7X!zkz3_9#p*?K)?=>73y7(^lS@^f-8vA&!w8m%Gp+EXEwfE!44-fW5{U3iL
z;7k^{__<iS`OsS!4OgW(u(3*WV57w5U8OlNdzI$E?Eh%Z;oq{{|1C4{^sJ<7Oy8K!
zF}*A4o~M5-2Us4kTqxy(mlG^6SZ);9$IB6xCoET3zObBOdBbvt<qyjtmPag?_#7;!
zSYEN*V)?~#jO7{2HI{EI=UCpc+++Dy-~fjQ3S8ju0pSF~3xpd8KM;-}JVCgE@CD%v
z!W)D;2!9X`Aw0tS5kBGf5MCkNLimMnOo0^~t|_pS!#RX^2=@^FAsj?_h;R|%Bf?39
zmk2izej*%2c#3cp=ZA0>;jIF9`F<#Hn8Rb7H^OIx(+a%ia2w}|a9n8*I9x~gj&L5~
zJ;Hs2|9C&P2iPts?E`Nou)V<NV*Fq`g73$61=|-~FSa*IdxbEVw?o(-VY`Iwld@jk
zUSZr}`-Sb8(w^~oVEd-DSG>K$b`RS>YzLM0knaz+kJwIPdx`BPwx3En%HsvwRrGUg
zXL)=3_q_FQw3~D<N=J`!dQYMI6#9?Tfk+P`U5NA{(uqhfBHf7eBhrxyJ;~`xq%RdZ
zlhd0D-O1@sq(hM&MY<H}Q>0TBdX>|yNWUT-tI)HYu2tw;PUj-Mi*&C-|8hDQ>0zXc
zkv>K`8R=!Dn~{E2=xC&;IbDtPHPYEgZzJ7}^f%Js3O$Z=Ij7H&PDgqj>2`&F=X5;M
z^GMeteUEfL()&pFEA&5?15h47xd7z@loL>1K)C_s2b3dFo<O-mkuSKMf$|2*9VmY&
zatN14P%c6F1mzTzS5R(2`32<|lxI+`LHP#d9F%uZ?m_tn<sg)YP%c9G2<0S{mr!m(
z`3dDHl&2KAipy6hXQ8}>au>>9iX6t}F_g<tK0`ST<uyfa<MJEIaVXEBT!-?VBIj{=
z59L0T{}efp%Y!HvqI`&QBFc*>H=_KAawN)=C|4@-CC~RLZ*sZwKjp3eT4oSGe!Mg7
zP!Q6B3QefchMY!3T2Y}HIqirvB+`-!P049Xq%o1!RA^34dnz<2(xRLuMcP!MQIS^V
zG^;|pavBzCS)^%^wnZ9Op>>hwMcNl>V5EhSCPvzr)5u6GD>O6G&YXruT3Vs0k+wz}
z8)<E%xjF5PG&s`YNRuONuF&Y5R!5p0X?KN&M_QiK^hnzyjgPcG()<eT&t(9V1r(V8
zWdoEEP*&hF1Ii94LnyKY$`mMDpp3y~4U{<)*#l(|E{mW{qR1vFqi|V8ky*Ixf-(%s
zGAPraY=bfm$~q|XpzMP(5XwS|OvGg)l#x(YQe-ABJE07PvJ}cxifn~47Rp*EbD`|T
zWiXV*P$om!Op(#Jtft6pTy|4rIF#kMOoy@^%6KU2DKa0I{ZIx}WI-+yqHL(hh$t(f
z%*bU&MTX?EB+8Urw#2+u)gM;%hgJPyRe$(T?+;N9W&D=`@?ZadWx-H>RF#lWB_vb{
z3GBWqA%TrmLc+fa34$3TIEa_GE>V-fYTZ`$ARt5z@EPGS05o;WbpV6dw*7Tw<WeBn
zqe}07Z3ODacDhDGSnyKUm}uX%k{JvLDfRiD<T(e_Q;nX`mItIJSALpVIv<36UOE67
z>KUzHd(=f*C|d1qUNaIjO$X+TIAIRjZMnJvBG%#Wv3@i&FenV9jN>xml!-ZtsLoR0
zqM?PX0LXxb6uF*-(B=_?qx-G_b)3u}Xh5@g=zcHjGoYSWWHY6wE2x&zx9Beivg-7&
zFPDD-sY9KYb(;Ht<Xc@=_(Hq~>KZ=Has~)R6)k5qdw{0DK1CpZ&;nJT+AV-Qu5rSA
zO)hAbtz<xSYxDbk4o{bXN}bJ)v~f<YU<JRXlMYyEXSv*=C0s7NIzXnWnW6*Ws$jVf
zr2yk~W~a}IB80|x>k4aV0~ZPq3m~Lkx-UgQ@;>N*j`TC%{}!`!fJ(I?s!sD|E9CP@
zcw0gn(Sly?QROBOa+$*R(JD}vytp_2!%(`u8ZF;MrqTJzxMDhxSkM8Atq0o=$-CVW
zR9%*JEL`;%LUP;D2hqm8%Xt}bg6hs%9k5lU#>6`8y#ty@pBO-X-$O=BAU2#PBX}U8
zopcRoaQRJ{X`=w5LbKn<3CNxk^C<-QYvgw2d;C6p4n7y(gYU(8;Jk32IB#4Bt_#<R
z>&AV+eZhUgeZzgkeZ_soeaCaabHQ`MbHj7QbH#JUb4Ncwzfk;y`;Fp9+^^8j(C-vK
z<bH{MihhfJjDC%Nj()Gi0gnre6O0>-BaADIGmJZoLySv|Q;b_Bj(J>ToMYT$9>BbS
zc>?nW<`K**m}fBWU>@Rm>F;^#--)$vul<|^+MqVvJmrcFZIGkpn7_J|MjO`4;($Ia
zL8^0aZ}V>sK&r7uR-E+4VqFaV{)r2W80=ZX2uP#bwIaV*fM7ZCmgKNrCQ3#&dS4xc
zgrKQsG9H3jb$MRrB2N(AJyr~Qod(hoQ=MN(m-6)5mOTThsVD6`4e9((Y38)tD-&<j
zpYMX?&`V372;@Ta-Rd4mAQU^&_t}Cvu`!JUOAz~o4DS}%0mM%ew#9G11!CaF$HOuf
zgD`Ma$ARY}K+Fw}4sb~X$tc2QkLg(uv+o(CMqdNsT>p8ko-IJ!XXkkG+e482-|Egq
zj85sh%fJJOjm_8oM;w6+oA&zjx-s%RB2Epv0Hk<ic9A7*@LgL^9P~TgFI7;ltNPpJ
zb&Ouz#uucE>ALT0>P^!%YP6B0o%3Xn+-f{a8od~Z-VfSe(1tQ_Q*$eI284W6`xBF5
zP}P%#6FU%&9_ZI8H5tgPFFL;yvpl2sOxX#-GMnn^xOfn%8M^mxxenAGp{ADcHlXg&
z*sQ|sqfAK9zBvF$*@M#3(sv*Q7tz2k2C;I>h=(;NgW7!dgGv(@2zg@{^8CVQP+LU*
z@Uz(|``wN@clv<(YiMISaS(rxVf${{Med|P^4_^9F5(f8W5;4YEi?x8`3~&gS9dzO
z*xtQA2z}?!w9yqvK+Bd<F>RF*?f5;s51)h2#rNQQaURON_&k+)^L605aGkhr+y~qj
z+$Y>O+(+D3+-KZ(JO?}%JSRLi<sA9B;yL5FqaUDOD1O5IM)4!=SLkQxcZwf!zf}B`
z`>o=~+^-cs=YFrm0gnre6O0>-BaADIGbQeL94c|i;}qi-;~3*wiE|$Jm<N=+!1IKX
zH+UXV@(Ry0O5S04=s)GH|Nj3g_*ya|eDpi-)YfG{THZB`cBZ(gt7I`Np&1=3y2Um>
z1)~3!I|kQJgE(emN&Ji~5JSeEPit%sWYCSPL5r!sIoSBroYe%xI};y{v!yt2O5}?f
zwQB*1Z1<b3Fcye<&xARq2Z0<&?raiI*BNzIv$_GrU*S=8Jt`jn*%cM<^<)*0*4~V;
z`6_h|7Prv-v)bua-V4OMl#1zJ%s_nGkxYBI3Pd~6EOE^O5Kq<ZE7lwiq~42zE}dw;
zcS|3=vIRZ=)OX)c`5gw*h7oT>?eW<E=AS@BQ&}Mbvead_O|=O?W;=UWH8uirrFP%?
zy^Z8Ls;k%J-I(shQ=b4S(i%B+r1k7urVigqV+XT;kNPA!y3H>$X&o-Pef>F+{a1%C
zNZV#+-H&_9`)4}w7{w=NO12%0qVu-#lod1(4g@$J(HjCnPX7;&N+y9?YZY7Kz7o{;
z`rdV#U<^W-f4|$2g&-bT>t&cQ1H?B2bwZ6${&atLdkF-MVR7NU8z3ywm{fG9eirt6
z;Gvx~56+GA&wNkgI?Sxj4u|s~&Uk4wxSK18J_SLCpE!b$HDhabeP0kB$IFZ_2-nx?
z>Z#acNGrj41Bhu>!+S^2``(IoemX(pti7e*qM#%4z8CHGdJE#u8DkSC`1~a#@N4{B
z`98m2c^-d0zE62SpAXIt=Zo|IYdw5@xL#a8-w)g$+%Mce+)vzJ+;7}}JP$k{JTK+^
z@I3K+`FShn&;3F15A+xGAM_{mFZ4I`KlDfLpNhY7|3!aB|5p4R{h!B!5+4{ZJbsjT
z!uY~?!}!B^#Q5a#s>CmkXN+%*cP0LLKEV8-<O|FnET3TBs_GA``opUJu&O`&r}u}L
Ur~W}gg5LN40iW7}qc=$a0B0q+f&c&j

literal 0
HcmV?d00001

diff --git a/pkg/man/aggexp-package.Rd b/pkg/man/aggexp-package.Rd
new file mode 100644
index 0000000..bee26bf
--- /dev/null
+++ b/pkg/man/aggexp-package.Rd
@@ -0,0 +1,38 @@
+\name{aggexp-package}
+\alias{aggexp-package}
+\alias{aggexp}
+\docType{package}
+
+\title{
+	\packageTitle{aggexp}
+}
+
+\description{
+	\packageDescription{aggexp}
+}
+
+\details{
+	The devtools package should be useful during development, since we rely on testthat for
+	unit tests and on roxygen2 for documentation. knitr is used to generate the package vignette.
+
+	The main entry point is located in R/z_runAlgorithm.R and takes three parameters:
+	\itemize{
+		\item{the algorithm (short) name,}
+		\item{the list of stations dataframes,}
+		\item{the vector of experts names.}
+	}
+}
+
+\author{
+	\packageAuthor{aggexp}
+
+	Maintainer: \packageMaintainer{aggexp}
+}
+
+%\references{
+%	TODO: Literature or other references for background information
+%}
+
+%\examples{
+%	TODO: simple examples of the most important functions
+%}
diff --git a/pkg/src/ew.predict_noNA.c b/pkg/src/ew.predict_noNA.c
new file mode 100644
index 0000000..33e3b8b
--- /dev/null
+++ b/pkg/src/ew.predict_noNA.c
@@ -0,0 +1,69 @@
+#include <math.h>
+#include <stdlib.h>
+
+void ew_predict_noNA(double* X, double* Y, int* n_, int* K_, double* alpha_, int* grad_, double* weight)
+{
+	int K = *K_;
+	int n = *n_;
+	double alpha = *alpha_;
+	int grad = *grad_;
+
+	//at least two experts to combine: various inits
+	double invMaxError = 1. / 50; //TODO: magic number
+	double logK = log(K);
+	double initWeight = 1. / K;
+	for (int i=0; i<K; i++)
+		weight[i] = initWeight;
+	double* error = (double*)malloc(K*sizeof(double));
+	double* cumError = (double*)calloc(K, sizeof(double));
+
+	//start main loop
+	for (int t=0; t<n; t++)
+	{
+		if (grad)
+		{
+			double hatY = 0.;
+			for (int i=0; i<K; i++)
+				hatY += X[t*K+i] * weight[i];
+			for (int i=0; i<K; i++)
+				error[i] = 2. * (hatY - Y[t]) * X[t*K+i];
+		}
+		else
+		{
+			for (int i=0; i<K; i++)
+			{
+				double delta = X[t*K+i] - Y[t];
+				error[i] = delta * delta;
+/*				if ((X[t*K+i] <= 30 && Y[t] > 30) || (X[t*K+i] > 30 && Y[t] <= 30))
+					error[i] = 1.0;
+				else
+					error[i] = 0.0;
+*/
+			}
+		}
+		for (int i=0; i<K; i++)
+			cumError[i] += error[i];
+
+		if (t < n-1 && !grad)
+		{
+			//weight update is useless
+			continue;
+		}
+
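+		//exponential weights update: weight_i proportional to exp(-eta * cumulated loss of expert i), normalized below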
+		//double eta = invMaxError * sqrt(8*logK/(t+1)); //TODO: good formula ?
+		double eta = invMaxError * 1. / (t+1); //TODO: good formula ?
+		for (int i=0; i<K; i++)
+			weight[i] = exp(-eta * cumError[i]);
+		double sumWeight = 0.0;
+		for (int i=0; i<K; i++)
+			sumWeight += weight[i];
+		for (int i=0; i<K; i++)
+			weight[i] /= sumWeight;
+		//redistribute weights if alpha > 0 (all weights are 0 or more, sum > 0)
+		for (int i=0; i<K; i++)
+			weight[i] = (1. - alpha) * weight[i] + alpha/K;
+	}
+
+	free(error);
+	free(cumError);
+}
diff --git a/pkg/src/ml.predict_noNA.c b/pkg/src/ml.predict_noNA.c
new file mode 100644
index 0000000..03a5355
--- /dev/null
+++ b/pkg/src/ml.predict_noNA.c
@@ -0,0 +1,64 @@
+#include <math.h>
+#include <stdlib.h>
+
+void ml_predict_noNA(double* X, double* Y, int* n_, int* K_, double* alpha_, int* grad_, double* weight)
+{
+	int K = *K_;
+	int n = *n_;
+	double alpha = *alpha_;
+	int grad = *grad_;
+
+	//at least two experts to combine: various inits
+	double initWeight = 1. / K;
+	for (int i=0; i<K; i++)
+		weight[i] = initWeight;
+	double* error = (double*)malloc(K*sizeof(double));
+	double* cumDeltaError = (double*)calloc(K, sizeof(double));
+	double* regret = (double*)calloc(K, sizeof(double));
+
+	//start main loop
+	for (int t=0; t<n; t++)
+	{
+		if (grad)
+		{
+			double hatY = 0.;
+			for (int i=0; i<K; i++)
+				hatY += X[t*K+i] * weight[i];
+			for (int i=0; i<K; i++)
+				error[i] = 2. * (hatY - Y[t]) * X[t*K+i];
+		}
+		else
+		{
+			for (int i=0; i<K; i++)
+			{
+				double delta = X[t*K+i] - Y[t];
+				error[i] = delta * delta;
+			}
+		}
+
+		double hatError = 0.;
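+		//ML-Poly update: per-expert rate eta_i = 1/(1 + sum of squared instantaneous regrets), weight_i proportional to eta_i times the positive part of the cumulated regret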
+		for (int i=0; i<K; i++)
+			hatError += error[i] * weight[i];
+		for (int i=0; i<K; i++)
+		{
+			double deltaError = hatError - error[i];
+			cumDeltaError[i] += deltaError * deltaError;
+			regret[i] += deltaError;
+			double eta = 1. / (1. + cumDeltaError[i]);
+			weight[i] = regret[i] > 0. ? eta * regret[i] : 0.;
+		}
+
+		double sumWeight = 0.0;
+		for (int i=0; i<K; i++)
+			sumWeight += weight[i];
+		for (int i=0; i<K; i++)
+			weight[i] /= sumWeight;
+		//redistribute weights if alpha > 0 (all weights are 0 or more, sum > 0)
+		for (int i=0; i<K; i++)
+			weight[i] = (1. - alpha) * weight[i] + alpha/K;
+	}
+
+	free(error);
+	free(cumDeltaError);
+	free(regret);
+}
-- 
2.44.0