prepare converter for DB-extracted datasets
diff --git a/data/preprocessing/convert_32kEDF.R b/data/preprocessing/convert_32kEDF.R
deleted file mode 100644
index 2e6798a..0000000
--- a/data/preprocessing/convert_32kEDF.R
+++ /dev/null
@@ -1,29 +0,0 @@
-convert_32kEDF = function(orig_csv, nb_series_per_chunk)
-{
-	datetimes = NULL #...TODO: all 3 years? year-by-year is better
-	orig_con = file(orig_csv, open="r") #2009, 2010 or 2011
-	ignored = readLines(orig_con, 1) #skip header
-	serie_length = length(datetimes) #around 365*24*2 = 17520
-	sep = if (year==2009) "," else ";" #TODO: 'year' is not defined yet; 2010 and 2011 both use ';'
-
-	ignored = scan(orig_con, character(), sep=",", nlines=1, quiet=TRUE) #read and discard one more line
-	library(sqldf, quietly=TRUE)
-	#distinct client identifiers; read.csv.sql refers to the file as 'file' inside the SQL statement
-	ids = read.csv.sql(orig_csv, header=TRUE, sep=sep,
-		sql="select distinct FK_CCU_ID from file")[,1]
-	index = 0
-	repeat
-	{
-		if (index >= length(ids))
-			break
-		#query the next chunk of at most nb_series_per_chunk identifiers
-		ids_chunk = ids[index + seq_len(min(nb_series_per_chunk, length(ids)-index))]
-		request = paste("select CPP_DATE_PUISSANCE,CPP_PUISSANCE_BRUTE from file where FK_CCU_ID in (",
-			paste(ids_chunk, collapse=","),
-			") order by FK_CCU_ID,CPP_DATE_PUISSANCE", sep="")
-		series_chunk = read.csv.sql(orig_csv, header=TRUE, sep=sep, sql=request)
-		#TODO: reshape series_chunk (serie_length rows per id) and write the chunk out
-		index = index + nb_series_per_chunk
-	}
-	close(orig_con)
-}
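The core pattern this converter relies on is querying the large CSV extract with SQL through sqldf::read.csv.sql, one chunk of client identifiers at a time; read.csv.sql stages the file in a temporary SQLite database, so only the selected rows and columns come back into R. Below is a minimal standalone sketch of that pattern; the file name, identifiers and values are made up for illustration, and the file is referred to as 'file' inside the SQL statement, per the sqldf convention.

library(sqldf)
# tiny made-up extract with the same columns as the EDF export
writeLines(c("FK_CCU_ID,CPP_DATE_PUISSANCE,CPP_PUISSANCE_BRUTE",
	"1,2009-01-01 00:00,0.5",
	"1,2009-01-01 00:30,0.7",
	"2,2009-01-01 00:00,1.2"), "toy_extract.csv")
ids_chunk = c(1, 2) #one chunk of client identifiers
request = paste("select CPP_DATE_PUISSANCE,CPP_PUISSANCE_BRUTE from file where FK_CCU_ID in (",
	paste(ids_chunk, collapse=","),
	") order by FK_CCU_ID,CPP_DATE_PUISSANCE", sep="")
series_chunk = read.csv.sql("toy_extract.csv", sql=request, header=TRUE, sep=",")
print(series_chunk)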