From 7ce27183065e8da7bd1827dd4c12d708b3912637 Mon Sep 17 00:00:00 2001 From: Frank Ebner Date: Sat, 2 Jan 2016 17:40:22 +0100 Subject: [PATCH] added current c++ code --- workspace/CMakeLists.txt | 1 - workspace/conv.cpp | 76 +- workspace/main.cpp | 534 +++---------- workspace/pca/Data.h | 112 +++ workspace/pca/KNN.h | 41 + workspace/pca/Settings.h | 25 + workspace/pca/TrainPCA.h | 108 +++ workspace/pca/aKNN.h | 73 ++ workspace/pca/nanoflann.hpp | 1397 +++++++++++++++++++++++++++++++++++ workspace/usingneuralnet.h | 585 +++++++++++++++ workspace/usingpca.h | 113 +++ 11 files changed, 2611 insertions(+), 454 deletions(-) create mode 100644 workspace/pca/Data.h create mode 100644 workspace/pca/KNN.h create mode 100644 workspace/pca/Settings.h create mode 100644 workspace/pca/TrainPCA.h create mode 100644 workspace/pca/aKNN.h create mode 100644 workspace/pca/nanoflann.hpp create mode 100644 workspace/usingneuralnet.h create mode 100644 workspace/usingpca.h diff --git a/workspace/CMakeLists.txt b/workspace/CMakeLists.txt index 2a1e234..25910b4 100644 --- a/workspace/CMakeLists.txt +++ b/workspace/CMakeLists.txt @@ -34,7 +34,6 @@ FILE(GLOB HEADERS FILE(GLOB SOURCES ./*.cpp - ../KLib/inc/tinyxml/tinyxml2.cpp ) diff --git a/workspace/conv.cpp b/workspace/conv.cpp index 19fb4f0..d1ea34f 100644 --- a/workspace/conv.cpp +++ b/workspace/conv.cpp @@ -1,55 +1,55 @@ -#include "sensors/SensorReader.h" -#include "Interpolator.h" -#include +//#include "sensors/SensorReader.h" +//#include "Interpolator.h" +//#include -/** the step size to use for interpolating the output (in ms) */ -static constexpr int stepSizeMS = 10; +///** the step size to use for interpolating the output (in ms) */ +//static constexpr int stepSizeMS = 10; -/** interpolate and convert the readings for one sensor to a matLab matrix */ -template std::string toMatLab(const SensorReadings& values) { +///** interpolate and convert the readings for one sensor to a matLab matrix */ +//template std::string toMatLab(const SensorReadings& values) { - // create and feed the interpolator with the timed sensor readings - K::Interpolator interpol; - for(const auto& reading : values.values) {interpol.add(reading.ts, reading.val);} - interpol.makeRelative(); +// // create and feed the interpolator with the timed sensor readings +// K::Interpolator interpol; +// for(const auto& reading : values.values) {interpol.add(reading.ts, reading.val);} +// interpol.makeRelative(); - // create interpolated output - const int lengthMS = interpol.values.back().key; - std::stringstream ss; - ss << "[" << std::endl; - for (int ms = stepSizeMS; ms < lengthMS; ms += stepSizeMS) { - const T cur = interpol.get(ms); - ss << cur.x << " " << cur.y << " " << cur.z << std::endl; - } - ss << "];" << std::endl; +// // create interpolated output +// const int lengthMS = interpol.values.back().key; +// std::stringstream ss; +// ss << "[" << std::endl; +// for (int ms = stepSizeMS; ms < lengthMS; ms += stepSizeMS) { +// const T cur = interpol.get(ms); +// ss << cur.x << " " << cur.y << " " << cur.z << std::endl; +// } +// ss << "];" << std::endl; - return ss.str(); +// return ss.str(); -} +//} -int main(const int argc, const char** argv) { +//int main(const int argc, const char** argv) { - std::cout << "converting " << (argc-1) << " files" << std::endl; +// std::cout << "converting " << (argc-1) << " files" << std::endl; - for (int i = 1; i < argc; ++i) { +// for (int i = 1; i < argc; ++i) { - std::string fileIn = argv[i]; - std::string fileOut = fileIn + ".m"; +// std::string 
fileIn = argv[i]; +// std::string fileOut = fileIn + ".m"; - // read all sensor values within the input file - Recording rec = SensorReader::read(fileIn); +// // read all sensor values within the input file +// Recording rec = SensorReader::read(fileIn); - // convert them to MatLab matrices - std::ofstream out(fileOut); - out << "Accel = " << toMatLab(rec.accel); - out << "Gyro = " << toMatLab(rec.gyro); - out << "Magnet = " << toMatLab(rec.magField); - out.close(); +// // convert them to MatLab matrices +// std::ofstream out(fileOut); +// out << "Accel = " << toMatLab(rec.accel); +// out << "Gyro = " << toMatLab(rec.gyro); +// out << "Magnet = " << toMatLab(rec.magField); +// out.close(); - } +// } - return 0; +// return 0; -} +//} diff --git a/workspace/main.cpp b/workspace/main.cpp index fdf1280..87d92f1 100644 --- a/workspace/main.cpp +++ b/workspace/main.cpp @@ -1,431 +1,135 @@ +//#include "usingneuralnet.h" +#include "usingpca.h" +#include +#include "pca/TrainPCA.h" +#include +#include +#include + +#include "pca/KNN.h" +#include "pca/aKNN.h" + +#include + +std::vector COLORS = {"#000000", "#0000ff", "#00ff00", "#ff0000", "#00ffff"}; + +std::string getClass(const std::vector& nns) { + std::unordered_map map; + for(const ClassifiedFeature& nn : nns) { map[nn.className] += 1; } + for (auto& it : map) { + if (it.second > nns.size() * 0.75) {return it.first;} + } + return ""; +} + +struct Stats{ + int match; + int error; + int unknown; + Stats() : match(0), error(0), unknown(0) {;} + float getSum() {return match+error+unknown;} +}; + +int main(void) { + + omp_set_dynamic(false); + omp_set_num_threads(3); + + const int numFeatures = 3; + + std::vector patTrain = TrainPCA::getTrainData(); + TrainPCA::Matrices m = TrainPCA::getMatrices(patTrain, numFeatures); + + std::vector patTest = TrainPCA::getTestData(); + + // construct knn + aKNN knn; + for (const ClassifiedPattern& pat : patTrain) { + K::DynColVector vec = m.A1 * K::PCAHelper::toVector(pat.pattern); + const std::vector arr = {vec(0), vec(1), vec(2)}; + knn.add(ClassifiedFeature(pat.className, arr)); + } + knn.build(); + + K::Gnuplot gp; + K::GnuplotSplot splot; + K::GnuplotSplotElementLines lines[5]; + + + Stats stats; + int xx = 0; + for (const ClassifiedPattern& pat : patTest) { + + const int idx = Settings::classToInt(pat.className); + K::DynColVector vec = m.A1 * K::PCAHelper::toVector(pat.pattern); + + // get KNN's answer + std::vector arr = {vec(0), vec(1), vec(2)}; + std::vector neighbors = knn.get(arr.data(), 10); + std::string gotClass = getClass(neighbors); + + if (pat.className == gotClass) {stats.match++;} + else if (gotClass == "") {stats.unknown++;} + else {stats.error++;} + + if (++xx % 16 == 0) { + std::cout << pat.className << " -> " << gotClass << std::endl; + std::cout << stats.getSum() << ":" << stats.match << ":" << stats.error << ":" << stats.unknown << std::endl; + std::cout << stats.match/stats.getSum() << ":" << stats.error/stats.getSum() << ":" << stats.unknown/stats.getSum() << std::endl; + } + + // plot + K::GnuplotPoint3 p3(vec(0), vec(1), vec(2)); + lines[idx].add(p3); + + } + + for (int i = 0; i < 5; ++i) {lines[i].setColorHex(COLORS[i]);} + for (int i = 0; i < 5; ++i) {splot.add(&lines[i]);} + gp.setDebugOutput(false); + gp.draw(splot); + gp.flush(); + + sleep(10000); + -//#include "sensors/SensorReader.h" -//#include "Interpolator.h" -//#include -//#include -//#include -//#include - -//#include -//#include - -//enum class PracticeType { -// REST, -// JUMPING_JACK, -// SITUPS, -// PUSHUPS, -// REJECT, 
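//
// Worked example for the 0.75 vote threshold in getClass() above: with the
// k = 10 neighbors requested in main(), a class name is returned only if it
// collects strictly more than 10 * 0.75 = 7.5 votes, i.e. at least 8 of the
// 10 nearest neighbors carry that label; otherwise getClass() returns ""
// and the sample is counted as "unknown" in Stats.
//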
-//}; - -///** interpolate the output for the given position using the provided range */ -//template T blur(K::Interpolator& interpol, const uint64_t ms, const int s = 3) { -// return interpol.get(ms-s*2) * 0.1 + -// interpol.get(ms-s) * 0.2 + -// interpol.get(ms) * 0.4 + -// interpol.get(ms+s) * 0.2 + -// interpol.get(ms+s*2) * 0.1; -//} - -//struct Practice { - -// PracticeType type; -// Recording rec; -// std::vector keyGyro; - -// //Practice(const PracticeType p, const Recording& rec, const std::vector& keyGyro) : p(p), rec(rec), keyGyro(keyGyro) {;} - -// K::Interpolator getInterpol() { -// K::Interpolator interpol; -// for (auto it : rec.gyro.values) {interpol.add(it.ts, it.val);} -// interpol.makeRelative(); -// return interpol; -// } - -//}; - -//static constexpr int NUM_IN = 60; -//static constexpr int NUM_HID = 16; -//static constexpr int NUM_OUT = 4; -//static constexpr int NUM_ARGS = NUM_IN*NUM_HID + NUM_HID*NUM_OUT; - -//static std::vector getNetworkInput(K::Interpolator& interpol, const uint64_t pos) { - -// std::vector val; -// val.resize(NUM_IN); -// int idx = 0; - -// for (int offset = -500; offset < 500; offset += 50) { -// SensorGyro gyro = interpol.get(pos + offset); -// val[idx++] = gyro.x; -// val[idx++] = gyro.y; -// val[idx++] = gyro.z; -// assert(idx <= NUM_IN); -// } - -// return val; - -//} - -///** get the index of the largest element within vec */ -//static int getMaxIdx(const K::NeuralNetResultIHO& vec) { -// float max = 0; -// int idx = 0; -// for (int i = 0; i < NUM_OUT; ++i) { -// if (vec.values[i] > max) { -// max = vec.values[i]; -// idx = i; -// } -// } -// return idx; -//} - -//struct TMP {int index; float value;}; -//static std::vector getSorted(const K::NeuralNetResultIHO& vec) { -// std::vector tmp; -// for (int i = 0; i < NUM_OUT; ++i) {tmp.push_back( TMP{i, vec.values[i]} );} -// auto comp = [] (const TMP& t1, const TMP& t2) {return t2.value < t1.value;}; -// std::sort(tmp.begin(), tmp.end(), comp); -// return tmp; -//} - -//static void debug(Practice& p, K::NeuralNetResultIHO& res) { -// const int maxIdx = getMaxIdx(res); -// const char max = (res.values[maxIdx] > 0.5) ? 
(maxIdx + '0') : ('?'); -// std::cout << "practice was: " << (int)p.type; -// std::cout << " network says: " << max << "\t"; -// std::cout << "["; -// for (int i = 0; i < NUM_OUT; ++i) { -// std::cout << res.values[i] << ", "; -// } -// std::cout << "]" << std::endl; -//} - -//static void debugPlot(Practice& p) { - -// static K::Gnuplot gp; -// K::GnuplotPlot plot; -// K::GnuplotPlotElementLines line[3]; - -// line[0].setColorHex("#ff0000"); line[0].setTitle("x"); -// line[1].setColorHex("#00ff00"); line[1].setTitle("y"); -// line[2].setColorHex("#0000ff"); line[2].setTitle("z"); - -// plot.add(&line[0]); -// plot.add(&line[1]); -// plot.add(&line[2]); - -// K::Interpolator interpol = p.getInterpol(); - -// for (int ms = 0; ms < 20000; ms += 50) { -// SensorGyro s = interpol.get(ms); -// line[0].add(K::GnuplotPoint2(ms, s.x)); -// line[1].add(K::GnuplotPoint2(ms, s.y)); -// line[2].add(K::GnuplotPoint2(ms, s.z)); -// } - -// gp.setDebugOutput(true); -// gp.draw(plot); -// gp.flush(); - -//} - - -//int main(void) { - -// std::vector practices; - -// practices.push_back( -// Practice { -// PracticeType::JUMPING_JACK, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"), -// {1950, 2900, 3850, 4850, 5850, 6850, 7850, 8850, 9800, 10800, 11850} -// } -// ); -// practices.push_back( -// Practice { -// PracticeType::REST, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/idle/restposition_gl_24.txt"), -// {1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000} -// } -// ); -// practices.push_back( -// Practice { -// PracticeType::SITUPS, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"), -// {1850, 3250, 4750, 6150, 7550, 8950, 10350, 11600, 13000} -// } -// ); +// std::vector> vecs = {vec1, vec2}; +// std::cout << K::PCAHelper::getR(vecs) << std::endl; +// std::cout << K::PCAHelper::getM(vecs) << std::endl; -// practices.push_back( -// Practice { -// PracticeType::PUSHUPS, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"), -// {2750, 4200, 5850, 7400, 9000, 10650} -// //{3500, 5000, 8300, 9900, 11550} -// } -// ); +// K::PCAHelper::R r; +// r.add(vec1); r.add(vec2); std::cout << r.get() << std::endl; + + +// Eigen::Vector3f v1; v1 << 1,2,3; +// Eigen::Vector3f v2; v2 << 3,4,5; +// std::vector vecs2 = {v1, v2}; +// std::cout << K::PCAHelper::getR(vecs2) << std::endl; +// std::cout << K::PCAHelper::getM(vecs2) << std::endl; + +// UsingNeuralNet::run(); + + //UsingPCA::run(); -// practices.push_back( -// Practice { -// PracticeType::REST, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"), -// {1950+500, 2900+500, 3850+500, 4850+500, 5850+500, 6850+500, 7850+500, 8850+500, 9800+500, 10800+500, 11850+500} -// } -// ); -//// practices.push_back( -//// Practice { -//// PracticeType::REST, -//// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"), -//// //{2750, 4200, 5850, 7400, 9000, 10650} -//// {3500, 5000, 8300, 9900, 11550} -//// } -//// ); -// practices.push_back( -// Practice { -// PracticeType::REST, -// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"), -// {1850+600, 3250+600, 4750+600, 6150+600, 7550+600, 8950+600, 10350+600, 11600+600, 13000+600} -// } -// ); - - -// debugPlot(practices.back()); -// sleep(100); - -// class MyOpt : public K::NumOptFunction { - -// public: - -// 
std::vector& practices; -// K::NeuralNetIHO& net; - -// /** ctor */ -// MyOpt(std::vector& practices, K::NeuralNetIHO& net) : practices(practices), net(net) { -// ; -// } - -// double getValue(const K::NumOptVector& args) const { - -// // configure the network -// std::vector vals; -// for(int i = 0; i < NUM_ARGS; ++i) {vals.push_back(args[i]);} -// net.setAll(vals); - -// // temporals -// float points = 0; - -// // process every practice -// for (Practice& p : practices) { - -// // get the values for the neural-net-input - -// K::Interpolator interpol = p.getInterpol(); - -// // process 4 (positive) occurences within the practice -// for (int key = 0; key < 4; ++key) { - -// for (int o = -100; o <= +100; o +=50) { - -// const uint64_t ts = p.keyGyro[key] + o; -// const std::vector values = getNetworkInput(interpol, ts); - -// // calculate the output -// const K::NeuralNetResultIHO res = net.getOutput(values.data()); - - -// // largest value matches the desired type -> good! -// std::vector resSort = getSorted(res); -// if (resSort[0].index == (int) p.type) { -// //if ( (resSort[0].value - resSort[1].value) > 0.25 ) { -// ++points; -// points += resSort[0].value; -// points -= resSort[1].value; -// //} -// //points += resSort[0].value; -// //points += (resSort[0].value - resSort[1].value); -// } else { -// --points; -// } - -//// // update the score -//// for (int i = 0; i < NUM_OUT; ++i) { -//// if (i == (int) p.type) { -//// points += 3 * res.values[i]; // matches -//// } else { -//// points -= res.values[i]; // does not match -//// } -//// } - -//// int maxIdx = getMaxIdx(res); -//// if (maxIdx == (int) p.type) { -//// ++points; -//// } - -// } - +// UsingNeuralNet::debugPlot( +// Practice { +// PracticeType::KNEEBEND, +// SensorReader::read("/mnt/firma/kunden/HandyGames/daten/kneebend/kneebend_gl_0_subject_0_right.txt"), +// {2650, 4750, 6750, 8800, 10800, 12800} +// //{3500, 5000, 8300, 9900, 11550} // } -// } - -// std::cout << points << std::endl; -// return -points; - -// } +// ); -// }; + //sleep(1000); -// K::NumOptAlgoGenetic opt; -// K::NumOptVector vec; -// K::NeuralNetIHO net; -// MyOpt func(practices, net); - -// opt.setElitism(0.025f); -// opt.setPopulationSize(300); -// opt.setMaxIterations(100); -// opt.setMutation(0.10f); -// opt.setValRange(0.5); -// opt.calculateOptimum(func, vec); - - -//// // process every practice -//// for (Practice& p : practices) { - -//// // get the values for the neural-net-input - -//// K::Interpolator interpol = p.getInterpol(); - -//// // process every (positive) occurence within the practice -//// for (uint64_t ts : p.keyGyro) { - -//// std::vector values = getNetworkInput(interpol, ts); -//// K::NeuralNetResultIHO res = net.getOutput(values.data()); -//// debug(p, res); - -//// { -//// std::vector values = getNetworkInput(interpol, ts+500); -//// K::NeuralNetResultIHO res = net.getOutput(values.data()); -//// std::cout << "###"; debug(p, res); -//// } - -//// }getMaxIdx - -//// } - - -// K::Gnuplot gp1; -// K::Gnuplot gp2; - -// K::GnuplotPlot plot1; -// K::GnuplotPlot plot2; - -// K::GnuplotMultiplot plot(2,1); -// plot.add(&plot1); -// plot.add(&plot2); - -// K::GnuplotPlotElementLines line[3]; -// line[0].setColorHex("#ff0000"); line[0].setTitle("x"); -// line[1].setColorHex("#00ff00"); line[1].setTitle("y"); -// line[2].setColorHex("#0000ff"); line[2].setTitle("z"); -// plot1.add(&line[0]); -// plot1.add(&line[1]); -// plot1.add(&line[2]); - -// K::GnuplotPlotElementLines netLines[NUM_OUT]; -// netLines[0].setColorHex("#ff0000"); 
netLines[0].setTitle("REST"); netLines[0].setLineWidth(2); -// netLines[1].setColorHex("#00ff00"); netLines[1].setTitle("JUMPING_JACK"); netLines[1].setLineWidth(2); -// netLines[2].setColorHex("#0000ff"); netLines[2].setTitle("SITUPS"); netLines[2].setLineWidth(2); -// netLines[3].setColorHex("#ffff00"); netLines[3].setTitle("PUSBACKS"); netLines[3].setLineWidth(2); - -// for (int i = 0; i < NUM_OUT; ++i) { -// plot2.add(&netLines[i]); -// } - -// // process every practice -// for (Practice& p : practices) { - -// // get the values for the neural-net-input - -// K::Interpolator interpol = p.getInterpol(); - -// line[0].clear(); -// line[1].clear(); -// line[2].clear(); - -// for (int i = 0; i < NUM_OUT; ++i) { -// netLines[i].clear(); -// } - -// for (int ms = 0; ms < 20000; ms += 50) { -// SensorGyro s = interpol.get(ms); -// line[0].add(K::GnuplotPoint2(ms, s.x)); -// line[1].add(K::GnuplotPoint2(ms, s.y)); -// line[2].add(K::GnuplotPoint2(ms, s.z)); -// } - -// // process every (positive) occurence within the practice -// for (int ts = 1000; ts < 10000; ts += 50) { - -// std::vector values = getNetworkInput(interpol, ts); -// K::NeuralNetResultIHO res = net.getOutput(values.data()); -// debug(p, res); - -// for (int i = 0; i < NUM_OUT; ++i) { -// netLines[i].add(K::GnuplotPoint2(ts, res.values[i])); -// } - -// gp1 << "set arrow 1 from " << ts-500 << ",-10 to " << ts-500 << ",+10\n"; -// gp1 << "set arrow 2 from " << ts+500 << ",-10 to " << ts+500 << ",+10\n"; -// gp1.draw(plot1); -// gp1.flush(); - -// gp2.draw(plot2); -// gp2.flush(); - -// usleep(1000*33); - - -// } - -// } - - - -//// K::Gnuplot gp; -//// K::GnuplotPlot plot; -//// K::GnuplotPlotElementLines line[3]; -//// line[0].setColorHex("#ff0000"); line[0].setTitle("x"); -//// line[1].setColorHex("#00ff00"); line[1].setTitle("y"); -//// line[2].setColorHex("#0000ff"); line[2].setTitle("z"); - -//// Practice p1 = practices[0]; - -//// auto interpol = p1.getInterpol(); -//// for (int ms = 0; ms < 20000; ms += 50) { -//// SensorGyro s = blur(interpol, ms, 10); -//// line[0].add(K::GnuplotPoint2(ms, s.x)); -//// line[1].add(K::GnuplotPoint2(ms, s.y)); -//// line[2].add(K::GnuplotPoint2(ms, s.z)); -//// } - -//// plot.add(&line[0]); -//// plot.add(&line[1]); -//// plot.add(&line[2]); -//// gp.draw(plot); -//// for (uint64_t ts : p1.keyGyro) { -//// gp << "set arrow from " << ts << ",-10 to " << ts << ",+10\n"; -//// } -//// gp.flush(); - - -// sleep(1000); - -//} +} diff --git a/workspace/pca/Data.h b/workspace/pca/Data.h new file mode 100644 index 0000000..a454509 --- /dev/null +++ b/workspace/pca/Data.h @@ -0,0 +1,112 @@ +#ifndef TRAINDATA_H +#define TRAINDATA_H + +#include "Settings.h" +#include +#include "../Interpolator.h" +#include "../sensors/SensorReader.h" + +struct ClassifiedPattern { + std::string className; + std::vector pattern; + ClassifiedPattern(const std::string& className, const std::vector& pattern) : className(className), pattern(pattern) {;} +}; + +struct ClassifiedFeature { + std::string className; + std::vector feature; + ClassifiedFeature(const std::string& className, const std::vector& feature) : className(className), feature(feature) {;} + ClassifiedFeature() : className("??????") {;} + + /** get the l2- distance to the given vector */ + float getDistance(const std::vector& vec) const { + if (vec.size() != feature.size()) {throw "error!";} + float dist = 0; + for (int i = 0; i < (int)vec.size(); ++i) {dist += (vec[i]-feature[i])*(vec[i]-feature[i]);} + return std::sqrt(dist); + } + +}; + +struct 
ClassifiedDataFile { + std::string className; + std::string fileName; + ClassifiedDataFile(const std::string& className, const std::string& fileName) : className(className), fileName(fileName) {;} +}; + +class Data { + +public: + + /** get X data-files for each class */ + static std::vector getDataFiles(const int filesPerClass) { + + Settings s; + std::vector files; + + K::File folder(s.path); + for (const std::string& className : s.classNames) { + K::File classFolder(folder, className); + + int i = 0; + for (const K::File classFile : classFolder.listFiles()) { + const std::string fileName = classFile.getAbsolutePath(); + if (fileName[fileName.length()-1] == 'm') {continue;} + if (++i > filesPerClass) {break;} + ClassifiedDataFile cdf(className, fileName); + files.push_back(cdf); + } + + } + + return files; + + } + + /** get sample date from the given data-file */ + static std::vector> getSamples(const std::string fileName, const int windowSize_ms, const int regionStart_ms, const float regionPercent, const int stepSize_ms) { + + // read all sensor-values within the given data-file + Recording rec = SensorReader::read(fileName); + + // get the value-interpolator + K::Interpolator intAccel; + for (const auto& val : rec.accel.values) {intAccel.add(val.ts, val.val);} + intAccel.makeRelative(); + + const int regionEnd_ms = intAccel.values.back().key * regionPercent; + + + + // construct all sample windows + std::vector> samples; + for (int center = regionStart_ms; center < regionEnd_ms; center += stepSize_ms) { + std::vector window = getSampleWindow(intAccel, center, windowSize_ms, stepSize_ms); + samples.push_back(window); + } + + return samples; + + } + + template static std::vector getSampleWindow(K::Interpolator& interpol, const int center_ms, const int windowSize_ms, const int stepSize_ms) { + + std::vector window; + const int start = center_ms - windowSize_ms/2; + const int end = center_ms + windowSize_ms/2; + + for (uint64_t ms = start; ms < end; ms += stepSize_ms) { + const T val = interpol.get(ms); + window.push_back(val.x); + window.push_back(val.y); + window.push_back(val.z); + } + + return window; + + } + +}; + +#endif // TRAINDATA_H + diff --git a/workspace/pca/KNN.h b/workspace/pca/KNN.h new file mode 100644 index 0000000..0a34f48 --- /dev/null +++ b/workspace/pca/KNN.h @@ -0,0 +1,41 @@ +#ifndef KNN_H +#define KNN_H + +#include +#include + +template class KNN { + +private: + + std::vector elems; + +public: + + /** add a new element */ + void add(const T& elem) { + elems.push_back(elem); + } + + void build() {;} + + /** get the nearest n elements */ + template std::vector get(const T2& src, const int num) { + + auto lambda = [&] (const T& e1, const T& e2) { + return e1.getDistance(src) < e2.getDistance(src); + }; + + std::sort(elems.begin(), elems.end(), lambda); + + std::vector res; + for (int i = 0; i < num; ++i) { res.push_back(elems[i]); } + return res; + + } + + +}; + +#endif // KNN_H + diff --git a/workspace/pca/Settings.h b/workspace/pca/Settings.h new file mode 100644 index 0000000..6039be4 --- /dev/null +++ b/workspace/pca/Settings.h @@ -0,0 +1,25 @@ +#ifndef SETTINGS_H +#define SETTINGS_H + +#include + +class Settings { + +public: + + std::string path = "/mnt/firma/kunden/HandyGames/daten"; + std::vector classNames = {"forwardbend", "jumpingjack", "kneebend", "pushups", "situps"}; + + static int classToInt(const std::string className) { + if ("forwardbend" == className) {return 0;} + if ("jumpingjack" == className) {return 1;} + if ("kneebend" == className) {return 2;} + 
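+		// note: these return values follow the order of classNames above;
+		// main.cpp relies on them to index its COLORS and plot-line arrays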
if ("pushups" == className) {return 3;} + if ("situps" == className) {return 4;} + throw "error"; + } + +}; + +#endif // SETTINGS_H + diff --git a/workspace/pca/TrainPCA.h b/workspace/pca/TrainPCA.h new file mode 100644 index 0000000..c3826ee --- /dev/null +++ b/workspace/pca/TrainPCA.h @@ -0,0 +1,108 @@ +#ifndef TRAINPCA_H +#define TRAINPCA_H + +#include "Data.h" +#include "Settings.h" +#include + +class TrainPCA { + +private: + + + +public: + + struct Matrices { + K::DynMatrix A1; + K::DynMatrix A2; + K::DynMatrix A3; + }; + + static std::vector getTestData() { + + const int windowSize_ms = 1000; + const int regionStart_ms = 1500 + 25; // worst case: half-window-size offset + const float regionPercent = 0.85; + const int stepSize_ms = 50; + + // get 10 data-files per class + std::vector files = Data::getDataFiles(30); + + // get patterns for each class + std::vector patterns; + for (ClassifiedDataFile cdf : files) { + + std::cout << cdf.fileName << std::endl; + std::vector> samples = Data::getSamples(cdf.fileName, windowSize_ms, regionStart_ms, regionPercent, stepSize_ms); + + for (const std::vector vec : samples) { + patterns.push_back(ClassifiedPattern(cdf.className, vec)); + } + + } + + return patterns; + + } + + /** train PCA features */ + static std::vector getTrainData() { + + const int windowSize_ms = 1000; + const int regionStart_ms = 1500; + const float regionPercent = 0.4; + const int stepSize_ms = 50; + + // get 5 data-files per class + std::vector files = Data::getDataFiles(30); + + // get patterns for each class + std::vector patterns; + for (ClassifiedDataFile cdf : files) { + + std::cout << cdf.fileName << std::endl; + std::vector> samples = Data::getSamples(cdf.fileName, windowSize_ms, regionStart_ms, regionPercent, stepSize_ms); + std::cout << "\tgot" << samples.size() << " samples, each " << samples[0].size() << " values" << std::endl; + + for (const std::vector vec : samples) { + patterns.push_back(ClassifiedPattern(cdf.className, vec)); + } + + } + + return patterns; + + } + + /** get the A1,A2,A3 matrices for the given training data */ + static Matrices getMatrices(const std::vector& data, const int numFeatures) { + + K::LinearTransform::PCA pca; + K::LinearTransform::MaxInterClassDistance inter; + K::LinearTransform::MinIntraClassDistance intra; + + for (const ClassifiedPattern& pat : data) { + pca.add(pat.pattern); + inter.add(pat.className, pat.pattern); + intra.add(pat.className, pat.pattern); + } + + Matrices m; + m.A1 = pca.getA(numFeatures); + m.A2 = inter.getA(numFeatures); + m.A3 = intra.getA(numFeatures); + + std::cout << "A1: " << std::endl << m.A1 << std::endl; + std::cout << "A2: " << std::endl << m.A2 << std::endl; + std::cout << "A3: " << std::endl << m.A3 << std::endl; + + return m; + + } + + +}; + +#endif // TRAINPCA_H + diff --git a/workspace/pca/aKNN.h b/workspace/pca/aKNN.h new file mode 100644 index 0000000..c4cfc08 --- /dev/null +++ b/workspace/pca/aKNN.h @@ -0,0 +1,73 @@ +#ifndef AKNN_H +#define AKNN_H + +#include "nanoflann.hpp" + +using namespace nanoflann; + +template class aKNN { + + struct DataSet { + + std::vector elems; + + inline size_t kdtree_get_point_count() const {return elems.size();} + inline float kdtree_distance(const float* p1, const size_t idxP2, size_t) const { + float dist = 0; + for (int i = 0; i < dim; ++i) { + float delta = (p1[i] - kdtree_get_pt(idxP2, i)); + dist += delta*delta; + } + return dist; + } + + inline float kdtree_get_pt(const size_t idx, int pos) const { + return elems[idx].feature[pos]; + } + + template 
bool kdtree_get_bbox(BBOX&) const {return false;} + + } data; + + typedef KDTreeSingleIndexAdaptor, DataSet, dim> MyTree; + + MyTree* tree = nullptr; + + + +public: + + /** add a new element */ + void add(const T& elem) { + data.elems.push_back(elem); + } + + /** build the KD-Tree */ + void build() { + tree = new MyTree(dim, data, KDTreeSingleIndexAdaptorParams(10) ); + tree->buildIndex(); + } + + /** get the nearest n elements */ + template std::vector get(const T2* query, const int numResults) { + + float distances[numResults]; + size_t indices[numResults]; + + KNNResultSet res(numResults); + res.init(indices, distances); + + tree->knnSearch(query, numResults, indices, distances); + std::vector vec; + for (int i = 0; i < numResults; ++i) { + vec.push_back(data.elems[indices[i]]); + } + return vec; + + } + + +}; + +#endif // AKNN_H + diff --git a/workspace/pca/nanoflann.hpp b/workspace/pca/nanoflann.hpp new file mode 100644 index 0000000..edae826 --- /dev/null +++ b/workspace/pca/nanoflann.hpp @@ -0,0 +1,1397 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * Copyright 2011-2014 Jose Luis Blanco (joseluisblancoc@gmail.com). + * All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/** \mainpage nanoflann C++ API documentation + * nanoflann is a C++ header-only library for building KD-Trees, mostly + * optimized for 2D or 3D point clouds. + * + * nanoflann does not require compiling or installing, just an + * #include in your code. + * + * See: + * - C++ API organized by modules + * - Online README + */ + +#ifndef NANOFLANN_HPP_ +#define NANOFLANN_HPP_ + +#include +#include +#include +#include +#include // for fwrite() +#include // for fabs(),... 
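+// A minimal usage sketch for the aKNN wrapper from aKNN.h above, assuming it
+// is instantiated as aKNN<ClassifiedFeature, 3> to match the three PCA
+// features used in main.cpp (the feature values below are made up):
+//
+//   aKNN<ClassifiedFeature, 3> knn;
+//   knn.add(ClassifiedFeature("situps", {0.1f, 0.2f, 0.3f}));
+//   knn.build();                               // builds the kd-tree once
+//   const float query[3] = {0.1f, 0.2f, 0.25f};
+//   std::vector<ClassifiedFeature> nn = knn.get(query, 1);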
+#include + +// Avoid conflicting declaration of min/max macros in windows headers +#if !defined(NOMINMAX) && (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64)) +# define NOMINMAX +# ifdef max +# undef max +# undef min +# endif +#endif + +namespace nanoflann +{ +/** @addtogroup nanoflann_grp nanoflann C++ library for ANN + * @{ */ + + /** Library version: 0xMmP (M=Major,m=minor,P=patch) */ + #define NANOFLANN_VERSION 0x119 + + /** @addtogroup result_sets_grp Result set classes + * @{ */ + template + class KNNResultSet + { + IndexType * indices; + DistanceType* dists; + CountType capacity; + CountType count; + + public: + inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0) + { + } + + inline void init(IndexType* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + if (capacity) + dists[capacity-1] = (std::numeric_limits::max)(); + } + + inline CountType size() const + { + return count; + } + + inline bool full() const + { + return count == capacity; + } + + + inline void addPoint(DistanceType dist, IndexType index) + { + CountType i; + for (i=count; i>0; --i) { +#ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same distance, the one with the lowest-index will be returned first. + if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) { +#else + if (dists[i-1]>dist) { +#endif + if (i + class RadiusResultSet + { + public: + const DistanceType radius; + + std::vector >& m_indices_dists; + + inline RadiusResultSet(DistanceType radius_, std::vector >& indices_dists) : radius(radius_), m_indices_dists(indices_dists) + { + init(); + } + + inline ~RadiusResultSet() { } + + inline void init() { clear(); } + inline void clear() { m_indices_dists.clear(); } + + inline size_t size() const { return m_indices_dists.size(); } + + inline bool full() const { return true; } + + inline void addPoint(DistanceType dist, IndexType index) + { + if (dist 0 + */ + std::pair worst_item() const + { + if (m_indices_dists.empty()) throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on an empty list of results."); + typedef typename std::vector >::const_iterator DistIt; + DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end()); + return *it; + } + }; + + /** operator "<" for std::sort() */ + struct IndexDist_Sorter + { + /** PairType will be typically: std::pair */ + template + inline bool operator()(const PairType &p1, const PairType &p2) const { + return p1.second < p2.second; + } + }; + + /** @} */ + + + /** @addtogroup loadsave_grp Load/save auxiliary functions + * @{ */ + template + void save_value(FILE* stream, const T& value, size_t count = 1) + { + fwrite(&value, sizeof(value),count, stream); + } + + template + void save_value(FILE* stream, const std::vector& value) + { + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); + } + + template + void load_value(FILE* stream, T& value, size_t count = 1) + { + size_t read_cnt = fread(&value, sizeof(value), count, stream); + if (read_cnt != count) { + throw std::runtime_error("Cannot read from file"); + } + } + + + template + void load_value(FILE* stream, std::vector& value) + { + size_t size; + size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + throw std::runtime_error("Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt!=size) { 
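+			// short read: the file ended before the declared number of elements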
+ throw std::runtime_error("Cannot read from file"); + } + } + /** @} */ + + + /** @addtogroup metric_grp Metric (distance) classes + * @{ */ + + template inline T abs(T x) { return (x<0) ? -x : x; } + template<> inline int abs(int x) { return ::abs(x); } + template<> inline float abs(float x) { return fabsf(x); } + template<> inline double abs(double x) { return fabs(x); } + template<> inline long double abs(long double x) { return fabsl(x); } + + /** Manhattan distance functor (generic version, optimized for high-dimensionality data sets). + * Corresponding distance traits: nanoflann::metric_L1 + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L1_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const + { + DistanceType result = DistanceType(); + const T* last = a + size; + const T* lastgroup = last - 3; + size_t d = 0; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + const DistanceType diff0 = nanoflann::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff1 = nanoflann::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff2 = nanoflann::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++)); + const DistanceType diff3 = nanoflann::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++)); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 components. Not needed for standard vector lengths. */ + while (a < last) { + result += nanoflann::abs( *a++ - data_source.kdtree_get_pt(b_idx,d++) ); + } + return result; + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return nanoflann::abs(a-b); + } + }; + + /** Squared Euclidean distance functor (generic version, optimized for high-dimensionality data sets). + * Corresponding distance traits: nanoflann::metric_L2 + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L2_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const + { + DistanceType result = DistanceType(); + const T* last = a + size; + const T* lastgroup = last - 3; + size_t d = 0; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx,d++); + const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx,d++); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 components. Not needed for standard vector lengths. 
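+			   (a leftover of 1-3 components exists only when the dimensionality is not a multiple of 4)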
*/ + while (a < last) { + const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx,d++); + result += diff0 * diff0; + } + return result; + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return (a-b)*(a-b); + } + }; + + /** Squared Euclidean (L2) distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds) + * Corresponding distance traits: nanoflann::metric_L2_Simple + * \tparam T Type of the elements (e.g. double, float, uint8_t) + * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t) + */ + template + struct L2_Simple_Adaptor + { + typedef T ElementType; + typedef _DistanceType DistanceType; + + const DataSource &data_source; + + L2_Simple_Adaptor(const DataSource &_data_source) : data_source(_data_source) { } + + inline DistanceType operator()(const T* a, const size_t b_idx, size_t size) const { + return data_source.kdtree_distance(a,b_idx,size); + } + + template + inline DistanceType accum_dist(const U a, const V b, int ) const + { + return (a-b)*(a-b); + } + }; + + /** Metaprogramming helper traits class for the L1 (Manhattan) metric */ + struct metric_L1 { + template + struct traits { + typedef L1_Adaptor distance_t; + }; + }; + /** Metaprogramming helper traits class for the L2 (Euclidean) metric */ + struct metric_L2 { + template + struct traits { + typedef L2_Adaptor distance_t; + }; + }; + /** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */ + struct metric_L2_Simple { + template + struct traits { + typedef L2_Simple_Adaptor distance_t; + }; + }; + + /** @} */ + + /** @addtogroup param_grp Parameter structs + * @{ */ + + /** Parameters (see README.md) */ + struct KDTreeSingleIndexAdaptorParams + { + KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) : + leaf_max_size(_leaf_max_size) + {} + + size_t leaf_max_size; + }; + + /** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */ + struct SearchParams + { + /** Note: The first argument (checks_IGNORED_) is ignored, but kept for compatibility with the FLANN interface */ + SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true ) : + checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {} + + int checks; //!< Ignored parameter (Kept for compatibility with the FLANN interface). + float eps; //!< search for eps-approximate neighbours (default: 0) + bool sorted; //!< only for radius search, require neighbours sorted by distance (default: true) + }; + /** @} */ + + + /** @addtogroup memalloc_grp Memory allocation + * @{ */ + + /** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + inline T* allocate(size_t count = 1) + { + T* mem = static_cast( ::malloc(sizeof(T)*count)); + return mem; + } + + + /** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. 
Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + + const size_t WORDSIZE=16; + const size_t BLOCKSIZE=8192; + + class PooledAllocator + { + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */ + + + size_t remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + + void internal_init() + { + remaining = 0; + base = NULL; + usedMemory = 0; + wastedMemory = 0; + } + + public: + size_t usedMemory; + size_t wastedMemory; + + /** + Default constructor. Initializes a new pool. + */ + PooledAllocator() { + internal_init(); + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() { + free_all(); + } + + /** Frees all allocated memory chunks */ + void free_all() + { + while (base != NULL) { + void *prev = *(static_cast( base)); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + internal_init(); + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* malloc(const size_t req_size) + { + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + const size_t blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blocksize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. */ + static_cast(m)[0] = base; + base = m; + + size_t shift = 0; + //int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blocksize - sizeof(void*) - shift; + loc = (static_cast(m) + sizeof(void*) + shift); + } + void* rloc = loc; + loc = static_cast(loc) + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(const size_t count = 1) + { + T* mem = static_cast(this->malloc(sizeof(T)*count)); + return mem; + } + + }; + /** @} */ + + /** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff + * @{ */ + + // ---------------- CArray ------------------------- + /** A STL container (as wrapper) for arrays of constant size defined at compile time (class imported from the MRPT project) + * This code is an adapted version from Boost, modifed for its integration + * within MRPT (JLBC, Dec/2009) (Renamed array -> CArray to avoid possible potential conflicts). 
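+ * A brief usage sketch (illustrative values only):
+ * \code
+ * CArray<float,3> pt;
+ * pt.assign(0.0f);               // fill all N elements with one value
+ * pt[2] = 1.5f;
+ * const float last = pt.back();  // == 1.5f
+ * \endcode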
+ * See + * http://www.josuttis.com/cppcode + * for details and the latest version. + * See + * http://www.boost.org/libs/array for Documentation. + * for documentation. + * + * (C) Copyright Nicolai M. Josuttis 2001. + * Permission to copy, use, modify, sell and distribute this software + * is granted provided this copyright notice appears in all copies. + * This software is provided "as is" without express or implied + * warranty, and with no claim as to its suitability for any purpose. + * + * 29 Jan 2004 - minor fixes (Nico Josuttis) + * 04 Dec 2003 - update to synch with library TR1 (Alisdair Meredith) + * 23 Aug 2002 - fix for Non-MSVC compilers combined with MSVC libraries. + * 05 Aug 2001 - minor update (Nico Josuttis) + * 20 Jan 2001 - STLport fix (Beman Dawes) + * 29 Sep 2000 - Initial Revision (Nico Josuttis) + * + * Jan 30, 2004 + */ + template + class CArray { + public: + T elems[N]; // fixed-size array of elements of type T + + public: + // type definitions + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + typedef T& reference; + typedef const T& const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + // iterator support + inline iterator begin() { return elems; } + inline const_iterator begin() const { return elems; } + inline iterator end() { return elems+N; } + inline const_iterator end() const { return elems+N; } + + // reverse iterator support +#if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS) + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#elif defined(_MSC_VER) && (_MSC_VER == 1300) && defined(BOOST_DINKUMWARE_STDLIB) && (BOOST_DINKUMWARE_STDLIB == 310) + // workaround for broken reverse_iterator in VC7 + typedef std::reverse_iterator > reverse_iterator; + typedef std::reverse_iterator > const_reverse_iterator; +#else + // workaround for broken reverse_iterator implementations + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#endif + + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + // operator[] + inline reference operator[](size_type i) { return elems[i]; } + inline const_reference operator[](size_type i) const { return elems[i]; } + // at() with range check + reference at(size_type i) { rangecheck(i); return elems[i]; } + const_reference at(size_type i) const { rangecheck(i); return elems[i]; } + // front() and back() + reference front() { return elems[0]; } + const_reference front() const { return elems[0]; } + reference back() { return elems[N-1]; } + const_reference back() const { return elems[N-1]; } + // size is constant + static inline size_type size() { return N; } + static bool empty() { return false; } + static size_type max_size() { return N; } + enum { static_size = N }; + /** This method has no effects in this class, but raises an exception if the expected size does not match */ + inline void resize(const size_t nElements) { if (nElements!=N) throw std::logic_error("Try to change the size of a CArray."); } + // swap (note: linear complexity in N, constant for given instantiation) + void swap (CArray& y) { 
std::swap_ranges(begin(),end(),y.begin()); } + // direct access to data (read-only) + const T* data() const { return elems; } + // use array as C array (direct read/write access to data) + T* data() { return elems; } + // assignment with type conversion + template CArray& operator= (const CArray& rhs) { + std::copy(rhs.begin(),rhs.end(), begin()); + return *this; + } + // assign one value to all elements + inline void assign (const T& value) { for (size_t i=0;i= size()) { throw std::out_of_range("CArray<>: index out of range"); } } + }; // end of CArray + + /** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors when DIM=-1. + * Fixed size version for a generic DIM: + */ + template + struct array_or_vector_selector + { + typedef CArray container_t; + }; + /** Dynamic size version */ + template + struct array_or_vector_selector<-1,T> { + typedef std::vector container_t; + }; + /** @} */ + + /** @addtogroup kdtrees_grp KD-tree classes and adaptors + * @{ */ + + /** kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + * + * The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods): + * + * \code + * // Must return the number of data poins + * inline size_t kdtree_get_point_count() const { ... } + * + * // [Only if using the metric_L2_Simple type] Must return the Euclidean (L2) distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class: + * inline DistanceType kdtree_distance(const T *p1, const size_t idx_p2,size_t size) const { ... } + * + * // Must return the dim'th component of the idx'th point in the class: + * inline T kdtree_get_pt(const size_t idx, int dim) const { ... } + * + * // Optional bounding-box computation: return false to default to a standard bbox computation loop. + * // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. + * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) + * template + * bool kdtree_get_bbox(BBOX &bb) const + * { + * bb[0].low = ...; bb[0].high = ...; // 0th dimension limits + * bb[1].low = ...; bb[1].high = ...; // 1st dimension limits + * ... + * return true; + * } + * + * \endcode + * + * \tparam DatasetAdaptor The user-provided adaptor (see comments above). + * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. + * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points) + * \tparam IndexType Will be typically size_t or int + */ + template + class KDTreeSingleIndexAdaptor + { + private: + /** Hidden copy constructor, to disallow copying indices (Not implemented) */ + KDTreeSingleIndexAdaptor(const KDTreeSingleIndexAdaptor&); + public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::DistanceType DistanceType; + protected: + + /** + * Array of indices to vectors in the dataset. 
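+		 * It is permuted in place while the tree is built, so the dataset
+		 * itself is never reordered.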
+ */ + std::vector vind; + + size_t m_leaf_max_size; + + + /** + * The dataset used by this index + */ + const DatasetAdaptor &dataset; //!< The source of our data + + const KDTreeSingleIndexAdaptorParams index_params; + + size_t m_size; //!< Number of current poins in the dataset + size_t m_size_at_index_build; //!< Number of points in the dataset when the index was built + int dim; //!< Dimensionality of each data point + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** Union used because a node can be either a LEAF node or a non-leaf node, so both data fields are never used simultaneously */ + union { + struct { + IndexType left, right; //!< Indices of points in leaf node + } lr; + struct { + int divfeat; //!< Dimension used for subdivision. + DistanceType divlow, divhigh; //!< The values used for subdivision. + } sub; + }; + Node* child1, * child2; //!< Child nodes (both=NULL mean its a leaf node) + }; + typedef Node* NodePtr; + + + struct Interval + { + ElementType low, high; + }; + + /** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */ + typedef typename array_or_vector_selector::container_t BoundingBox; + + /** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */ + typedef typename array_or_vector_selector::container_t distance_vector_t; + + /** The KD-tree used to find neighbours */ + NodePtr root_node; + BoundingBox root_bbox; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + public: + + Distance distance; + + /** + * KDTree constructor + * + * Refer to docs in README.md or online in https://github.com/jlblancoc/nanoflann + * + * The KD-Tree point dimension (the length of each point in the datase, e.g. 3 for 3D points) + * is determined by means of: + * - The \a DIM template parameter if >0 (highest priority) + * - Otherwise, the \a dimensionality parameter of this constructor. + * + * @param inputData Dataset with the input features + * @param params Basically, the maximum leaf node size + */ + KDTreeSingleIndexAdaptor(const int dimensionality, const DatasetAdaptor& inputData, const KDTreeSingleIndexAdaptorParams& params = KDTreeSingleIndexAdaptorParams() ) : + dataset(inputData), index_params(params), root_node(NULL), distance(inputData) + { + m_size = dataset.kdtree_get_point_count(); + m_size_at_index_build = m_size; + dim = dimensionality; + if (DIM>0) dim=DIM; + m_leaf_max_size = params.leaf_max_size; + + // Create a permutable array of indices to the input vectors. + init_vind(); + } + + /** Standard destructor */ + ~KDTreeSingleIndexAdaptor() { } + + /** Frees the previously-built index. Automatically called within buildIndex(). */ + void freeIndex() + { + pool.free_all(); + root_node=NULL; + m_size_at_index_build = 0; + } + + /** + * Builds the index + */ + void buildIndex() + { + init_vind(); + freeIndex(); + m_size_at_index_build = m_size; + if(m_size == 0) return; + computeBoundingBox(root_bbox); + root_node = divideTree(0, m_size, root_bbox ); // construct the tree + } + + /** Returns number of points in dataset */ + size_t size() const { return m_size; } + + /** Returns the length of each point in the dataset */ + size_t veclen() const { + return static_cast(DIM>0 ? 
DIM : dim); + } + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + size_t usedMemory() const + { + return pool.usedMemory+pool.wastedMemory+dataset.kdtree_get_point_count()*sizeof(IndexType); // pool memory and vind array memory + } + + /** \name Query methods + * @{ */ + + /** + * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored inside + * the result object. + * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * + * \tparam RESULTSET Should be any ResultSet + * \return True if the requested neighbors could be found. + * \sa knnSearch, radiusSearch + */ + template + bool findNeighbors(RESULTSET& result, const ElementType* vec, const SearchParams& searchParams) const + { + assert(vec); + if (size() == 0) + return false; + if (!root_node) + throw std::runtime_error("[nanoflann] findNeighbors() called before building the index."); + float epsError = 1+searchParams.eps; + + distance_vector_t dists; // fixed or variable-sized container (depending on DIM) + dists.assign((DIM>0 ? DIM : dim) ,0); // Fill it with zeros. + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node, distsq, dists, epsError); // "count_leaf" parameter removed since was neither used nor returned to the user. + return result.full(); + } + + /** + * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. Their indices are stored inside + * the result object. + * \sa radiusSearch, findNeighbors + * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. + */ + inline void knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const + { + nanoflann::KNNResultSet resultSet(num_closest); + resultSet.init(out_indices, out_distances_sq); + this->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); + } + + /** + * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius. + * The output is given as a vector of pairs, of which the first element is a point index and the second the corresponding distance. + * Previous contents of \a IndicesDists are cleared. + * + * If searchParams.sorted==true, the output list is sorted by ascending distances. + * + * For a better performance, it is advisable to do a .reserve() on the vector if you have any wild guess about the number of expected matches. + * + * \sa knnSearch, findNeighbors, radiusSearchCustomCallback + * \return The number of points within the given radius (i.e. indices.size() or dists.size() ) + */ + size_t radiusSearch(const ElementType *query_point,const DistanceType radius, std::vector >& IndicesDists, const SearchParams& searchParams) const + { + RadiusResultSet resultSet(radius,IndicesDists); + const size_t nFound = radiusSearchCustomCallback(query_point,resultSet,searchParams); + if (searchParams.sorted) + std::sort(IndicesDists.begin(),IndicesDists.end(), IndexDist_Sorter() ); + return nFound; + } + + /** + * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query. + * See the source of RadiusResultSet<> as a start point for your own classes. 
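+		 * A minimal sketch (float distances, size_t indices; `index`, `query_pt`
+		 * and `search_radius` are illustrative names; note that with the L2
+		 * adaptors the radius, like all distances, is squared):
+		 * \code
+		 * std::vector<std::pair<size_t,float> > matches;
+		 * RadiusResultSet<float,size_t> resultSet(search_radius, matches);
+		 * index.radiusSearchCustomCallback(&query_pt[0], resultSet);
+		 * \endcode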
+
+		/**
+		 * Just like radiusSearch() but with a custom callback class for each point found in the radius of the query.
+		 * See the source of RadiusResultSet<> as a start point for your own classes.
+		 * \sa radiusSearch
+		 */
+		template <class SEARCH_CALLBACK>
+		size_t radiusSearchCustomCallback(const ElementType *query_point,SEARCH_CALLBACK &resultSet, const SearchParams& searchParams = SearchParams() ) const
+		{
+			this->findNeighbors(resultSet, query_point, searchParams);
+			return resultSet.size();
+		}
+
+		/** @} */
+
+	private:
+		/** Make sure the auxiliary list \a vind has the same size as the current dataset, and re-generate it if the size has changed. */
+		void init_vind()
+		{
+			// Create a permutable array of indices to the input vectors.
+			m_size = dataset.kdtree_get_point_count();
+			if (vind.size()!=m_size) vind.resize(m_size);
+			for (size_t i = 0; i < m_size; i++) vind[i] = i;
+		}
+
+		/// Helper accessor to the dataset points:
+		inline ElementType dataset_get(size_t idx, int component) const {
+			return dataset.kdtree_get_pt(idx,component);
+		}
+
+
+		void save_tree(FILE* stream, NodePtr tree)
+		{
+			save_value(stream, *tree);
+			if (tree->child1!=NULL) {
+				save_tree(stream, tree->child1);
+			}
+			if (tree->child2!=NULL) {
+				save_tree(stream, tree->child2);
+			}
+		}
+
+
+		void load_tree(FILE* stream, NodePtr& tree)
+		{
+			tree = pool.allocate<Node>();
+			load_value(stream, *tree);
+			if (tree->child1!=NULL) {
+				load_tree(stream, tree->child1);
+			}
+			if (tree->child2!=NULL) {
+				load_tree(stream, tree->child2);
+			}
+		}
+
+
+		void computeBoundingBox(BoundingBox& bbox)
+		{
+			bbox.resize((DIM>0 ? DIM : dim));
+			if (dataset.kdtree_get_bbox(bbox))
+			{
+				// Done! It was implemented in derived class
+			}
+			else
+			{
+				const size_t N = dataset.kdtree_get_point_count();
+				if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but no data points found.");
+				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+					bbox[i].low =
+						bbox[i].high = dataset_get(0,i);
+				}
+				for (size_t k=1; k<N; ++k) {
+					for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+						if (dataset_get(k,i)<bbox[i].low) bbox[i].low = dataset_get(k,i);
+						if (dataset_get(k,i)>bbox[i].high) bbox[i].high = dataset_get(k,i);
+					}
+				}
+			}
+		}
+
+
+		/**
+		 * Create a tree node that subdivides the list of vecs from vind[first]
+		 * to vind[last].  The routine is called recursively on each sublist.
+		 *
+		 * @param left index of the first vector
+		 * @param right index of the last vector
+		 */
+		NodePtr divideTree(const IndexType left, const IndexType right, BoundingBox& bbox)
+		{
+			NodePtr node = pool.allocate<Node>(); // allocate memory
+
+			/* If too few exemplars remain, then make this a leaf node. */
+			if ( (right-left) <= m_leaf_max_size) {
+				node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
+				node->lr.left = left;
+				node->lr.right = right;
+
+				// compute bounding-box of leaf points
+				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+					bbox[i].low = dataset_get(vind[left],i);
+					bbox[i].high = dataset_get(vind[left],i);
+				}
+				for (IndexType k=left+1; k<right; ++k) {
+					for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+						if (bbox[i].low>dataset_get(vind[k],i)) bbox[i].low=dataset_get(vind[k],i);
+						if (bbox[i].high<dataset_get(vind[k],i)) bbox[i].high=dataset_get(vind[k],i);
+					}
+				}
+			}
+			else {
+				IndexType idx;
+				int cutfeat;
+				DistanceType cutval;
+				middleSplit_(&vind[0]+left, right-left, idx, cutfeat, cutval, bbox);
+
+				node->sub.divfeat = cutfeat;
+
+				BoundingBox left_bbox(bbox);
+				left_bbox[cutfeat].high = cutval;
+				node->child1 = divideTree(left, left+idx, left_bbox);
+
+				BoundingBox right_bbox(bbox);
+				right_bbox[cutfeat].low = cutval;
+				node->child2 = divideTree(left+idx, right, right_bbox);
+
+				node->sub.divlow = left_bbox[cutfeat].high;
+				node->sub.divhigh = right_bbox[cutfeat].low;
+
+				for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+					bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
+					bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
+				}
+			}
+
+			return node;
+		}
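+
+		/* Split strategy used by middleSplit_() below (sketch): cut the widest
+		 * bounding-box dimension at the midpoint of its interval, clamped into
+		 * [min,max] of the actual data. E.g. for 1-D values {1, 2, 9, 10} the
+		 * midpoint 5.5 lies inside [1,10], so the plane at 5.5 yields the
+		 * sublists {1,2} and {9,10}. */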
+
+
+		void computeMinMax(IndexType* ind, IndexType count, int element, ElementType& min_elem, ElementType& max_elem)
+		{
+			min_elem = dataset_get(ind[0],element);
+			max_elem = dataset_get(ind[0],element);
+			for (IndexType i=1; i<count; ++i) {
+				ElementType val = dataset_get(ind[i],element);
+				if (val<min_elem) min_elem = val;
+				if (val>max_elem) max_elem = val;
+			}
+		}
+
+		void middleSplit_(IndexType* ind, IndexType count, IndexType& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
+		{
+			const DistanceType EPS=static_cast<DistanceType>(0.00001);
+			ElementType max_span = bbox[0].high-bbox[0].low;
+			for (int i=1; i<(DIM>0 ? DIM : dim); ++i) {
+				ElementType span = bbox[i].high-bbox[i].low;
+				if (span>max_span) {
+					max_span = span;
+				}
+			}
+			ElementType max_spread = -1;
+			cutfeat = 0;
+			for (int i=0; i<(DIM>0 ? DIM : dim); ++i) {
+				ElementType span = bbox[i].high-bbox[i].low;
+				if (span>(1-EPS)*max_span) {
+					ElementType min_elem, max_elem;
+					computeMinMax(ind, count, i, min_elem, max_elem);
+					ElementType spread = max_elem-min_elem;
+					if (spread>max_spread) {
+						cutfeat = i;
+						max_spread = spread;
+					}
+				}
+			}
+			// split in the middle
+			DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2;
+			ElementType min_elem, max_elem;
+			computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+
+			if (split_val<min_elem) cutval = min_elem;
+			else if (split_val>max_elem) cutval = max_elem;
+			else cutval = split_val;
+
+			IndexType lim1, lim2;
+			planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+			if (lim1>count/2) index = lim1;
+			else if (lim2<count/2) index = lim2;
+			else index = count/2;
+		}
+
+
+		/**
+		 *  Subdivide the list of points by a plane perpendicular on axe corresponding
+		 *  to the 'cutfeat' dimension at 'cutval' position.
+		 *
+		 *  On return:
+		 *  dataset[ind[0..lim1-1]][cutfeat]<cutval
+		 *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval
+		 *  dataset[ind[lim2..count]][cutfeat]>cutval
+		 */
+		void planeSplit(IndexType* ind, const IndexType count, int cutfeat, DistanceType cutval, IndexType& lim1, IndexType& lim2)
+		{
+			/* Move vector indices for left subtree to front of list. */
+			IndexType left = 0;
+			IndexType right = count-1;
+			for (;; ) {
+				while (left<=right && dataset_get(ind[left],cutfeat)<cutval) ++left;
+				while (right && left<=right && dataset_get(ind[right],cutfeat)>=cutval) --right;
+				if (left>right || !right) break;  // "!right" was added to support unsigned Index types
+				std::swap(ind[left], ind[right]);
+				++left;
+				--right;
+			}
+			/* If either list is empty, it means that all remaining features
+			 * are identical. Split in the middle to maintain a balanced tree.
+			 */
+			lim1 = left;
+			right = count-1;
+			for (;; ) {
+				while (left<=right && dataset_get(ind[left],cutfeat)<=cutval) ++left;
+				while (right && left<=right && dataset_get(ind[right],cutfeat)>cutval) --right;
+				if (left>right || !right) break;  // "!right" was added to support unsigned Index types
+				std::swap(ind[left], ind[right]);
+				++left;
+				--right;
+			}
+			lim2 = left;
+		}
+
+		DistanceType computeInitialDistances(const ElementType* vec, distance_vector_t& dists) const
+		{
+			assert(vec);
+			DistanceType distsq = DistanceType();
+
+			for (int i = 0; i < (DIM>0 ? DIM : dim); ++i) {
+				if (vec[i] < root_bbox[i].low) {
+					dists[i] = distance.accum_dist(vec[i], root_bbox[i].low, i);
+					distsq += dists[i];
+				}
+				if (vec[i] > root_bbox[i].high) {
+					dists[i] = distance.accum_dist(vec[i], root_bbox[i].high, i);
+					distsq += dists[i];
+				}
+			}
+
+			return distsq;
+		}
+
+		/**
+		 * Performs an exact search in the tree starting from a node.
+		 * \tparam RESULTSET Should be any ResultSet<DistanceType>
+		 */
+		template <class RESULTSET>
+		void searchLevel(RESULTSET& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq,
+						 distance_vector_t& dists, const float epsError) const
+		{
+			/* If this is a leaf node, then do check and return. */
+			if ((node->child1 == NULL)&&(node->child2 == NULL)) {
+				//count_leaf += (node->lr.right-node->lr.left);  // Removed since was neither used nor returned to the user.
+				DistanceType worst_dist = result_set.worstDist();
+				for (IndexType i=node->lr.left; i<node->lr.right; ++i) {
+					const IndexType index = vind[i];// reorder... : i;
+					DistanceType dist = distance(vec, index, (DIM>0 ? DIM : dim));
+					if (dist<worst_dist) {
+						result_set.addPoint(dist,vind[i]);
+					}
+				}
+				return;
+			}
+
+			/* Which child branch should be taken first? */
+			int idx = node->sub.divfeat;
+			ElementType val = vec[idx];
+			DistanceType diff1 = val - node->sub.divlow;
+			DistanceType diff2 = val - node->sub.divhigh;
+
+			NodePtr bestChild;
+			NodePtr otherChild;
+			DistanceType cut_dist;
+			if ((diff1+diff2)<0) {
+				bestChild = node->child1;
+				otherChild = node->child2;
+				cut_dist = distance.accum_dist(val, node->sub.divhigh, idx);
+			}
+			else {
+				bestChild = node->child2;
+				otherChild = node->child1;
+				cut_dist = distance.accum_dist( val, node->sub.divlow, idx);
+			}
+
+			/* Call recursively to search next level down. */
+			searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);
+
+			DistanceType dst = dists[idx];
+			mindistsq = mindistsq + cut_dist - dst;
+			dists[idx] = cut_dist;
+			if (mindistsq*epsError<=result_set.worstDist()) {
+				searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);
+			}
+			dists[idx] = dst;
+		}
+
+	public:
+		/**  Stores the index in a binary file.
+		  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it.
+		  * See the example: examples/saveload_example.cpp
+		  * \sa loadIndex  */
+		void saveIndex(FILE* stream)
+		{
+			save_value(stream, m_size);
+			save_value(stream, dim);
+			save_value(stream, root_bbox);
+			save_value(stream, m_leaf_max_size);
+			save_value(stream, vind);
+			save_tree(stream, root_node);
+		}
+
+		/**  Loads a previous index from a binary file.
+		  *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index.
+		  * See the example: examples/saveload_example.cpp
+		  * \sa loadIndex  */
+		void loadIndex(FILE* stream)
+		{
+			load_value(stream, m_size);
+			load_value(stream, dim);
+			load_value(stream, root_bbox);
+			load_value(stream, m_leaf_max_size);
+			load_value(stream, vind);
+			load_tree(stream, root_node);
+		}
+
+	};   // class KDTree
+
+
+	/** An L2-metric KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage.
+	  *  Each row in the matrix represents a point in the state space.
+	  *
+	  *  Example of usage:
+	  * \code
+	  * 	Eigen::Matrix<num_t,Dynamic,Dynamic>  mat;
+	  * 	// Fill out "mat"...
+	  *
+	  * 	typedef KDTreeEigenMatrixAdaptor< Eigen::Matrix<num_t,Dynamic,Dynamic> >  my_kd_tree_t;
+	  * 	const int max_leaf = 10;
+	  * 	my_kd_tree_t   mat_index(mat.cols(), mat, max_leaf );
+	  * 	mat_index.index->buildIndex();
+	  * 	mat_index.index->...
+	  * \endcode
+	  *
+	  *  \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations.
+	  *  \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
+	  */
+	template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2>
+	struct KDTreeEigenMatrixAdaptor
+	{
+		typedef KDTreeEigenMatrixAdaptor<MatrixType,DIM,Distance> self_t;
+		typedef typename MatrixType::Scalar              num_t;
+		typedef typename MatrixType::Index               IndexType;
+		typedef typename Distance::template traits<num_t,self_t>::distance_t metric_t;
+		typedef KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType>  index_t;
+
+		index_t* index; //!< The kd-tree index for the user to call its methods as usual with any other FLANN index.
+
+		/// Constructor: takes a const ref to the matrix object with the data points
+		KDTreeEigenMatrixAdaptor(const int dimensionality, const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat)
+		{
+			const IndexType dims = mat.cols();
+			if (dims!=dimensionality) throw std::runtime_error("Error: 'dimensionality' must match column count in data matrix");
+			if (DIM>0 && static_cast<int>(dims)!=DIM)
+				throw std::runtime_error("Data set dimensionality does not match the 'DIM' template argument");
+			index = new index_t( dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) );
+			index->buildIndex();
+		}
+	private:
+		/** Hidden copy constructor, to disallow copying this class (Not implemented) */
+		KDTreeEigenMatrixAdaptor(const self_t&);
+	public:
+
+		~KDTreeEigenMatrixAdaptor() {
+			delete index;
+		}
+
+		const MatrixType &m_data_matrix;
+
+		/** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]).
+		  *  Note that this is a short-cut method for index->findNeighbors().
+		  *  The user can also call index->... methods as desired.
+		  * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.
+		  */
+		inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const
+		{
+			nanoflann::KNNResultSet<num_t,IndexType> resultSet(num_closest);
+			resultSet.init(out_indices, out_distances_sq);
+			index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
+		}
+
+		/** @name Interface expected by KDTreeSingleIndexAdaptor
+		  * @{ */
+
+		const self_t & derived() const {
+			return *this;
+		}
+		self_t & derived()       {
+			return *this;
+		}
+
+		// Must return the number of data points
+		inline size_t kdtree_get_point_count() const {
+			return m_data_matrix.rows();
+		}
+
+		// Returns the L2 distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
+		inline num_t kdtree_distance(const num_t *p1, const IndexType idx_p2,IndexType size) const
+		{
+			num_t s=0;
+			for (IndexType i=0; i<size; i++) {
+				const num_t d = p1[i]-m_data_matrix.coeff(idx_p2,i);
+				s += d*d;
+			}
+			return s;
+		}
+
+		// Returns the dim'th component of the idx'th point in the class:
+		inline num_t kdtree_get_pt(const IndexType idx, int dim) const {
+			return m_data_matrix.coeff(idx,dim);
+		}
+
+		// Optional bounding-box computation: return false to default to a standard bbox computation loop.
+		template <class BBOX>
+		bool kdtree_get_bbox(BBOX& /*bb*/) const {
+			return false;
+		}
+
+		/** @} */
+
+	}; // end of KDTreeEigenMatrixAdaptor
+	/** @} */
+
+/** @} */ // end of grouping
+} // end of NS
+
+
+#endif /* NANOFLANN_HPP_ */
diff --git a/workspace/usingneuralnet.h b/workspace/usingneuralnet.h
new file mode 100644
index 0000000..5d2449e
--- /dev/null
+++ b/workspace/usingneuralnet.h
@@ -0,0 +1,585 @@
+#ifndef USINGNEURALNET_H
+#define USINGNEURALNET_H
+
+
+#include
+
+#include "sensors/SensorReader.h"
+#include "Interpolator.h"
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+
+enum class PracticeType {
+	//REST,
+	JUMPING_JACK,
+	SITUPS,
+	PUSHUPS,
+	KNEEBEND,
+	FORWARDBEND,
+};
+
+std::string NAMES[] = {
+	"JUMPING_JACK",
+	"SITUPS",
+	"PUSHUPS",
+	"KNEEBEND",
+	"FORWARDBEND"
+};
+
+std::string COLORS[] = {
+	"#ff0000",
+	"#00ff00",
+	"#0000ff",
+	"#ffff00",
+	"#000000",
+	"#666666"
+};
+
+
+struct Practice {
+
+	PracticeType type;
+	Recording rec;
+	std::vector<uint64_t> keyGyro;
+
+	//Practice(const PracticeType p, const Recording& rec, const std::vector<uint64_t>& keyGyro) : p(p), rec(rec), keyGyro(keyGyro) {;}
+
+	K::Interpolator getInterpol() const {
+		K::Interpolator interpol;
+		for (auto it : rec.gyro.values) {interpol.add(it.ts, it.val);}
+		interpol.makeRelative();
+		return interpol;
+	}
+
+};
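+
+// Input-layer sizing (arithmetic sketch): getNetworkInput() below samples the gyro
+// interpolator at offsets -400..+350 ms in 50 ms steps, i.e. 800/50 = 16 samples,
+// each contributing x/y/z: 16 * 3 = 48 = NUM_IN.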
+
+static constexpr int NUM_IN  = 48;
+static constexpr int NUM_HID = 15;
+static constexpr int NUM_OUT = 5;
+
+static constexpr int NUM_ARGS = NUM_IN*NUM_HID + NUM_HID*NUM_OUT;
+
+class UsingNeuralNet {
+
+public:
+
+
+	///** interpolate the output for the given position using the provided range */
+	//template <typename T> T blur(K::Interpolator& interpol, const uint64_t ms, const int s = 3) {
+	//	return	interpol.get(ms-s*2) * 0.1 +
+	//			interpol.get(ms-s) * 0.2 +
+	//			interpol.get(ms) * 0.4 +
+	//			interpol.get(ms+s) * 0.2 +
+	//			interpol.get(ms+s*2) * 0.1;
+	//}
+
+
+	/** get the input vector for the neural network */
+	static std::vector<float> getNetworkInput(K::Interpolator& interpol, const uint64_t pos) {
+
+		std::vector<float> val;
+		val.resize(NUM_IN);
+		int idx = 0;
+
+		for (int offset = -400; offset < 400; offset += 50) {
+			SensorGyro gyro = interpol.get(pos + offset);
+			val[idx++] = gyro.x;
+			val[idx++] = gyro.y;
+			val[idx++] = gyro.z;
+			assert(idx <= NUM_IN);
+		}
+
+		return val;
+
+	}
+
+	/** get the index of the largest element within vec */
+	static int getMaxIdx(const std::vector<float>& vec) {
+		float max = 0;
+		int idx = 0;
+		for (int i = 0; i < NUM_OUT; ++i) {
+			if (vec[i] > max) {
+				max = vec[i];
+				idx = i;
+			}
+		}
+		return idx;
+	}
+
+	struct TMP {int index; float value;};
+
+	//static std::vector<TMP> getSorted(const K::NeuralNetResultIHO& vec) {
+	//	std::vector<TMP> tmp;
+	//	for (int i = 0; i < NUM_OUT; ++i) {tmp.push_back( TMP{i, vec.values[i]} );}
+	//	auto comp = [] (const TMP& t1, const TMP& t2) {return t2.value < t1.value;};
+	//	std::sort(tmp.begin(), tmp.end(), comp);
+	//	return tmp;
+	//}
+
+	/** sort the output-neuron values in descending order, keeping their indices */
+	static std::vector<TMP> getSorted(const std::vector<float>& vec) {
+		std::vector<TMP> tmp;
+		for (int i = 0; i < NUM_OUT; ++i) {tmp.push_back( TMP{i, vec[i]} );}
+		auto comp = [] (const TMP& t1, const TMP& t2) {return t2.value < t1.value;};
+		std::sort(tmp.begin(), tmp.end(), comp);
+		return tmp;
+	}
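+
+	// Example with illustrative values: for a net output of {0.1, 0.8, 0.3, 0.0, 0.6},
+	// getSorted() yields the index order {1, 4, 2, 0, 3}; resSort[0] is the winning
+	// class and resSort[0].value - resSort[1].value its margin (see getScore() below).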
+
+	static void debug(Practice& p, std::vector<float>& res) {
+		const int maxIdx = getMaxIdx(res);
+		const char max = (res[maxIdx] > 0.5) ? (maxIdx + '0') : ('?');
+		std::cout << "practice was: " << (int)p.type;
+		std::cout << " network says: " << max << "\t";
+		std::cout << "[";
+		for (int i = 0; i < NUM_OUT; ++i) {
+			std::cout << res[i] << ", ";
+		}
+		std::cout << "]" << std::endl;
+	}
+
+	static void debugPlot(Practice p) {
+
+		static K::Gnuplot gp;
+		K::GnuplotPlot plot;
+		K::GnuplotPlotElementLines line[3];
+
+		line[0].setColorHex("#ff0000");	line[0].setTitle("x");
+		line[1].setColorHex("#00ff00");	line[1].setTitle("y");
+		line[2].setColorHex("#0000ff");	line[2].setTitle("z");
+
+		plot.add(&line[0]);
+		plot.add(&line[1]);
+		plot.add(&line[2]);
+
+		K::Interpolator interpol = p.getInterpol();
+
+		volatile int len = p.rec.gyro.values.back().ts - p.rec.gyro.values.front().ts;
+		for (int ms = 0; ms < len; ms += 50) {
+			SensorGyro s = interpol.get(ms);
+			line[0].add(K::GnuplotPoint2(ms, s.x));
+			line[1].add(K::GnuplotPoint2(ms, s.y));
+			line[2].add(K::GnuplotPoint2(ms, s.z));
+		}
+
+		gp.setDebugOutput(true);
+		gp.draw(plot);
+
+		for (uint64_t ts : p.keyGyro) {
+			gp << "set arrow from " << ts << ",-10 to " << ts << ",+10\n";
+		}
+
+		gp.flush();
+
+	}
+
+
+	class MyOpt : public K::NumOptFunction {
+
+	public:
+
+		std::vector<Practice>& practices;
+		K::FeedForwardNeuralNet& net;
+
+		/** ctor */
+		MyOpt(std::vector<Practice>& practices, K::FeedForwardNeuralNet& net) : practices(practices), net(net) {
+			;
+		}
+
+//		static float getScore(const int shouldBe, const std::vector<float> values) {
+//			float points = 0;
+//			for (int i = 0; i < NUM_OUT; ++i) {
+//				if (i == shouldBe) {
+//					if (values[i] > 0.5) {points += values[i];}	// matches and > 0.5 -> score
+//				} else {
+//					if (values[i] > 0.5) {points -= values[i];}	// does not match but > 0.5 -> neg-score
+//				}
+//			}
+//			return points;
+//		}
+
+		static float getScore(const int shouldBe, const std::vector<float> values) {
+			// largest value matches the desired type -> good!
+			float points = 0;
+			std::vector<TMP> resSort = getSorted(values);
+			if (resSort[0].index == shouldBe) {
+				//if ( (resSort[0].value - resSort[1].value) > 0.25 ) {
+					points += 2;
+					points += resSort[0].value;
+					points -= resSort[1].value;
+				//}
+				//points += resSort[0].value;
+				//points += (resSort[0].value - resSort[1].value);
+			} else {
+				points -= 3;	// higher seems better!
+			}
+			return points;
+		}
+
+		static float getScorePos(const int shouldBe, const std::vector<float> values) {
+			float points = 0;
+			for (int idx = 0; idx < (int) values.size(); ++idx) {
+				const float v = values[idx];
+				if (idx == shouldBe) {
+					points += (v > 0.5) ? 1 : 0;
+					//points += (v > 0.5) ? v : 0;
+				} else {
+					points -= (v > 0.5) ? 1 : 0;
+					//points -= (v > 0.5) ? v : 0;
+				}
+			}
+			return points;
+		}
+
+		static float getScoreReject(const std::vector<float> values) {
+			float points = 0;
+			for (float v : values) {
+				points -= (v > 0.5) ? 1 : 0;
+				//points -= (v > 0.5) ? v : 0;
+			}
+			return points;
+		}
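+
+		// Sampling sketch for getValue() below, using the JUMPING_JACK key list as
+		// example: for k1 = 2900 and k2 = 3850, diff = 950. The positive range
+		// [k1 - diff/5, k1 + diff/5] = [2710, 3090] is sampled in 100 ms steps
+		// (4 windows around the key instant); the negative range
+		// [k1 + diff/2, k2 - diff/2] collapses to the single midpoint 3375,
+		// which is scored with getScoreReject().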
+
+		double getValue(const K::NumOptVector& args) const {
+
+			// configure the network
+			std::vector<float> vals;
+			for(int i = 0; i < NUM_ARGS; ++i) {vals.push_back(args[i]);}
+			net.setFactors(vals);
+
+			// temporals
+			float points = 0;
+			int cnt = 0;
+
+			// process every practice
+			for (const Practice& p : practices) {
+
+				// get the values for the neural-net-input
+				K::Interpolator interpol = p.getInterpol();
+
+				// process the first 3 (positive) occurrences within the practice
+				for (int key = 0; key < 3; ++key) {
+
+					uint64_t steps = 100;//(tTo - tFrom) / 8;
+
+					// positive
+					volatile uint64_t k1 = p.keyGyro[key];
+					volatile uint64_t k2 = p.keyGyro[key+1];
+					volatile uint64_t diff = k2 - k1;
+					volatile uint64_t tFrom = k1 - diff/5;
+					volatile uint64_t tTo = k1 + diff/5;
+					for (uint64_t o = tFrom; o <= tTo; o += steps) {
+						const std::vector<float> values = getNetworkInput(interpol, o);
+						const std::vector<float> res = net.get(values, true);
+						points += getScorePos((int)p.type, res);
+						++cnt;
+					}
+
+					// negative
+					tFrom = k1 + diff/2;
+					tTo = k2 - diff/2;
+					for (uint64_t o = tFrom; o <= tTo; o += steps) {
+						const std::vector<float> values = getNetworkInput(interpol, o);
+						const std::vector<float> res = net.get(values, true);
+						points += getScoreReject(res);
+						++cnt;
+					}
+
+				}
+
+
+//				// positive
+//				for (int ts = 1500; ts <= 7000; ts +=400) {
+//					const std::vector<float> values = getNetworkInput(interpol, ts);
+//					const std::vector<float> res = net.get(values, false);
+//					points += getScore((int)p.type, res);
+//				}
+
+			}
+
+			points /= cnt;
+
+			static float max = -999999;
+			if (points > max) {
+				max = points;
+				std::cout << points << std::endl;
+			}
+
+			return -points;
+
+		}
+
+	};
+
+
+
+
+	static void run() {
+
+		std::vector<Practice> practices;
+
+//		practices.push_back(
+//			Practice {
+//				PracticeType::REST,
+//				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/idle/restposition_gl_24.txt"),
+//				{1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000}
+//			}
+//		);
+
+
+		practices.push_back(
+			Practice {
+				PracticeType::JUMPING_JACK,
+				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"),
+				{1950, 2900, 3850, 4850, 5850, 6850, 7850, 8850, 9800, 10800, 11850}
+			}
+		);
+//		practices.push_back(
+//			Practice {
+//				PracticeType::JUMPING_JACK,
+//				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_6_subject_4_right.txt"),
+//				{2750, 3850, 4850, 5900, 7000, 7950, 9100 }
+//			}
+//		);
+//		practices.push_back(
+//			Practice {
+//				PracticeType::JUMPING_JACK,
+//				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_sw_5_subject_2_right.txt"),
+//				{1700, 2850, 4050, 5250, 6450, 7600, 8800}
+//			}
+//		);
+
+		practices.push_back(
+			Practice {
+				PracticeType::SITUPS,
+				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"),
+				{1850, 3250, 4750, 6150, 7550, 8950, 10350, 11600, 13000}
+			}
+		);
+
+		practices.push_back(
+			Practice {
+				PracticeType::PUSHUPS,
+				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"),
+				//{2750, 4200, 5850, 7400, 9000, 10650}
+				{3700, 5200, 6850, 8450, 10050, 11750}
+			}
+		);
+
+		practices.push_back(
+			Practice {
+				PracticeType::KNEEBEND,
+				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/kneebend/kneebend_gl_0_subject_0_right.txt"),
+				{2650, 4750, 6750, 8800, 10800, 12800}
+				//{3500, 5000, 8300, 9900, 11550}
+			}
+		);
+
+
+		practices.push_back(
+			Practice {
+				PracticeType::FORWARDBEND,
+				SensorReader::read("/mnt/firma/kunden/HandyGames/daten/forwardbend/forwardbend_gl_3_subject_1_left.txt"),
+				{3500, 9000, 14150, 19300}
+				//{3500, 5000, 8300, 9900, 11550}
+			}
+		);
SensorReader::read("/mnt/firma/kunden/HandyGames/daten/forwardbend/forwardbend_gl_3_subject_1_left.txt"), + {3500, 9000, 14150, 19300} + //{3500, 5000, 8300, 9900, 11550} + } + ); + + + + // practices.push_back( + // Practice { + // PracticeType::REST, + // SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"), + // {1950+500, 2900+500, 3850+500, 4850+500, 5850+500, 6850+500, 7850+500, 8850+500, 9800+500, 10800+500, 11850+500} + // } + // ); + // // practices.push_back( + // // Practice { + // // PracticeType::REST, + // // SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"), + // // //{2750, 4200, 5850, 7400, 9000, 10650} + // // {3500, 5000, 8300, 9900, 11550} + // // } + // // ); + // practices.push_back( + // Practice { + // PracticeType::REST, + // SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"), + // {1850+600, 3250+600, 4750+600, 6150+600, 7550+600, 8950+600, 10350+600, 11600+600, 13000+600} + // } + // ); + + + // debugPlot(practices.back()); + // sleep(100); + + + + + K::NumOptVector vec; + K::FeedForwardNeuralNet net; + net.setLayers({NUM_IN, NUM_HID, NUM_OUT}); + + MyOpt func(practices, net); + + // K::NumOptAlgoDownhillSimplex opt; + // opt.setMaxIterations(100); + // opt.setNumRestarts(2); + // opt.calculateOptimum(func, vec); + + K::NumOptAlgoGenetic opt; + opt.setElitism(0.07f); + opt.setPopulationSize(100); + opt.setMaxIterations(200); + opt.setMutation(0.20f); + opt.setValRange(0.25); + opt.calculateOptimum(func, vec); + + + + // // process every practice + // for (Practice& p : practices) { + + // // get the values for the neural-net-input + + // K::Interpolator interpol = p.getInterpol(); + + // // process every (positive) occurence within the practice + // for (uint64_t ts : p.keyGyro) { + + // std::vector values = getNetworkInput(interpol, ts); + // K::NeuralNetResultIHO res = net.getOutput(values.data()); + // debug(p, res); + + // { + // std::vector values = getNetworkInput(interpol, ts+500); + // K::NeuralNetResultIHO res = net.getOutput(values.data()); + // std::cout << "###"; debug(p, res); debugPlot(practices.back()); + // } + + // }getMaxIdx + + // } + + + K::Gnuplot gp1; + K::Gnuplot gp2; + + K::GnuplotPlot plot1; + K::GnuplotPlot plot2; + + K::GnuplotMultiplot plot(2,1); + plot.add(&plot1); + plot.add(&plot2); + + K::GnuplotPlotElementLines line[3]; + line[0].setColorHex("#ff0000"); line[0].setTitle("x"); + line[1].setColorHex("#00ff00"); line[1].setTitle("y"); + line[2].setColorHex("#0000ff"); line[2].setTitle("z"); + plot1.add(&line[0]); + plot1.add(&line[1]); + plot1.add(&line[2]); + + K::GnuplotPlotElementLines netLines[NUM_OUT]; + for (int i = 0; i < NUM_OUT; ++i) { + netLines[i].setColorHex(COLORS[i]); + netLines[i].setTitle(NAMES[i]); + netLines[i].setLineWidth(2); + } + + for (int i = 0; i < NUM_OUT; ++i) { + plot2.add(&netLines[i]); + } + + // process every practice + for (Practice& p : practices) { + + // get the values for the neural-net-input + + K::Interpolator interpol = p.getInterpol(); + line[0].clear(); + line[1].clear(); + line[2].clear(); + + for (int i = 0; i < NUM_OUT; ++i) { + netLines[i].clear(); + } + + for (int ms = 0; ms < 20000; ms += 50) { // K::Gnuplot gp; + // K::GnuplotPlot plot; + // K::GnuplotPlotElementLines line[3]; + // line[0].setColorHex("#ff0000"); line[0].setTitle("x"); + // line[1].setColorHex("#00ff00"); line[1].setTitle("y"); + // line[2].setColorHex("#0000ff"); 
line[2].setTitle("z"); + + // Practice p1 = practices[0]; + + // auto interpol = p1.getInterpol(); + // for (int ms = 0; ms < 20000; ms += 50) { + // SensorGyro s = blur(interpol, ms, 10); + // line[0].add(K::GnuplotPoint2(ms, s.x)); + // line[1].add(K::GnuplotPoint2(ms, s.y)); + // line[2].add(K::GnuplotPoint2(ms, s.z)); + // } + + // plot.add(&line[0]); + // plot.add(&line[1]); + // plot.add(&line[2]); + // gp.draw(plot); + // for (uint64_t ts : p1.keyGyro) { + // gp << "set arrow from " << ts << ",-10 to " << ts << ",+10\n"; + // } + // gp.flush(); + SensorGyro s = interpol.get(ms); + line[0].add(K::GnuplotPoint2(ms, s.x)); + line[1].add(K::GnuplotPoint2(ms, s.y)); + line[2].add(K::GnuplotPoint2(ms, s.z)); + } + + // process every (positive) occurence within the practice + for (int ts = 1000; ts < 10000; ts += 50) { + + std::vector values = getNetworkInput(interpol, ts); + std::vector res = net.get(values); + debug(p, res); + + for (int i = 0; i < NUM_OUT; ++i) { + float val = res[i]; + val = (val < 0.5) ? 0 : 1; + netLines[i].add(K::GnuplotPoint2(ts, val)); + } + + gp1 << "set arrow 1 from " << ts-500 << ",-10 to " << ts-500 << ",+10\n"; + gp1 << "set arrow 2 from " << ts+500 << ",-10 to " << ts+500 << ",+10\n"; + gp1.draw(plot1); + gp1.flush(); + + gp2.draw(plot2); + gp2.flush(); + + usleep(1000*50); + + + } + + } + + sleep(1000); + + } + + +}; + + +#endif // USINGNEURALNET_H + diff --git a/workspace/usingpca.h b/workspace/usingpca.h new file mode 100644 index 0000000..b02f37e --- /dev/null +++ b/workspace/usingpca.h @@ -0,0 +1,113 @@ +#ifndef USINGPCA_H +#define USINGPCA_H + +#include + +#include "sensors/SensorReader.h" +#include "Interpolator.h" + +#include + +enum class PracticeType { + //REST, + JUMPING_JACK, + SITUPS, + PUSHUPS, + KNEEBEND, + FORWARDBEND, +}; + +struct Practice { + + PracticeType type; + Recording rec; + std::vector keyGyro; + + //Practice(const PracticeType p, const Recording& rec, const std::vector& keyGyro) : p(p), rec(rec), keyGyro(keyGyro) {;} + + K::Interpolator getInterpol() const { + K::Interpolator interpol; + for (auto it : rec.gyro.values) {interpol.add(it.ts, it.val);} + interpol.makeRelative(); + return interpol; + } + +}; + +class UsingPCA { + + +public: + + static Eigen::VectorXf getWindow(Practice& p, uint64_t pos) { + K::Interpolator interpol = p.getInterpol(); + Eigen::VectorXf vec(600/50*3, 1); + int idx = 0; + for (int offset = -300; offset < 300; offset += 50) { + SensorGyro gyro = interpol.get(pos + offset); + vec(idx++,0) = (gyro.x); + vec(idx++,0) = (gyro.y); + vec(idx++,0) = (gyro.z); + } + std::cout << vec << std::endl; + return vec; + } + + static std::vector getClassWindows(Practice& p) { + + std::vector windows; + for (uint64_t pos = 1000; pos < 5000; pos += 500) { + Eigen::VectorXf window = getWindow(p, pos); + windows.push_back(window); + } + return windows; + + } + + static Eigen::MatrixXf getR(std::vector& vecs) { + Eigen::MatrixXf mat = Eigen::MatrixXf::Zero(vecs[0].rows(), vecs[0].rows()); + for (const Eigen::VectorXf& vec : vecs) { + mat += vec * vec.transpose(); + } + mat /= vecs.size(); + return mat; + } + + static Eigen::VectorXf getM(std::vector& vecs) { + Eigen::MatrixXf mat = Eigen::MatrixXf::Zero(vecs[0].rows(), vecs[0].cols()); + for (const Eigen::VectorXf& vec : vecs) { + mat += vec; + } + mat /= vecs.size(); + return mat; + } + + static void run() { + + std::vector practices; + + practices.push_back( + Practice { + PracticeType::JUMPING_JACK, + 
SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"), + {1950, 2900, 3850, 4850, 5850, 6850, 7850, 8850, 9800, 10800, 11850} + } + ); + + + std::vector windows = getClassWindows(practices.back()); + Eigen::MatrixXf R = getR(windows); + Eigen::MatrixXf m = getM(windows); + Eigen::MatrixXf Q = R - (m * m.transpose()); + + Eigen::SelfAdjointEigenSolver es; + es.compute(Q); + + int i = 0; + + } + +}; + +#endif // USINGPCA_H +