#ifndef USINGNEURALNET_H
#define USINGNEURALNET_H

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include <unistd.h>

#include "sensors/SensorReader.h"
#include "Interpolator.h"

#include <KLib/misc/gnuplot/Gnuplot.h>
#include <KLib/misc/gnuplot/GnuplotPlot.h>
#include <KLib/misc/gnuplot/GnuplotPlotElementLines.h>
#include <KLib/misc/gnuplot/GnuplotMultiplot.h>

#include <KLib/math/neuralnet/FeedForwardNeuralNet.h>
#include <KLib/math/optimization/NumOptAlgoGenetic.h>
#include <KLib/math/optimization/NumOptAlgoDownhillSimplex.h>

enum class PracticeType {
    //REST,
    JUMPING_JACK,
    SITUPS,
    PUSHUPS,
    KNEEBEND,
    FORWARDBEND,
};

static const std::string NAMES[] = {
    "JUMPING_JACK",
    "SITUPS",
    "PUSHUPS",
    "KNEEBEND",
    "FORWARDBEND"
};

static const std::string COLORS[] = {
    "#ff0000",
    "#00ff00",
    "#0000ff",
    "#ffff00",
    "#000000",
    "#666666"
};
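
// Note: the enum's ordinal doubles as the index into NAMES/COLORS and into the
// network's output vector (see getScorePos() and debug() below); COLORS carries
// one spare entry beyond the five classes.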

struct Practice {

    PracticeType type;
    Recording rec;
    std::vector<uint64_t> keyGyro;

    //Practice(const PracticeType p, const Recording& rec, const std::vector<uint64_t>& keyGyro) : p(p), rec(rec), keyGyro(keyGyro) {;}

    K::Interpolator<uint64_t, SensorGyro> getInterpol() const {
        K::Interpolator<uint64_t, SensorGyro> interpol;
        for (auto it : rec.gyro.values) {interpol.add(it.ts, it.val);}
        interpol.makeRelative();
        return interpol;
    }

};

static constexpr int NUM_IN = 48;
static constexpr int NUM_HID = 15;
static constexpr int NUM_OUT = 5;

static constexpr int NUM_ARGS = NUM_IN*NUM_HID + NUM_HID*NUM_OUT;
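
// Network layout (as wired up in run() via net.setLayers({NUM_IN, NUM_HID, NUM_OUT})):
// 48 inputs = 16 gyro samples x 3 axes (see getNetworkInput), 15 hidden units, 5 outputs
// (one per PracticeType). NUM_ARGS = 48*15 + 15*5 = 795 weights to optimize; the formula
// suggests a fully connected net without bias terms, which is an assumption about KLib's
// FeedForwardNeuralNet and not verified here.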

class UsingNeuralNet {

public:

    ///** interpolate the output for the given position using the provided range */
    //template <typename T> T blur(K::Interpolator<uint64_t, T>& interpol, const uint64_t ms, const int s = 3) {
    //	return interpol.get(ms-s*2) * 0.1 +
    //	       interpol.get(ms-s)   * 0.2 +
    //	       interpol.get(ms)     * 0.4 +
    //	       interpol.get(ms+s)   * 0.2 +
    //	       interpol.get(ms+s*2) * 0.1;
    //}

    /** get the input vector for the neural network */
    static std::vector<float> getNetworkInput(K::Interpolator<uint64_t, SensorGyro>& interpol, const uint64_t pos) {

        std::vector<float> val;
        val.resize(NUM_IN);
        int idx = 0;

        for (int offset = -400; offset < 400; offset += 50) {
            SensorGyro gyro = interpol.get(pos + offset);
            val[idx++] = gyro.x;
            val[idx++] = gyro.y;
            val[idx++] = gyro.z;
            assert(idx <= NUM_IN);
        }

        return val;

    }
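
    // Input window produced by the loop above: 16 offsets from -400 ms to +350 ms in 50 ms
    // steps, each contributing the interpolated x/y/z gyro value, i.e. 16*3 = 48 = NUM_IN
    // floats centered on 'pos'. Caveat: 'pos + offset' is unsigned arithmetic, so for
    // pos < 400 ms the query wraps around; this relies on the (assumed) clamping behavior
    // of K::Interpolator for out-of-range timestamps.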

    /** get the index of the largest element within vec */
    static int getMaxIdx(const std::vector<float>& vec) {
        float max = 0;
        int idx = 0;
        for (int i = 0; i < NUM_OUT; ++i) {
            if (vec[i] > max) {
                max = vec[i];
                idx = i;
            }
        }
        return idx;
    }

    struct TMP {int index; float value;};

    //static std::vector<TMP> getSorted(const K::NeuralNetResultIHO<NUM_OUT>& vec) {
    //	std::vector<TMP> tmp;
    //	for (int i = 0; i < NUM_OUT; ++i) {tmp.push_back( TMP{i, vec.values[i]} );}
    //	auto comp = [] (const TMP& t1, const TMP& t2) {return t2.value < t1.value;};
    //	std::sort(tmp.begin(), tmp.end(), comp);
    //	return tmp;
    //}

    /** get (index, value) pairs for vec, sorted by descending value */
    static std::vector<TMP> getSorted(const std::vector<float>& vec) {
        std::vector<TMP> tmp;
        for (int i = 0; i < NUM_OUT; ++i) {tmp.push_back( TMP{i, vec[i]} );}
        auto comp = [] (const TMP& t1, const TMP& t2) {return t2.value < t1.value;};
        std::sort(tmp.begin(), tmp.end(), comp);
        return tmp;
    }
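
    // Example (hypothetical values): for res = {0.1, 0.7, 0.2, 0.6, 0.0}, getMaxIdx(res) == 1
    // and getSorted(res) yields the indices 1, 3, 2, 0, 4; getScore() below looks at the gap
    // between the first two sorted entries.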

    static void debug(Practice& p, std::vector<float>& res) {
        const int maxIdx = getMaxIdx(res);
        const char max = (res[maxIdx] > 0.5) ? (maxIdx + '0') : ('?');
        std::cout << "practice was: " << (int)p.type;
        std::cout << " network says: " << max << "\t";
        std::cout << "[";
        for (int i = 0; i < NUM_OUT; ++i) {
            std::cout << res[i] << ", ";
        }
        std::cout << "]" << std::endl;
    }

    static void debugPlot(Practice p) {

        static K::Gnuplot gp;
        K::GnuplotPlot plot;
        K::GnuplotPlotElementLines line[3];

        line[0].setColorHex("#ff0000");	line[0].setTitle("x");
        line[1].setColorHex("#00ff00");	line[1].setTitle("y");
        line[2].setColorHex("#0000ff");	line[2].setTitle("z");

        plot.add(&line[0]);
        plot.add(&line[1]);
        plot.add(&line[2]);

        K::Interpolator<uint64_t, SensorGyro> interpol = p.getInterpol();

        const int len = p.rec.gyro.values.back().ts - p.rec.gyro.values.front().ts;
        for (int ms = 0; ms < len; ms += 50) {
            SensorGyro s = interpol.get(ms);
            line[0].add(K::GnuplotPoint2(ms, s.x));
            line[1].add(K::GnuplotPoint2(ms, s.y));
            line[2].add(K::GnuplotPoint2(ms, s.z));
        }

        gp.setDebugOutput(true);
        gp.draw(plot);

        for (uint64_t ts : p.keyGyro) {
            gp << "set arrow from " << ts << ",-10 to " << ts << ",+10\n";
        }

        gp.flush();

    }
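
    // MyOpt: the fitness function handed to the numeric optimizer. Each candidate weight
    // vector is loaded into the shared network, scored over sampled windows of all practices,
    // and the averaged score is returned negated, i.e. the optimizer is assumed to minimize
    // getValue().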

    class MyOpt : public K::NumOptFunction<NUM_ARGS> {

    public:

        std::vector<Practice>& practices;
        K::FeedForwardNeuralNet<float, K::FeedForwardNeuralNetOPLogistic>& net;

        /** ctor */
        MyOpt(std::vector<Practice>& practices, K::FeedForwardNeuralNet<float, K::FeedForwardNeuralNetOPLogistic>& net) : practices(practices), net(net) {
            ;
        }

//        static float getScore(const int shouldBe, const std::vector<float> values) {
//            float points = 0;
//            for (int i = 0; i < NUM_OUT; ++i) {
//                if (i == shouldBe) {
//                    if (values[i] > 0.5) {points += values[i];}		// matches and > 0.5 -> score
//                } else {
//                    if (values[i] > 0.5) {points -= values[i];}		// does not match but > 0.5 -> neg-score
//                }
//            }
//            return points;
//        }

        static float getScore(const int shouldBe, const std::vector<float> values) {
            // largest value matches the desired type -> good!
            float points = 0;
            std::vector<TMP> resSort = getSorted(values);
            if (resSort[0].index == shouldBe) {
                //if ( (resSort[0].value - resSort[1].value) > 0.25 ) {
                    points += 2;
                    points += resSort[0].value;
                    points -= resSort[1].value;
                //}
                //points += resSort[0].value;
                //points += (resSort[0].value - resSort[1].value);
            } else {
                points -= 3;	// higher seems better!
            }
            return points;
        }

        static float getScorePos(const int shouldBe, const std::vector<float> values) {
            float points = 0;
            for (size_t idx = 0; idx < values.size(); ++idx) {
                const float v = values[idx];
                if ((int)idx == shouldBe) {
                    points += (v > 0.5) ? 1 : 0;
                    //points += (v > 0.5) ? v : 0;
                } else {
                    points -= (v > 0.5) ? 1 : 0;
                    //points -= (v > 0.5) ? v : 0;
                }
            }
            return points;
        }

        static float getScoreReject(const std::vector<float> values) {
            float points = 0;
            for (float v : values) {
                points -= (v > 0.5) ? 1 : 0;
                //points -= (v > 0.5) ? v : 0;
            }
            return points;
        }
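
        // Scoring scheme (as used below in getValue): getScorePos rewards windows near a key
        // timestamp where only the correct output fires (> 0.5) and penalizes every other
        // firing output; getScoreReject penalizes any output that fires on windows between
        // repetitions, i.e. the "no exercise" case. The margin-based getScore is only
        // referenced from commented-out code.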

        double getValue(const K::NumOptVector<NUM_ARGS>& args) const {

            // configure the network
            std::vector<float> vals;
            for (int i = 0; i < NUM_ARGS; ++i) {vals.push_back(args[i]);}
            net.setFactors(vals);

            // temporals
            float points = 0;
            int cnt = 0;

            // process every practice
            for (const Practice& p : practices) {

                // get the values for the neural-net-input
                K::Interpolator<uint64_t, SensorGyro> interpol = p.getInterpol();

                // process the first 3 (positive) occurrences within the practice
                for (int key = 0; key < 3; ++key) {

                    const uint64_t steps = 100;//(tTo - tFrom) / 8;

                    // positive: windows around the key timestamp
                    const uint64_t k1 = p.keyGyro[key];
                    const uint64_t k2 = p.keyGyro[key+1];
                    const uint64_t diff = k2 - k1;
                    uint64_t tFrom = k1 - diff/5;
                    uint64_t tTo = k1 + diff/5;
                    for (uint64_t o = tFrom; o <= tTo; o += steps) {
                        const std::vector<float> values = getNetworkInput(interpol, o);
                        const std::vector<float> res = net.get(values, true);
                        points += getScorePos((int)p.type, res);
                        ++cnt;
                    }

                    // negative: windows between two repetitions
                    tFrom = k1 + diff/2;
                    tTo = k2 - diff/2;
                    for (uint64_t o = tFrom; o <= tTo; o += steps) {
                        const std::vector<float> values = getNetworkInput(interpol, o);
                        const std::vector<float> res = net.get(values, true);
                        points += getScoreReject(res);
                        ++cnt;
                    }

                }

//                // positive
//                for (int ts = 1500; ts <= 7000; ts +=400) {
//                    const std::vector<float> values = getNetworkInput(interpol, ts);
//                    const std::vector<float> res = net.get(values, false);
//                    points += getScore((int)p.type, res);
//                }

            }

            points /= cnt;

            static float max = -999999;
            if (points > max) {
                max = points;
                std::cout << points << std::endl;
            }

            return -points;

        }

    };
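
    // run(): load the labeled recordings, fit the network weights with a genetic optimizer
    // against MyOpt's score, then replay each recording through the trained net and plot the
    // raw gyro signal (plot1) next to the thresholded per-class network outputs (plot2).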

    static void run() {

        std::vector<Practice> practices;

//        practices.push_back(
//            Practice {
//                PracticeType::REST,
//                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/idle/restposition_gl_24.txt"),
//                {1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000}
//            }
//        );

        practices.push_back(
            Practice {
                PracticeType::JUMPING_JACK,
                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"),
                {1950, 2900, 3850, 4850, 5850, 6850, 7850, 8850, 9800, 10800, 11850}
            }
        );
//        practices.push_back(
//            Practice {
//                PracticeType::JUMPING_JACK,
//                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_6_subject_4_right.txt"),
//                {2750, 3850, 4850, 5900, 7000, 7950, 9100 }
//            }
//        );
//        practices.push_back(
//            Practice {
//                PracticeType::JUMPING_JACK,
//                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_sw_5_subject_2_right.txt"),
//                {1700, 2850, 4050, 5250, 6450, 7600, 8800}
//            }
//        );

        practices.push_back(
            Practice {
                PracticeType::SITUPS,
                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"),
                {1850, 3250, 4750, 6150, 7550, 8950, 10350, 11600, 13000}
            }
        );

        practices.push_back(
            Practice {
                PracticeType::PUSHUPS,
                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"),
                //{2750, 4200, 5850, 7400, 9000, 10650}
                {3700, 5200, 6850, 8450, 10050, 11750}
            }
        );

        practices.push_back(
            Practice {
                PracticeType::KNEEBEND,
                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/kneebend/kneebend_gl_0_subject_0_right.txt"),
                {2650, 4750, 6750, 8800, 10800, 12800}
                //{3500, 5000, 8300, 9900, 11550}
            }
        );

        practices.push_back(
            Practice {
                PracticeType::FORWARDBEND,
                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/forwardbend/forwardbend_gl_3_subject_1_left.txt"),
                {3500, 9000, 14150, 19300}
                //{3500, 5000, 8300, 9900, 11550}
            }
        );

//        practices.push_back(
//            Practice {
//                PracticeType::REST,
//                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/jumpingjack/jumpingjack_gl_5_subject_3_left.txt"),
//                {1950+500, 2900+500, 3850+500, 4850+500, 5850+500, 6850+500, 7850+500, 8850+500, 9800+500, 10800+500, 11850+500}
//            }
//        );
//        // practices.push_back(
//        //     Practice {
//        //         PracticeType::REST,
//        //         SensorReader::read("/mnt/firma/kunden/HandyGames/daten/pushups/pushups_gl_8_subject_4_right.txt"),
//        //         //{2750, 4200, 5850, 7400, 9000, 10650}
//        //         {3500, 5000, 8300, 9900, 11550}
//        //     }
//        // );
//        practices.push_back(
//            Practice {
//                PracticeType::REST,
//                SensorReader::read("/mnt/firma/kunden/HandyGames/daten/situps/situps_gl_12_subject_1_left.txt"),
//                {1850+600, 3250+600, 4750+600, 6150+600, 7550+600, 8950+600, 10350+600, 11600+600, 13000+600}
//            }
//        );

//        debugPlot(practices.back());
//        sleep(100);

        K::NumOptVector<NUM_ARGS> vec;
        K::FeedForwardNeuralNet<float, K::FeedForwardNeuralNetOPLogistic> net;
        net.setLayers({NUM_IN, NUM_HID, NUM_OUT});

        MyOpt func(practices, net);

//        K::NumOptAlgoDownhillSimplex<NUM_ARGS> opt;
//        opt.setMaxIterations(100);
//        opt.setNumRestarts(2);
//        opt.calculateOptimum(func, vec);

        K::NumOptAlgoGenetic<NUM_ARGS> opt;
        opt.setElitism(0.07f);
        opt.setPopulationSize(100);
        opt.setMaxIterations(200);
        opt.setMutation(0.40f);
        opt.setValRange(0.20);
        opt.calculateOptimum(func, vec);
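
        // Reading of the GA settings (assumptions about KLib's NumOptAlgoGenetic, not verified
        // here): 100 candidate weight vectors per generation, 200 generations, 7% elitism, 40%
        // mutation, with setValRange(0.20) presumably bounding how far a single weight may move.
        // The best vector is written to 'vec'; the plotting code below relies on the net still
        // holding usable factors from the optimization. If that is not guaranteed, the optimum
        // should be applied explicitly via net.setFactors (as done in MyOpt::getValue) first.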

//        // process every practice
//        for (Practice& p : practices) {
//
//            // get the values for the neural-net-input
//            K::Interpolator<uint64_t, SensorGyro> interpol = p.getInterpol();
//
//            // process every (positive) occurrence within the practice
//            for (uint64_t ts : p.keyGyro) {
//
//                std::vector<float> values = getNetworkInput(interpol, ts);
//                K::NeuralNetResultIHO<NUM_OUT> res = net.getOutput(values.data());
//                debug(p, res);
//
//                {
//                    std::vector<float> values = getNetworkInput(interpol, ts+500);
//                    K::NeuralNetResultIHO<NUM_OUT> res = net.getOutput(values.data());
//                    std::cout << "###"; debug(p, res); debugPlot(practices.back());
//                }
//
//            }
//
//        }

        K::Gnuplot gp1;
        K::Gnuplot gp2;

        K::GnuplotPlot plot1;
        K::GnuplotPlot plot2;

        K::GnuplotMultiplot plot(2,1);
        plot.add(&plot1);
        plot.add(&plot2);

        K::GnuplotPlotElementLines line[3];
        line[0].setColorHex("#ff0000");	line[0].setTitle("x");
        line[1].setColorHex("#00ff00");	line[1].setTitle("y");
        line[2].setColorHex("#0000ff");	line[2].setTitle("z");
        plot1.add(&line[0]);
        plot1.add(&line[1]);
        plot1.add(&line[2]);

        K::GnuplotPlotElementLines netLines[NUM_OUT];
        for (int i = 0; i < NUM_OUT; ++i) {
            netLines[i].setColorHex(COLORS[i]);
            netLines[i].setTitle(NAMES[i]);
            netLines[i].setLineWidth(2);
        }

        for (int i = 0; i < NUM_OUT; ++i) {
            plot2.add(&netLines[i]);
        }
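
        // plot1: raw (interpolated) gyro x/y/z of the current practice; plot2: one line per
        // PracticeType showing the network output thresholded at 0.5. The multiplot object is
        // prepared but the two plots are currently drawn through separate gnuplot instances.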

        // process every practice
        for (Practice& p : practices) {

            // get the values for the neural-net-input
            K::Interpolator<uint64_t, SensorGyro> interpol = p.getInterpol();

            line[0].clear();
            line[1].clear();
            line[2].clear();

            for (int i = 0; i < NUM_OUT; ++i) {
                netLines[i].clear();
            }

//            K::Gnuplot gp;
//            K::GnuplotPlot plot;
//            K::GnuplotPlotElementLines line[3];
//            line[0].setColorHex("#ff0000");	line[0].setTitle("x");
//            line[1].setColorHex("#00ff00");	line[1].setTitle("y");
//            line[2].setColorHex("#0000ff");	line[2].setTitle("z");
//
//            Practice p1 = practices[0];
//
//            auto interpol = p1.getInterpol();
//            for (int ms = 0; ms < 20000; ms += 50) {
//                SensorGyro s = blur(interpol, ms, 10);
//                line[0].add(K::GnuplotPoint2(ms, s.x));
//                line[1].add(K::GnuplotPoint2(ms, s.y));
//                line[2].add(K::GnuplotPoint2(ms, s.z));
//            }
//
//            plot.add(&line[0]);
//            plot.add(&line[1]);
//            plot.add(&line[2]);
//            gp.draw(plot);
//            for (uint64_t ts : p1.keyGyro) {
//                gp << "set arrow from " << ts << ",-10 to " << ts << ",+10\n";
//            }
//            gp.flush();

            for (int ms = 0; ms < 12000; ms += 100) {
                SensorGyro s = interpol.get(ms);
                line[0].add(K::GnuplotPoint2(ms, s.x));
                line[1].add(K::GnuplotPoint2(ms, s.y));
                line[2].add(K::GnuplotPoint2(ms, s.z));
            }

            // slide a window over the practice (first 12 seconds, 100 ms steps) and query the network
            for (int ts = 0; ts < 12000; ts += 100) {

                std::vector<float> values = getNetworkInput(interpol, ts);
                std::vector<float> res = net.get(values);
                debug(p, res);

                for (int i = 0; i < NUM_OUT; ++i) {
                    float val = res[i];
                    val = (val < 0.5) ? 0 : 1;
                    netLines[i].add(K::GnuplotPoint2(ts, val));
                }

                gp1 << "set arrow 1 from " << ts-500 << ",-10 to " << ts-500 << ",+10\n";
                gp1 << "set arrow 2 from " << ts+500 << ",-10 to " << ts+500 << ",+10\n";
                gp1.draw(plot1);
                gp1.flush();

                gp2.draw(plot2);
                gp2.flush();

                //usleep(1000*50);

            }
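
            // Note: the two arrows added to plot1 in the loop above roughly bracket the
            // +/-400 ms input window around the current position, so the raw signal and the
            // thresholded outputs on plot2 can be compared while the window slides along the
            // recording.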

            gp1.flush();
            gp2.flush();

            // additionally export both plots as EMF files and dump the gnuplot commands to /tmp
            std::string fileRaw = "raw_" + std::to_string((int)p.type);
            std::string fileNet = "net_" + std::to_string((int)p.type);
            gp1 << "set terminal emf size 600,250\n set output '"<<fileRaw<<".emf'\n unset xtics\n unset key\n unset arrow 1\n unset arrow 2\n set format y ' '\n";
            gp2 << "set terminal emf size 600,250\n set output '"<<fileNet<<".emf'\n unset xtics\n unset key\n set format y ' '\n";

            gp1.draw(plot1);
            gp2.draw(plot2);

            std::ofstream out1("/tmp/"+fileRaw+".gp");	out1 << gp1.getBuffer();	out1.close();
            std::ofstream out2("/tmp/"+fileNet+".gp");	out2 << gp2.getBuffer();	out2.close();
            gp1.flush();
            gp2.flush();

        }

        sleep(1000);

    }

};

#endif // USINGNEURALNET_H