added the mixing particle filter model with all its might and failures :)
@@ -32,8 +32,7 @@
#include <Indoor/data/Timestamp.h>

#include <KLib/math/filter/particles/Particle.h>
#include <KLib/math/filter/particles/ParticleFilter.h>
#include <KLib/math/filter/particles/ParticleFilterHistory.h>
#include <KLib/math/filter/particles/ParticleFilterMixing.h>
#include <KLib/math/filter/particles/ParticleFilterInitializer.h>

#include <KLib/math/filter/particles/estimation/ParticleFilterEstimationWeightedAverage.h>
@@ -45,12 +44,81 @@
#include <KLib/math/filter/particles/resampling/ParticleFilterResamplingPercent.h>
#include <KLib/math/filter/particles/resampling/ParticleFilterResamplingDivergence.h>

#include <KLib/math/filter/merging/MarkovTransitionProbability.h>
#include <KLib/math/filter/merging/mixing/MixingSamplerDivergency.h>
#include <KLib/math/filter/merging/estimation/JointEstimationPosteriorOnly.h>

#include "Structs.h"

#include "../Plotti.h"
#include "Logic.h"
#include "../Settings.h"

double __KLD = 0.0;	// debugging global: last divergence value computed in update()

// TODO: function should return the transition probability matrix for the Markov chain!
// getKernelDensityProbability should work fine for a first shot! Nevertheless we need to do 2 kernel density estimations, one for each filter :( :( :(
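
// Summary of the approach below: estimate the density of each mode's particle set on
// the grid nodes with a Parzen window (calcKernelDensity), measure how far the two
// densities diverge (Kullback-Leibler), and map the divergence through
// exp(-lambda * KLD) into the 2x2 Markov mode-transition matrix returned by update().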
struct ModeProbabilityTransition : public K::MarkovTransitionProbability<MyState, MyControl, MyObs> {

	Grid<MyNode>& grid;
	const double lambda;

	ModeProbabilityTransition(Grid<MyNode>& grid, double lambda) : grid(grid), lambda(lambda) {;}

	virtual Eigen::MatrixXd update(std::vector<K::ParticleFilterMixing<MyState, MyControl, MyObs>>& modes) override {

		std::vector<double> probsWifiV;
		std::vector<double> probsParticleV;

		// modes[0] -> posterior & modes[1] -> Wifi ---- I know what I'm doing :)
		for (const MyNode& node : grid.getNodes()) {

			const double probParzenPosterior = calcKernelDensity(node, modes[0].getParticles());
			probsParticleV.push_back(probParzenPosterior);

			const double probParzenWifi = calcKernelDensity(node, modes[1].getParticles());
			probsWifiV.push_back(probParzenWifi);

		}

		// map the std::vectors onto Eigen vectors (no copy)
		Eigen::Map<Eigen::VectorXd> probsWifi(&probsWifiV[0], probsWifiV.size());
		Eigen::Map<Eigen::VectorXd> probsParticle(&probsParticleV[0], probsParticleV.size());

		// get the KLD between the two grid densities
		double kld = Divergence::KullbackLeibler<double>::getGeneralFromSamples(probsParticle, probsWifi, Divergence::LOGMODE::NATURALIS);

		// debugging global variable
		__KLD = kld;

		// exponential weighting of the divergence
		double expKld = std::exp(-lambda * kld);

		// create the row-stochastic 2x2 transition matrix (rows sum to 1)
		Eigen::MatrixXd m(2,2);
		m << 1-expKld, expKld,
		     0,        1;

		return m;

	}

	double calcKernelDensity(const MyNode& node, const std::vector<K::Particle<MyState>>& particles) {

		const int size = (int) particles.size();
		double prob = 0;

		// Parzen window: weighted sum of Gaussian kernels evaluated on the particle-to-node distance (in cm)
		#pragma omp parallel for reduction(+:prob) num_threads(6)
		for (int i = 0; i < size; ++i) {
			const double distance = particles[i].state.position.getDistanceInCM(node);
			prob += Distribution::Normal<double>::getProbability(0, 100, distance) * particles[i].weight;
		}

		return prob;

	}

};
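
// Illustrative sketch only (not part of KLib; the function name and sample values are
// made up): how the divergence computed in update() turns into the 2x2 transition
// matrix. A small divergence gives expKld close to 1, so mode 0 (posterior) switches
// to mode 1 (Wifi) almost surely; a large divergence keeps mode 0 on its own particles.
// Mode 1 is absorbing (row 1 = [0, 1]), exactly as in update().
static Eigen::MatrixXd sketchTransitionMatrix(const double kld, const double lambda) {
	const double expKld = std::exp(-lambda * kld);	// exp(-lambda * divergence), in [0, 1]
	Eigen::MatrixXd m(2, 2);
	m << 1.0 - expKld, expKld,		// row 0: posterior mode
	     0.0,          1.0;			// row 1: Wifi mode (absorbing)
	return m;				// rows sum to 1 (row-stochastic)
}
// e.g. sketchTransitionMatrix(0.5, 1.0) gives roughly [[0.39, 0.61], [0, 1]].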

static double getKernelDensityProbability(std::vector<K::Particle<MyState>>& particles, MyState state, std::vector<K::Particle<MyState>>& samplesWifi){

	Distribution::KernelDensity<double, MyState> parzen([&](MyState state){
@@ -92,8 +160,8 @@ static double getKernelDensityProbability(std::vector<K::Particle<MyState>>& par

	Eigen::Map<Eigen::VectorXd> probsParticle(&probsParticleV[0], probsParticleV.size());

	//get divergence
-	//double kld = Divergence::KullbackLeibler<double>::getGeneralFromSamples(probsParticle, probsWifi, Divergence::LOGMODE::NATURALIS);
-	double kld = Divergence::JensenShannon<double>::getGeneralFromSamples(probsParticle, probsWifi, Divergence::LOGMODE::NATURALIS);
+	double kld = Divergence::KullbackLeibler<double>::getGeneralFromSamples(probsParticle, probsWifi, Divergence::LOGMODE::NATURALIS);
+	//double kld = Divergence::JensenShannon<double>::getGeneralFromSamples(probsParticle, probsWifi, Divergence::LOGMODE::NATURALIS);

	//plotti
	//plot.debugDistribution1(samplesWifi);
@@ -101,9 +169,9 @@ static double getKernelDensityProbability(std::vector<K::Particle<MyState>>& par

	//estimate the mean
-	//K::ParticleFilterEstimationOrderedWeightedAverage<MyState> estimateWifi(0.95);
-	//const MyState estWifi = estimateWifi.estimate(samplesWifi);
-	//plot.addEstimationNodeSmoothed(estWifi.position.inMeter());
+	// K::ParticleFilterEstimationOrderedWeightedAverage<MyState> estimateWifi(0.95);
+	// const MyState estWifi = estimateWifi.estimate(samplesWifi);
+	// plot.addEstimationNodeSmoothed(estWifi.position.inMeter());

	return kld;
}
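
// Illustrative sketch only (an assumption about what Divergence::KullbackLeibler<double>::
// getGeneralFromSamples roughly computes, not the KLib implementation): a discrete
// Kullback-Leibler divergence over the two grid densities, with both vectors normalized
// to probability mass and the natural log (cf. Divergence::LOGMODE::NATURALIS above).
// Relies on Eigen and <cmath>, which the includes above already pull in.
static double sketchDiscreteKld(Eigen::VectorXd p, Eigen::VectorXd q) {
	p /= p.sum();					// normalize both vectors to sum 1
	q /= q.sum();
	double kld = 0.0;
	for (int i = 0; i < (int) p.size(); ++i) {
		if (p(i) > 0.0 && q(i) > 0.0) {		// skip empty bins to avoid log(0)
			kld += p(i) * std::log(p(i) / q(i));
		}
	}
	return kld;					// larger values -> the two densities differ more
}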