#39 #40 git add for last commit

toni
2017-11-15 17:46:06 +01:00
parent c8063bc862
commit 95a5c8f34f
49 changed files with 4661 additions and 0 deletions

smc/Particle.h

@@ -0,0 +1,54 @@
/*
* Particle.h
*
* Created on: Sep 17, 2013
* Author: Frank Ebner
*/
#ifndef PARTICLE_H_
#define PARTICLE_H_
/**
* a particle consists of a (user-defined) state
* paired with a weight (importance).
*
* depending on the particle filter's configuration,
* the (user-defined) state must provide several operations,
* such as:
* assignment from another state
* multiplication by a scalar
* etc.
*
*/
namespace SMC {
template <typename State> struct Particle {
/** the particles state */
State state;
/** the (current) probability for this state */
double weight;
/** empty ctor */
Particle() : state(), weight(0) {;}
/** ctor */
Particle(const State& state, double weight) : state(state), weight(weight) {;}
/** assignment operator */
Particle& operator = (const Particle& other) {
this->state = other.state;
this->weight = other.weight;
return *this;
}
};
}
#endif /* PARTICLE_H_ */
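
For illustration only (not part of this commit): a minimal user-defined state providing the operations described above (assignment, +=, /= by a scalar, * by a scalar) could look like the following sketch; the name PosState and its fields are hypothetical.

// hypothetical example state, usable as SMC::Particle<PosState>
struct PosState {
    double x = 0;
    double y = 0;
    PosState() {}
    PosState(const double x, const double y) : x(x), y(y) {}
    PosState& operator += (const PosState& o) { x += o.x; y += o.y; return *this; }
    PosState& operator /= (const double d) { x /= d; y /= d; return *this; }
    PosState operator * (const double d) const { return PosState(x * d, y * d); }
};

// usage: SMC::Particle<PosState> p(PosState(1.0, 2.0), 0.5);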

smc/ParticleAssertions.h

@@ -0,0 +1,69 @@
#ifndef PARTICLEASSERTIONS_H
#define PARTICLEASSERTIONS_H
namespace SMC {
/** check whether T provides a += operator */
template <typename T>
class HasOperatorPlusEq {
typedef char one;
typedef long two;
template <typename C> static one test( decltype(&C::operator+=) ) ;
template <typename C> static two test(...);
public:
enum { value = sizeof(test<T>(0)) == sizeof(one) };
};
/** check whether T provides a /= operator */
template <typename T>
class HasOperatorDivEq {
typedef char one;
typedef long two;
template <typename C> static one test( decltype(&C::operator/=) ) ;
template <typename C> static two test(...);
public:
enum { value = sizeof(test<T>(0)) == sizeof(one) };
};
/** check whether T provides a * operator */
template <typename T>
class HasOperatorMul {
typedef char one;
typedef long two;
template <typename C> static one test( decltype(&C::operator*) ) ;
template <typename C> static two test(...);
public:
enum { value = sizeof(test<T>(0)) == sizeof(one) };
};
/** check whether T provides an assignment operator */
template <typename T>
class HasOperatorAssign{
typedef char one;
typedef long two;
template <typename C> static one test( decltype(&C::operator=) ) ;
template <typename C> static two test(...);
public:
enum { value = sizeof(test<T>(0)) == sizeof(one) };
};
}
#endif // PARTICLEASSERTIONS_H
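
For context (illustration only): these detection traits are consumed by static_assert in the estimation classes further below; a minimal standalone check with a hypothetical MyState could look like this.

#include "smc/ParticleAssertions.h"

struct MyState {   // hypothetical state type
    MyState& operator += (const MyState&) { return *this; }
};

// fails to compile with the given message if MyState lacked a += operator
static_assert(SMC::HasOperatorPlusEq<MyState>::value, "your state needs a += operator!");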


@@ -0,0 +1,256 @@
/*
* ParticleFilter.h
*
* Created on: Sep 17, 2013
* Author: Frank Ebner
*/
#ifndef PARTICLEFILTER_H_
#define PARTICLEFILTER_H_
#include <vector>
#include <memory>
#include <cstdint>
#include <limits>
#include "../Particle.h"
#include "resampling/ParticleFilterResampling.h"
#include "estimation/ParticleFilterEstimation.h"
#include "ParticleFilterTransition.h"
#include "ParticleFilterEvaluation.h"
#include "ParticleFilterInitializer.h"
#include "../../Assertions.h"
namespace SMC {
/**
* the main-class for the particle filter
* @param State the (user-defined) state for each particle
* @param Observation the observation (sensor) data
*/
template <typename State, typename Control, typename Observation>
class ParticleFilter {
private:
/** all used particles */
std::vector<Particle<State>> particles;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<ParticleFilterTransition<State, Control>> transition;
/** the evaluation function to use */
std::unique_ptr<ParticleFilterEvaluation<State, Observation>> evaluation;
/** the initialization function to use */
std::unique_ptr<ParticleFilterInitializer<State>> initializer;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
public:
/** ctor */
ParticleFilter(const uint32_t numParticles, std::unique_ptr<ParticleFilterInitializer<State>> initializer) {
particles.resize(numParticles);
setInitializier(std::move(initializer));
init();
}
/** dtor */
~ParticleFilter() {
;
}
/** access to all particles */
const std::vector<Particle<State>>& getParticles() {
return particles;
}
/** initialize/re-start the particle filter */
void init() {
Assert::isNotNull(initializer, "initializer MUST not be null! call setInitializer() first!");
initializer->initialize(particles);
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<ParticleFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** get the used transition method */
ParticleFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/** set the evaluation method to use */
void setEvaluation(std::unique_ptr<ParticleFilterEvaluation<State, Observation>> evaluation) {
Assert::isNotNull(evaluation, "setEvaluation() MUST not be called with a nullptr!");
this->evaluation = std::move(evaluation);
}
/** set the initialization method to use */
void setInitializier(std::unique_ptr<ParticleFilterInitializer<State>> initializer) {
Assert::isNotNull(initializer, "setInitializer() MUST not be called with a nullptr!");
this->initializer = std::move(initializer);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** N_eff of the previous update; starts at the maximum so the very first update never triggers resampling */
double lastNEff = std::numeric_limits<double>::max();
/** perform resampling -> transition -> evaluation -> estimation */
State update(const Control* control, const Observation& observation) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// if the number of efficient particles is too low, perform resampling
if (lastNEff < particles.size() * nEffThresholdPercent) {resampler->resample(particles); }
// perform the transition step
transition->transition(particles, control);
// perform the evaluation step and calculate the sum of all particle weights
evaluation->evaluation(particles, observation);
// normalize the particle weights and thereby calculate N_eff
lastNEff = normalize();
//std::cout << "normalized. n_eff is " << lastNEff << std::endl;
// estimate the current state
const State est = estimation->estimate(particles);
// done
return est;
}
/** perform only the transition step */
void updateTransitionOnly(const Control* control) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
// perform the transition step
transition->transition(particles, control);
}
/** perform only the evaluation step */
State updateEvaluationOnly(const Observation& observation) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// perform the evaluation step and calculate the sum of all particle weights
evaluation->evaluation(particles, observation);
// the former weight-sum sanity checks are disabled:
// returning the sum from the evaluation was too easy to forget
//Assert::isNotNaN(weightSum, "sum of all particle weights (returned from eval) is NAN!");
//Assert::isNotNull(weightSum, "sum of all particle weights (returned from eval) is 0.0!");
// normalize the particle weights and thereby calculate N_eff
const double neff = normalize();
// estimate the current state
const State est = estimation->estimate(particles);
// if the number of efficient particles is too low, perform resampling
if (neff < particles.size() * nEffThresholdPercent) { resampler->resample(particles); }
// done
return est;
}
/** estimate the current state without update or transition just based on the current weights */
State estimate() {
// sanity checks (if enabled)
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
return estimation->estimate(particles);
}
private:
/** normalize the weight of all particles to 1.0 and perform some sanity checks */
double normalize() {
// calculate sum(weights)
//double min1 = 9999999;
double weightSum = 0.0;
for (const auto& p : particles) {
weightSum += p.weight;
//if (p.weight < min1) {min1 = p.weight;}
}
// sanity check. always!
if (weightSum != weightSum) {
throw Exception("sum of particle-weights is NaN");
}
if (weightSum == 0) {
throw Exception("sum of particle-weights is 0.0");
}
// normalize and calculate N_eff
double sum2 = 0.0;
//double min2 = 9999999;
for (auto& p : particles) {
p.weight /= weightSum;
//if (p.weight < min2) {min2 = p.weight;}
sum2 += (p.weight * p.weight);
}
// N_eff
return 1.0 / sum2;
}
// /** calculate the number of efficient particles (N_eff) */
// double getNeff() const {
// double sum = 0.0;
// for (auto& p : particles) {sum += (p.weight * p.weight);}
// return 1.0 / sum;
// }
};
}
#endif /* PARTICLEFILTER_H_ */
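
For orientation, a rough usage sketch of the filter class above (illustration only, nothing here is part of the commit): PosState is the hypothetical state sketched after Particle.h; UniformInitializer, OdometryTransition, RangeEvaluation and the Odometry struct are the hypothetical implementations sketched after the corresponding interface headers further below; only the setters and update() are taken from this header.

#include <memory>
// assuming the SMC particle-filter headers and the hypothetical sketch types are available
void runFilterSketch() {
    SMC::ParticleFilter<PosState, Odometry, double> pf(
        5000, std::unique_ptr<SMC::ParticleFilterInitializer<PosState>>(new UniformInitializer()));
    pf.setTransition(std::unique_ptr<SMC::ParticleFilterTransition<PosState, Odometry>>(new OdometryTransition()));
    pf.setEvaluation(std::unique_ptr<SMC::ParticleFilterEvaluation<PosState, double>>(new RangeEvaluation()));
    pf.setEstimation(std::unique_ptr<SMC::ParticleFilterEstimation<PosState>>(
        new SMC::ParticleFilterEstimationWeightedAverage<PosState>()));
    pf.setResampling(std::unique_ptr<SMC::ParticleFilterResampling<PosState>>(
        new SMC::ParticleFilterResamplingSimple<PosState>()));

    // one filter step: transition with the control, weight with the observation, read the estimate
    const Odometry odo{0.3, 0.0};
    const PosState est = pf.update(&odo, 12.7);
    (void) est;
}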


@@ -0,0 +1,29 @@
#ifndef K_MATH_FILTERS_PARTICLE_PARTICLEFILTEREVALUATION_H
#define K_MATH_FILTERS_PARTICLE_PARTICLEFILTEREVALUATION_H
#include <vector>
#include "../Particle.h"
namespace SMC {
/**
* interface for the user-defined particle-evaluation.
* the evaluation weights each particle by comparing its state with the observation: p(o_t | q_t)
*/
template <typename State, typename Observation>
class ParticleFilterEvaluation {
public:
/**
* evaluate all particles (update p.weight) depending on their state and the current observation.
* this method MUST return the sum of all weights (used for normalization)
*/
virtual double evaluation(std::vector<Particle<State>>& particles, const Observation& observation) = 0;
};
}
#endif // K_MATH_FILTERS_PARTICLE_PARTICLEFILTEREVALUATION_H
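
A minimal evaluation sketch (illustration only, not part of the commit), reusing the hypothetical PosState from above and assuming the observation is a measured x-position with Gaussian sensor noise:

#include <cmath>
#include <vector>
// hypothetical evaluation: weight each particle with a Gaussian likelihood of the measured x-position
class RangeEvaluation : public SMC::ParticleFilterEvaluation<PosState, double> {
public:
    double evaluation(std::vector<SMC::Particle<PosState>>& particles, const double& observation) override {
        const double sigma = 0.5;               // assumed sensor noise (metres)
        double sum = 0;
        for (SMC::Particle<PosState>& p : particles) {
            const double d = p.state.x - observation;
            p.weight *= std::exp(-(d * d) / (2 * sigma * sigma));
            sum += p.weight;
        }
        return sum;                             // the interface requires returning the sum of all weights
    }
};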


@@ -0,0 +1,283 @@
/*
* ParticleFilterHistory.h
*
* Created on: Jul 13, 2015
* Author: Toni Fetzer
*/
#ifndef PARTICLEFILTERHISTORY_H_
#define PARTICLEFILTERHISTORY_H_
#include <vector>
#include <memory>
#include "../Particle.h"
#include "resampling/ParticleFilterResampling.h"
#include "estimation/ParticleFilterEstimation.h"
#include "ParticleFilterTransition.h"
#include "ParticleFilterEvaluation.h"
#include "ParticleFilterInitializer.h"
#include "../../Assertions.h"
namespace SMC {
/**
* the main-class for the particle filter
* @param State the (user-defined) state for each particle
* @param Observation the observation (sensor) data
*/
template <typename State, typename Control, typename Observation>
class ParticleFilterHistory {
private:
/** all used particles */
std::vector<Particle<State>> particles;
/** all non-resampled particles */
std::vector<Particle<State>> particlesNoResampling;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<ParticleFilterTransition<State, Control>> transition;
/** the evaluation function to use */
std::unique_ptr<ParticleFilterEvaluation<State, Observation>> evaluation;
/** the initialization function to use */
std::unique_ptr<ParticleFilterInitializer<State>> initializer;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
public:
/** ctor */
ParticleFilterHistory(const uint32_t numParticles, std::unique_ptr<ParticleFilterInitializer<State>> initializer) {
particles.resize(numParticles);
particlesNoResampling.resize(numParticles);
setInitializier(std::move(initializer));
init();
}
/** dtor */
~ParticleFilterHistory() {
;
}
/** access to all particles */
const std::vector<Particle<State>>& getParticles() {
return particles;
}
/** access to all non-resampled particles */
const std::vector<Particle<State>>& getNonResamplingParticles() {
return particlesNoResampling;
}
/** initialize/re-start the particle filter */
void init() {
Assert::isNotNull(initializer, "initializer MUST not be null! call setInitializer() first!");
initializer->initialize(particles);
}
void setAngle(const double angle){
for(SMC::Particle<State>& p : particles){
p.state.walkState.heading = angle;
}
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<ParticleFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** get the used transition method */
ParticleFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/** set the evaluation method to use */
void setEvaluation(std::unique_ptr<ParticleFilterEvaluation<State, Observation>> evaluation) {
Assert::isNotNull(evaluation, "setEvaluation() MUST not be called with a nullptr!");
this->evaluation = std::move(evaluation);
}
/** set the initialization method to use */
void setInitializier(std::unique_ptr<ParticleFilterInitializer<State>> initializer) {
Assert::isNotNull(initializer, "setInitializer() MUST not be called with a nullptr!");
this->initializer = std::move(initializer);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** perform resampling -> transition -> evaluation -> estimation */
State update(const Control* control, const Observation& observation) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// perform the transition step
transition->transition(particles, control);
// perform the evaluation step and calculate the sum of all particle weights
const double weightSum = evaluation->evaluation(particles, observation);
// normalize the particle weights and thereby calculate N_eff
const double neff = normalize(weightSum);
// estimate the current state
const State est = estimation->estimate(particles);
// save the particles before resampling to preserve their weights at this timestep
particlesNoResampling = particles;
// if the number of efficient particles is too low, perform resampling
if (neff < particles.size() * nEffThresholdPercent) { resampler->resample(particles); }
// done
return est;
}
/** perform resampling -> transition -> evaluation -> estimation */
State update(const Control* control, const Observation& observation, std::vector<Particle<State>>& particlesWifi,
std::function<double(std::vector<Particle<State>>&, State, std::vector<Particle<State>>&)> calcKLD, double& kld) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
//Assert::isNotNull(kld, "kld MUST not be null! call setEstimation() first!");
// perform the transition step
transition->transition(particles, control);
// perform the evaluation step and calculate the sum of all particle weights
const double weightSum = evaluation->evaluation(particles, observation);
// normalize the particle weights and thereby calculate N_eff
const double neff = normalize(weightSum);
// estimate the current state
const State est = estimation->estimate(particles);
// save the particles before resampling to preserve their weights at this timestep
particlesNoResampling = particles;
// calculate the current divergence between the wifi and the particle probability distributions
kld = calcKLD(particles, est, particlesWifi);
// if the number of efficient particles is too low, perform resampling
if (neff < particles.size() * nEffThresholdPercent) { resampler->resample(particles, kld, particlesWifi); }
// done
return est;
}
/** perform only the transition step */
void updateTransitionOnly(const Control* control) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
// perform the transition step
transition->transition(particles, control);
}
/** perform only the evaluation step */
State updateEvaluationOnly(const Observation& observation) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// perform the evaluation step and calculate the sum of all particle weights
const double weightSum = evaluation->evaluation(particles, observation);
// sanity check
Assert::isNotNaN(weightSum, "sum of all particle weights (returned from eval) is NAN!");
Assert::isNotNull(weightSum, "sum of all particle weights (returned from eval) is 0.0!");
// normalize the particle weights and thereby calculate N_eff
const double neff = normalize(weightSum);
// estimate the current state
const State est = estimation->estimate(particles);
// if the number of efficient particles is too low, perform resampling
if (neff < particles.size() * nEffThresholdPercent) { resampler->resample(particles); }
// done
return est;
}
/** estimate the current state without update or transition just based on the current weights */
State estimate() {
// sanity checks (if enabled)
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
return estimation->estimate(particles);
}
private:
/** normalize the weight of all particles to one */
double normalize(const double weightSum) {
double sum = 0.0;
for (auto& p : particles) {
p.weight /= weightSum;
sum += (p.weight * p.weight);
}
return 1.0 / sum;
}
/** calculate the number of efficient particles (N_eff) */
double getNeff() const {
double sum = 0.0;
for (auto& p : particles) {sum += (p.weight * p.weight);}
return 1.0 / sum;
}
};
}
#endif /* PARTICLEFILTERHISTORY_H_ */


@@ -0,0 +1,25 @@
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERINITIALIZER_H
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERINITIALIZER_H
#include <vector>
#include "../Particle.h"
namespace SMC {
/**
* interface for particle filter initializers.
* an initializer "configures" all particles before the filter starts: p(q_0)
*/
template <typename State>
class ParticleFilterInitializer {
public:
/** the initializer must setup each particle within the given vector */
virtual void initialize(std::vector<Particle<State>>& particles) = 0;
};
}
#endif // K_MATH_FILTER_PARTICLES_PARTICLEFILTERINITIALIZER_H
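
A minimal initializer sketch (illustration only, not part of the commit), again using the hypothetical PosState and spreading the particles uniformly over an assumed 100 x 60 m area with equal weight:

#include <random>
#include <vector>
// hypothetical initializer: uniform spatial spread, equal initial weights
class UniformInitializer : public SMC::ParticleFilterInitializer<PosState> {
public:
    void initialize(std::vector<SMC::Particle<PosState>>& particles) override {
        std::mt19937 gen(42);
        std::uniform_real_distribution<double> dx(0, 100);
        std::uniform_real_distribution<double> dy(0, 60);
        const double w = 1.0 / particles.size();
        for (SMC::Particle<PosState>& p : particles) {
            p.state = PosState(dx(gen), dy(gen));
            p.weight = w;
        }
    }
};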


@@ -0,0 +1,241 @@
#ifndef PARTICLEFILTERMIXING_H
#define PARTICLEFILTERMIXING_H
#include <vector>
#include <memory>
#include "../Particle.h"
#include "resampling/ParticleFilterResampling.h"
#include "estimation/ParticleFilterEstimation.h"
#include "ParticleFilterTransition.h"
#include "ParticleFilterEvaluation.h"
#include "ParticleFilterInitializer.h"
#include "../../Assertions.h"
namespace SMC {
/**
* the main-class for the particle filter
* @param State the (user-defined) state for each particle
* @param Observation the observation (sensor) data
*/
template <typename State, typename Control, typename Observation>
class ParticleFilterMixing {
private:
/** all used particles */
std::vector<Particle<State>> particles;
/** the current calculated estimation */
State estimation;
/** the resampler to use */
std::shared_ptr<ParticleFilterResampling<State>> resampler;
/** the estimation function to use */
std::shared_ptr<ParticleFilterEstimation<State>> estimator;
/** the transition function to use */
std::shared_ptr<ParticleFilterTransition<State, Control>> transition;
/** the evaluation function to use */
std::shared_ptr<ParticleFilterEvaluation<State, Observation>> evaluation;
/** the initialization function to use */
std::shared_ptr<ParticleFilterInitializer<State>> initializer;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
/** the current sum of all weights NOT normalized*/
double weightSum = 1.0;
/** the predicted mode probability P(m_t|Z_t-1) */
double predictedModeProbability = 1.0;
/** the posterior probability of the mode p(m_t | Z_t)*/
double modePosteriorProbability = 1.0;
/** the transition mode probability P(m_t-1 | m_t, Z_t-1)*/
double transitionModeProbability = 1.0;
public:
/** ctor
* NOTE: the modePosteriorProbability needs to be normalized depending on the number of filters within the IMMPF!
*/
ParticleFilterMixing(const uint32_t numParticles, std::shared_ptr<ParticleFilterInitializer<State>> initializer, double modePosteriorProbability) {
this->modePosteriorProbability = modePosteriorProbability;
particles.resize(numParticles);
setInitializier(std::move(initializer));
init();
}
/** dtor */
~ParticleFilterMixing() {
;
}
/** access to all particles */
const std::vector<Particle<State>>& getParticles() const {
return this->particles;
}
void setParticles(const std::vector<Particle<State>>& newParticles){
this->particles = newParticles;
}
/** get the current estimation */
const State getEstimation() const {
return estimation;
}
/** initialize/re-start the particle filter */
void init() {
Assert::isNotNull(initializer, "initializer MUST not be null! call setInitializer() first!");
initializer->initialize(particles);
}
/** set the resampling method to use */
void setResampling(std::shared_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the estimation method to use */
void setEstimator(std::shared_ptr<ParticleFilterEstimation<State>> estimator) {
Assert::isNotNull(estimator, "setEstimation() MUST not be called with a nullptr!");
this->estimator = std::move(estimator);
}
/** set the transition method to use */
void setTransition(std::shared_ptr<ParticleFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** get the used transition method */
ParticleFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/** set the evaluation method to use */
void setEvaluation(std::shared_ptr<ParticleFilterEvaluation<State, Observation>> evaluation) {
Assert::isNotNull(evaluation, "setEvaluation() MUST not be called with a nullptr!");
this->evaluation = std::move(evaluation);
}
/** set the initialization method to use */
void setInitializier(std::shared_ptr<ParticleFilterInitializer<State>> initializer) {
Assert::isNotNull(initializer, "setInitializer() MUST not be called with a nullptr!");
this->initializer = std::move(initializer);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** get the unnormalized weight sum of all particles */
double getWeightSum() const
{
return this->weightSum;
}
/** get the predicted mode probability P(m_t|Z_t-1)*/
double getPredictedModeProbability() const
{
return this->predictedModeProbability;
}
/** set the predicted mode probability P(m_t|Z_t-1)*/
void setPredictedModeProbability(const double val) {
this->predictedModeProbability = val;
}
/** get the posterior mode probability P(m_t|Z_t)*/
double getModePosteriorProbability() const
{
return this->modePosteriorProbability;
}
/** set the posterior mode probability P(m_t|Z_t)*/
void setModePosteriorProbability(const double likelihoodSum) {
Assert::isNotNull(likelihoodSum, "likelihoodSum is zero.. that's not possible");
Assert::isNotNull(this->weightSum, "weightSum is zero.. that's not possible");
//Assert::isNotNull(this->predictedModeProbability, "predictedModeProbability is zero.. thats not possible");
this->modePosteriorProbability = (this->weightSum * this->predictedModeProbability) / likelihoodSum;
//Assert::isNotNull(this->modePosteriorProbability, "modePosteriorProbability is zero.. thats not possible");
}
/** get the transition mode probability P(m_t-1 | m_t, Z_t-1)
* NOTE: don't use this value! It is only needed for nicer mixed sampling!
*/
double getTransitionModeProbability() const
{
return this->transitionModeProbability;
}
/** set the transition mode probability P(m_t-1 | m_t, Z_t-1) */
void setTransitionModeProbability(const double val) {
this->transitionModeProbability = val;
}
/** perform resampling -> transition -> evaluation -> estimation */
void update(const Control* control, const Observation& observation) {
// sanity checks (if enabled)
Assert::isNotNull(resampler, "resampler MUST not be null! call setResampler() first!");
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(evaluation, "evaluation MUST not be null! call setEvaluation() first!");
Assert::isNotNull(estimator, "estimation MUST not be null! call setEstimation() first!");
// perform the transition step
transition->transition(particles, control);
// perform the evaluation step and calculate the sum of all particle weights
this->weightSum = evaluation->evaluation(particles, observation);
// normalize the particle weights and thereby calculate N_eff
const double neff = normalize(weightSum);
// estimate the current state
this->estimation = estimator->estimate(particles);
// if the number of efficient particles is too low, perform resampling
if (neff < particles.size() * nEffThresholdPercent) { resampler->resample(particles); }
// done
}
private:
/** normalize the weight of all particles to one */
double normalize(const double weightSum) {
double sum = 0.0;
for (auto& p : particles) {
p.weight /= weightSum;
sum += (p.weight * p.weight);
}
return 1.0 / sum;
}
/** calculate the number of efficient particles (N_eff) */
double getNeff() const {
double sum = 0.0;
for (auto& p : particles) {sum += (p.weight * p.weight);}
return 1.0 / sum;
}
};
}
#endif // PARTICLEFILTERMIXING_H


@@ -0,0 +1,26 @@
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERTRANSITION_H
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERTRANSITION_H
#include <vector>
#include "../Particle.h"
namespace SMC {
/**
* interface for the user-defined particle transition.
* the transition describes the particles change during the transition phase p(q_t | q_t-1)
* depending on the control data (if any)
*/
template <typename State, typename Control>
class ParticleFilterTransition {
public:
/** perform the transition p(q_t | q_t-1) for all particles based on the given control data */
virtual void transition(std::vector<Particle<State>>& particles, const Control* control) = 0;
};
}
#endif // K_MATH_FILTER_PARTICLES_PARTICLEFILTERTRANSITION_H
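
A minimal transition sketch (illustration only, not part of the commit), with a hypothetical Odometry control struct and Gaussian motion noise applied to the hypothetical PosState:

#include <random>
#include <vector>
// hypothetical control data: one odometry step in metres
struct Odometry { double dx; double dy; };

// hypothetical transition: move every particle by the odometry step plus Gaussian noise
class OdometryTransition : public SMC::ParticleFilterTransition<PosState, Odometry> {
public:
    void transition(std::vector<SMC::Particle<PosState>>& particles, const Odometry* control) override {
        if (control == nullptr) { return; }     // the interface passes the control as a (possibly null) pointer
        std::normal_distribution<double> noise(0.0, 0.1);
        for (SMC::Particle<PosState>& p : particles) {
            p.state.x += control->dx + noise(gen);
            p.state.y += control->dy + noise(gen);
        }
    }
private:
    std::mt19937 gen{42};
};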


@@ -0,0 +1,24 @@
#ifndef PARTICLEFILTERESTIMATION_H
#define PARTICLEFILTERESTIMATION_H
#include "../../Particle.h"
#include <vector>
namespace SMC {
template <typename State>
class ParticleFilterEstimation {
public:
/** dtor */
virtual ~ParticleFilterEstimation() {;}
/** get the current state estimation for the given particle set */
virtual State estimate(const std::vector<Particle<State>>& particles) = 0;
};
}
#endif // PARTICLEFILTERESTIMATION_H


@@ -0,0 +1,160 @@
#if FIXME
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERESTIMATIONKERNELDENSITY_H
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERESTIMATIONKERNELDENSITY_H
#include "ParticleFilterEstimation.h"
#include <algorithm>
#include <vector>
#include "../../Particle.h"
#include "../../../../misc/gnuplot/Gnuplot.h"
#include "../../../optimization/NumOptVector.h"
#include "../../../optimization/NumOptAlgoDownhillSimplex.h"
namespace SMC {
template <typename State, int numParams>
class ParticleFilterEstimationKernelDensity : public ParticleFilterEstimation<State> {
Gnuplot gp;
K::NumOptAlgoDownhillSimplex<float, numParams> simplex;
private:
class OptFunc {
private:
/** the particles to work on */
const std::vector<Particle<State>>& particles;
public:
/** ctor */
OptFunc(const std::vector<Particle<State>>& particles) : particles(particles) {;}
float operator () (const float* params) const {
double prob = 0;
const int size = particles.size();
//#pragma omp parallel for
for (int i = 0; i < size; i+=10) {
const Particle<State>& p = particles[i];
prob += p.state.getKernelDensityProbability(params) * p.weight;
}
// convert probability to "error"
return -prob;
}
};
public:
State estimate(std::vector<Particle<State>>& particles) override {
// comparator
auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) { return p1.weight < p2.weight; };
// // find max state
// auto el = std::max_element(particles.begin(), particles.end(), comp);
// State max = el->state;
// // region to check
// BBox2 bbox;
// bbox.add(Point2f(-50,-50));
// bbox.add(Point2f(100,100));
// const float stepSize = 1.0f;
// const int pxX = (bbox.getMax().x - bbox.getMin().x) / stepSize;
// const int pxY = (bbox.getMax().y - bbox.getMin().y) / stepSize;
// // optimize using simplex
OptFunc func(particles);
// // calculate the optimum
// simplex.calculateOptimum(func, params);
// std::cout << params << std::endl;
float params[numParams];
getGlobalMax(particles, params);
// create output state from optimized params
State res(params);
gp << "splot '-' with lines\n";
int x1 = 0;//params[0]-2500;
int x2 = 100*100;//params[0]+2500;
int y1 = 0;//params[1]-2500;
int y2 = 60*100;//params[1]+2500;
for (int x = x1; x < x2; x += 400) {
for (int y = y1; y < y2; y += 400) {
params[0] = x;
params[1] = y;
params[2] = 0;
gp << x << " " << y << " " << -func(params) << "\n";
}
gp << "\n";
}
gp << "e\n";
gp.flush();
return res;
}
void getGlobalMax(const std::vector<Particle<State>>& particles, float* startParams) {
OptFunc func(particles);
double bestP = 0;
float bestParams[numParams];
simplex.setMaxIterations(10);
simplex.setNumRestarts(0);
for (int i = 0; i < 15; ++i) {
// start at a random particle
const int idx = rand() % particles.size();
// start the optimization at this particle's parameters
particles[idx].state.fillKernelDenstityParameters(startParams);
simplex.calculateOptimum(func, startParams);
const float prob = -func(startParams);
if (prob > bestP) {
bestP = prob;
memcpy(bestParams, startParams, numParams*sizeof(float));
//std::cout << bestParams << std::endl;
}
}
memcpy(startParams, bestParams, numParams*sizeof(float));
}
};
}
#endif // K_MATH_FILTER_PARTICLES_PARTICLEFILTERESTIMATIONKERNELDENSITY_H
#endif


@@ -0,0 +1,37 @@
#ifndef PARTICLEFILTERESTIMATIONMAX_H
#define PARTICLEFILTERESTIMATIONMAX_H
#include "ParticleFilterEstimation.h"
#include <algorithm>
#include <vector>
#include "../../Particle.h"
namespace SMC {
/**
* just use the particle with the maximum weight
* as the currently estimated state
*/
template <typename State>
class ParticleFilterEstimationMax : public ParticleFilterEstimation<State> {
public:
State estimate(const std::vector<Particle<State>>& particles) override {
// comparator
auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) {
return p1.weight < p2.weight;
};
// find max element
auto el = std::max_element(particles.begin(), particles.end(), comp);
return el->state;
}
};
}
#endif // PARTICLEFILTERESTIMATIONMAX_H


@@ -0,0 +1,78 @@
#ifndef PARTICLEFILTERESTIMATIONORDEREDWEIGHTEDAVERAGE_H
#define PARTICLEFILTERESTIMATIONORDEREDWEIGHTEDAVERAGE_H
#include <vector>
#include "../../Particle.h"
#include "../../ParticleAssertions.h"
#include "ParticleFilterEstimation.h"
#include "../../../Assertions.h"
namespace SMC {
/**
* calculate the (weighted) average state using
* the X% best weighted particles
*/
template <typename State>
class ParticleFilterEstimationOrderedWeightedAverage : public ParticleFilterEstimation<State> {
private:
const float percent;
public:
/** ctor */
ParticleFilterEstimationOrderedWeightedAverage(const float percent) : percent(percent) {;}
State estimate(const std::vector<Particle<State>>& particles) override {
// compile-time sanity checks
static_assert( HasOperatorPlusEq<State>::value, "your state needs a += operator!" );
static_assert( HasOperatorDivEq<State>::value, "your state needs a /= operator!" );
static_assert( HasOperatorMul<State>::value, "your state needs a * operator!" );
// comparator (highest first)
auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) {
return p1.weight > p2.weight;
};
// create a copy
std::vector<Particle<State>> copy;
copy.insert(copy.begin(), particles.begin(), particles.end());
// sort the copy (highest weight first)
std::sort (copy.begin(), copy.end(), comp);
State tmp;
// calculate weighted average
const int numBest = copy.size() * percent;
double weightSum = 0;
for (int i = 0; i < numBest; ++i) {
const Particle<State>& p = copy[i];
tmp += p.state * p.weight;
weightSum += p.weight;
}
Assert::isTrue( (weightSum == weightSum), "the sum of particle weights is NaN!");
Assert::isTrue( (weightSum != 0), "the sum of particle weights is 0.0!");
// normalize
tmp /= weightSum;
return tmp;
}
};
}
#endif // PARTICLEFILTERESTIMATIONORDEREDWEIGHTEDAVERAGE_H


@@ -0,0 +1,60 @@
#ifndef PARTICLEFILTERESTIMATIONREGIONALWEIGHTEDAVERAGE_H
#define PARTICLEFILTERESTIMATIONREGIONALWEIGHTEDAVERAGE_H
#include "ParticleFilterEstimation.h"
#include "../../Particle.h"
#include "../../ParticleAssertions.h"
#include <algorithm>
namespace SMC {
/**
* this implementation estimates the current state
* by using the most probable particle and some near particles
* combining them by their weight (weighted average)
*
* whether a particle counts as "near" is decided by a user-defined
* predicate "belongsToRegion()". This method must return a boolean
* indicating whether the provided particle lies within the region
* around the maximum-weight particle.
*/
template <typename State>
class ParticleFilterEstimationRegionalWeightedAverage : public ParticleFilterEstimation<State> {
public:
State estimate(const std::vector<Particle<State>>& particles) override {
// compile-time sanity checks
static_assert( HasOperatorPlusEq<State>::value, "your state needs a += operator!" );
static_assert( HasOperatorDivEq<State>::value, "your state needs a /= operator!" );
static_assert( HasOperatorMul<State>::value, "your state needs a * operator!" );
//1) get the most probable particle
const auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) {return p1.weight < p2.weight;};
const Particle<State>& max = *std::max_element(particles.begin(), particles.end(), comp);
//2) calculate the regional weighted average
double cumWeight = 0;
State res;
for (const Particle<State>& p : particles) {
if (!p.state.belongsToRegion(max.state)) {continue;}
cumWeight += p.weight;
res += p.state * p.weight;
}
// 3) normalize
res /= cumWeight;
// done
return res;
}
};
}
#endif // PARTICLEFILTERESTIMATIONREGIONALWEIGHTEDAVERAGE_H


@@ -0,0 +1,52 @@
#ifndef PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGE_H
#define PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGE_H
#include <vector>
#include "../../Particle.h"
#include "../../ParticleAssertions.h"
#include "ParticleFilterEstimation.h"
#include "../../../Assertions.h"
namespace SMC {
/**
* calculate the (weighted) average state using
* all particles and their weight
*/
template <typename State>
class ParticleFilterEstimationWeightedAverage : public ParticleFilterEstimation<State> {
public:
State estimate(const std::vector<Particle<State>>& particles) override {
// compile-time sanity checks
static_assert( HasOperatorPlusEq<State>::value, "your state needs a += operator!" );
static_assert( HasOperatorDivEq<State>::value, "your state needs a /= operator!" );
static_assert( HasOperatorMul<State>::value, "your state needs a * operator!" );
State tmp;
// calculate weighted average
double weightSum = 0;
for (const Particle<State>& p : particles) {
tmp += p.state * p.weight;
weightSum += p.weight;
}
Assert::isTrue( (weightSum == weightSum), "the sum of particle weights is NaN!");
Assert::isTrue( (weightSum != 0), "the sum of particle weights is 0.0!");
// normalize
tmp /= weightSum;
return tmp;
}
};
}
#endif // PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGE_H


@@ -0,0 +1,66 @@
#ifndef PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGEWITHANGLE_H
#define PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGEWITHANGLE_H
#include <vector>
#include <cmath>
#include <math.h>
#include "../../Particle.h"
#include "../../ParticleAssertions.h"
#include "ParticleFilterEstimation.h"
#include "../../../Assertions.h"
namespace SMC {
/**
* calculate the (weighted) average state using
* all particles and their weight
*/
template <typename State>
class ParticleFilterEstimationWeightedAverageWithAngle : public ParticleFilterEstimation<State> {
public:
State estimate(const std::vector<Particle<State>>& particles) override {
// compile-time sanity checks
static_assert( HasOperatorPlusEq<State>::value, "your state needs a += operator!" );
static_assert( HasOperatorDivEq<State>::value, "your state needs a /= operator!" );
static_assert( HasOperatorMul<State>::value, "your state needs a * operator!" );
State tmp;
// calculate weighted average
double weightSum = 0;
double xAngle = 0;
double yAngle = 0;
for (const Particle<State>& p : particles) {
tmp += p.state * p.weight;
weightSum += p.weight;
xAngle += std::cos(p.state.walkState.heading.getRAD());
yAngle += std::sin(p.state.walkState.heading.getRAD());
}
Assert::isTrue( (weightSum == weightSum), "the sum of particle weights is NaN!");
Assert::isTrue( (weightSum != 0), "the sum of particle weights is 0.0!");
// normalize
tmp /= weightSum;
// calculate the average heading angle (circular mean), mapped to [0, 360)
tmp.avgAngle = std::fmod((std::atan2(yAngle / particles.size(), xAngle / particles.size()) * 180.0/PI) + 720, 360.0);
if (tmp.avgAngle > 360.0) {
std::cout << "warning: averaged angle exceeds 360 degrees" << std::endl;
}
return tmp;
}
};
}
#endif // PARTICLEFILTERESTIMATIONWEIGHTEDAVERAGEWITHANGLE_H


@@ -0,0 +1,43 @@
/*
* ParticleResampling.h
*
* Created on: Sep 18, 2013
* Author: Frank Ebner
*/
#ifndef PARTICLEFILTERRESAMPLING_H_
#define PARTICLEFILTERRESAMPLING_H_
#include "../../Particle.h"
#include <vector>
#include <chrono>
/**
* interface for all available resampling methods
* within the particle filter
* @param State the (user-defined) state
*/
namespace SMC {
template <typename State> class ParticleFilterResampling {
public:
/**
* perform resampling on the given particle-vector
* @param particles the vector of all particles to resample
*/
virtual void resample(std::vector<Particle<State>>& particles) = 0;
/**
* perform divergence-aware resampling on the given particle-vector
* @param particles the vector of all particles to resample
* @param kld the current divergence between the filter's distribution and the wifi distribution
* @param particlesWifi the particle set representing the wifi distribution
*/
virtual void resample(std::vector<Particle<State>>& particles, double kld, std::vector<Particle<State>>& particlesWifi) { (void) particles; (void) kld; (void) particlesWifi; }
};
}
#endif /* PARTICLEFILTERRESAMPLING_H_ */


@@ -0,0 +1,157 @@
#ifndef PARTICLEFILTERRESAMPLINGDIVERGENCE_H
#define PARTICLEFILTERRESAMPLINGDIVERGENCE_H
#include <algorithm>
#include <random>
#include "ParticleFilterResampling.h"
#include "../../ParticleAssertions.h"
static int resampleCallCount = 0; // number of resample() calls, used to skip wifi mixing for the first observations
namespace SMC {
/**
* draws new particles depending on the current divergence between two
* probability distributions, based on the Jensen-Shannon divergence
*/
template <typename State>
class ParticleFilterResamplingDivergence : public ParticleFilterResampling<State> {
private:
/** this is a copy of the particle-set to draw from it */
std::vector<Particle<State>> particlesCopy;
/** this is a copy of the wifi particle-set */
std::vector<Particle<State>> particlesWifiCopy;
/** random number generator */
std::minstd_rand gen;
public:
/** ctor */
ParticleFilterResamplingDivergence() {
gen.seed(1234);
}
void resample(std::vector<Particle<State>>& particles) override {
(void) particles; // this resampler only implements the divergence-aware overload below
}
void resample(std::vector<Particle<State>>& particles, double kld, std::vector<Particle<State>>& particlesWifi) override {
// compile-time sanity checks
// TODO: this solution requires EXPLICIT overloading which is bad...
//static_assert( HasOperatorAssign<State>::value, "your state needs an assignment operator!" );
const uint32_t cnt = (uint32_t) particles.size();
const uint32_t cntWifi = (uint32_t) particlesWifi.size();
// equal weight for all particles. sums up to 1.0
const double equalWeight = 1.0 / (double) cnt;
// ensure the copy vector has the same size as the real particle vector
particlesCopy.resize(cnt);
particlesWifiCopy.resize(cntWifi);
// swap both vectors
particlesCopy.swap(particles);
particlesWifiCopy = particlesWifi;
// calculate cumulative weight
double cumWeight = 0;
for (uint32_t i = 0; i < cnt; ++i) {
cumWeight += particlesCopy[i].weight;
particlesCopy[i].weight = cumWeight;
}
double cumWeightWifi = 0;
for (uint32_t i = 0; i < cntWifi; ++i) {
cumWeightWifi += particlesWifiCopy[i].weight;
particlesWifiCopy[i].weight = cumWeightWifi;
}
double maxKld = 250;
double minKld = 20;
double diffKld = kld - minKld;
if(diffKld < 0){
diffKld = 0;
}
int numWifiParticles = (int) ((diffKld / maxKld) * cnt);
// skip wifi mixing for the first few observations
static const int skipStart = 5;
if (resampleCallCount++ < skipStart) {
numWifiParticles = 0;
}
// now draw from the copy vector and fill the original one
// with the resampled particle-set, keeping the number of particles
for (uint32_t i = 0; i < cnt; ++i) {
if (i < (uint32_t) numWifiParticles) {
// draw a particle from the posterior and replace its position with one drawn from the wifi distribution
Particle<State> posteriorParticle = draw(cumWeight);
Particle<State> wifiParticle = drawWifi(cumWeightWifi);
posteriorParticle.state.position = wifiParticle.state.position;
particles[i] = posteriorParticle;
} else {
particles[i] = draw(cumWeight);
}
particles[i].weight = equalWeight;
}
}
private:
/** draw one particle according to its weight from the copy vector */
const Particle<State>& draw(const double cumWeight) {
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumWeight);
// draw a random value between [0:cumWeight]
const float rand = dist(gen);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (const Particle<State>& s, const float d) {return s.weight < d;};
auto it = std::lower_bound(particlesCopy.begin(), particlesCopy.end(), rand, comp);
return *it;
}
/** draw one particle according to its weight from the copy vector */
const Particle<State>& drawWifi(const double cumWeight) {
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumWeight);
// draw a random value between [0:cumWeight]
const float rand = dist(gen);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (const Particle<State>& s, const float d) {return s.weight < d;};
auto it = std::lower_bound(particlesWifiCopy.begin(), particlesWifiCopy.end(), rand, comp);
return *it;
}
};
}
#endif // PARTICLEFILTERRESAMPLINGDIVERGENCE_H
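
For intuition about the mixing heuristic above: with minKld = 20 and maxKld = 250, a current divergence of kld = 145 yields diffKld = 125, so 125 / 250 = 50% of the resampled particles are drawn from the posterior but receive a position drawn from the wifi distribution; for kld <= 20 (and during the first skipStart calls) no positions are replaced at all.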


@@ -0,0 +1,130 @@
#ifndef PARTICLEFILTERRESAMPLINGKLD_H
#define PARTICLEFILTERRESAMPLINGKLD_H
#include <algorithm>
#include <random>
#include <Indoor/math/random/RandomGenerator.h>
#include "ParticleFilterResampling.h"
#include "../../ParticleAssertions.h"
#include <Indoor/math/distribution/NormalCDF.h>
#include <Indoor/misc/Binning.h>
namespace SMC {
/**
* "Adapting sample size in particle filters through KLD-resampling" T.Li et al.
* Resample a dynamic size of new particles. for resampling we use the simple (multinomial version)
* We cann guarente with the probability 1 - delt, the KLD between posterior and the true posterior is
* less then epsilon.
*
*
* Note: the State template parameter needs to implement a float getBinValue(const int dim) const {..} function
*/
template <typename State>
class ParticleFilterResamplingKLD : public ParticleFilterResampling<State> {
private:
/** this is a copy of the particle-set to draw from it */
std::vector<Particle<State>> particlesCopy;
/** random number generator */
std::minstd_rand gen;
/** upper bound epsilon on the KLD between the sampled and the true posterior */
double epsilon;
/** delta: the KLD bound holds with probability 1 - delta, e.g. 0.01 */
double delta;
/** the bins */
Binning<State> bins;
/** max particle size */
uint32_t N_max;
public:
/** ctor */
ParticleFilterResamplingKLD(double delta = 0.01, double epsilon = 0.13, uint32_t N_max = 25000) : delta(delta), epsilon(epsilon), N_max(N_max) {
gen.seed(RANDOM_SEED);
bins.setBinSizes({0.01, 0.01, 0.2, 0.3});
bins.setRanges({BinningRange(-1,100), BinningRange(-10,60), BinningRange(-1,15), BinningRange(0, 2 * M_PI)});
}
void resample(std::vector<Particle<State>>& particles) override {
const uint32_t cnt = (uint32_t) particles.size();
// equal weight for all particles. sums up to 1.0
const double equalWeight = 1.0 / (double) cnt;
// ensure the copy vector has the same size as the real particle vector
particlesCopy.resize(cnt);
// swap both vectors
particlesCopy.swap(particles);
// calculate cumulative weight
double cumWeight = 0;
for (uint32_t i = 0; i < cnt; ++i) {
cumWeight += particlesCopy[i].weight;
particlesCopy[i].weight = cumWeight;
}
//clean the bins and particles
bins.clearUsed();
particles.clear();
// draw a new particle and check if it is within a bin or not
uint32_t k = 1;
double N = 0;
int i = 0;
while (i <= N && i <= (int) N_max) {
particles.push_back(draw(cumWeight));
particles.back().weight = equalWeight;
//is bin free?
if(bins.isFree(particles[i].state)){
k++;
bins.markUsed(particles[i].state);
//calculate the new N
double z_delta = Distribution::NormalCDF<double>::getProbit(1 - delta);
double front = (k - 1) / (2 * epsilon);
double back = 1 - (2 / (9 * (k - 1))) + (std::sqrt(2 / (9 * (k - 1))) * z_delta );
N = front * std::pow(back, 3.0);
}
++i;
}
}
private:
/** draw one particle according to its weight from the copy vector */
const Particle<State>& draw(const double cumWeight) {
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumWeight);
// draw a random value between [0:cumWeight]
const float rand = dist(gen);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (const Particle<State>& s, const float d) {return s.weight < d;};
auto it = std::lower_bound(particlesCopy.begin(), particlesCopy.end(), rand, comp);
return *it;
}
};
}
#endif // PARTICLEFILTERRESAMPLINGKLD_H
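
For reference, the bound recomputed inside the loop above is the KLD-sampling bound (Fox's formula, as used by Li et al.): with k occupied bins and z_{1-delta} the (1 - delta) quantile of the standard normal distribution,

N = ((k - 1) / (2 * epsilon)) * (1 - 2 / (9 * (k - 1)) + sqrt(2 / (9 * (k - 1))) * z_{1-delta})^3

which is exactly what the front/back computation implements; drawing stops once the number of sampled particles reaches N (or N_max).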


@@ -0,0 +1,31 @@
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGLOG_H
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGLOG_H
#include "ParticleFilterResamplingSimple.h"
namespace SMC {
/**
* draws particles using a dampened (square-root) weight; a log-based variant is left commented out below
*/
template <typename State>
class ParticleFilterResamplingLog : public ParticleFilterResamplingSimple<State> {
public:
void resample(std::vector<Particle<State>>& particles) override {
for (Particle<State>& p : particles) {
//p.weight = - 1.0 / std::log(p.weight);
p.weight = std::pow(p.weight, 0.5);
}
ParticleFilterResamplingSimple<State>::resample(particles);
}
};
}
#endif // K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGLOG_H


@@ -0,0 +1,163 @@
#ifndef PARTICLEFILTERRESAMPLINGNEFF_H
#define PARTICLEFILTERRESAMPLINGNEFF_H
#include <algorithm>
#include <functional>
#include "ParticleFilterResampling.h"
#include "../ParticleAssertions.h"
namespace K {
/**
* partial resampling that keeps N_eff near a desired value by redrawing only a small fraction of the lowest-weighted particles per call
*/
template <typename State> class ParticleFilterResamplingNEff : public ParticleFilterResampling<State> {
public:
using DrawCallback = std::function<void(Particle<State>& p)>;
private:
/** random number generator */
std::minstd_rand gen;
const float desiredNEff;
const float maxAdapt = 0.05;
DrawCallback drawCallback;
public:
/** ctor */
ParticleFilterResamplingNEff(const float desiredNEff = 0.75, const float maxAdapt = 0.05) : desiredNEff(desiredNEff), maxAdapt(maxAdapt) {
gen.seed(1234);
}
/** callback to inform about redrawn particles */
void setDrawCallback(const DrawCallback& callback) {this->drawCallback = callback;}
void resample(std::vector<Particle<State>>& particles) override {
// comparator (highest first)
static auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) {
return p1.weight > p2.weight;
};
// ensure all particle-weights sum up to 1.0
normalize(particles);
// calculate current N_eff
const double curNEff = getNEff(particles);
// cur N_eff is > desired N_eff -> nothing to do
if (curNEff > desiredNEff) {return;}
const size_t cnt = particles.size();
// how many particles to discard and resample based on N_eff
// example: N_eff = 75% -> 25% to discard/resample
// cur N_eff < desired N_eff -> calculate adaption
float adapt = desiredNEff - curNEff;
if (adapt > maxAdapt) {adapt = maxAdapt;}
const size_t toResample = cnt * adapt;
// sort orig particles by weight (highest first)
std::sort(particles.begin(), particles.end(), comp);
// to-be-removed region [at the end of the vector]
const size_t start = particles.size() - toResample;
// zero the weight of the to-be-removed region
for (size_t i = start; i < cnt; ++i) {
particles[i].weight = 0;
}
// renormalize the remaining set
normalize(particles);
// replace the to-be-removed region [start:cnt]
for (size_t i = start; i < cnt; ++i) {
// track cumulative weight for each particle-index
std::vector<float> cumWeights;
// calculate cumulative weight for the copy [0:start] = the to-be-kept particles
double cumWeight = 0;
for (size_t j = 0; j < start; ++j) {
cumWeight += particles[j].weight;
cumWeights.push_back(cumWeight);
}
// get a random particle between [0:start]
const size_t idx = draw(cumWeights);
// split
particles[i] = particles[idx];
particles[i].weight /= 2;
particles[idx].weight /= 2;
// callback?
if (drawCallback) {drawCallback(particles[i]);}
}
// sanity check. sum of weights must now still be 1.0!
double weightSum = 0;
for (const auto& p : particles) {weightSum += p.weight;}
if (std::abs(weightSum - 1.0) > 0.01) {
throw Exception("particle weight does not sum up to 1.0");
}
}
private:
void normalize(std::vector<Particle<State>>& particles) const {
double sum = 0.0;
for (const auto& p : particles) {sum += p.weight;} // calculate sum
for (auto& p : particles) {p.weight /= sum;} // normalize
}
double getNEff(const std::vector<Particle<State>>& particles) const {
double sum = 0.0;
for (auto& p : particles) {
sum += (p.weight * p.weight);
}
return 1.0 / sum / particles.size();
}
size_t draw(const std::vector<float>& cumWeights) {
// get the cumulative weight [= last entry]
const double cumWeight = cumWeights.back();
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumWeight);
// draw a random value between [0:cumWeight]
const float rand = dist(gen);
// search comparator (cumWeight is ordered -> use binary search)
auto it = std::lower_bound(cumWeights.begin(), cumWeights.end(), rand);
// convert iterator to index
const int idx = it - cumWeights.begin();
return idx;
}
};
}
#endif // PARTICLEFILTERRESAMPLINGNEFF_H
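
As a worked example of the adaptation above: with desiredNEff = 0.75, maxAdapt = 0.05 and a measured N_eff fraction of 0.60, the adaption 0.75 - 0.60 = 0.15 is clamped to 0.05, so 5% of the particles (the lowest-weighted ones after sorting) are zeroed and replaced; each freed slot receives a copy of a randomly drawn surviving particle whose weight is split in half between the survivor and the copy, which keeps the total weight at 1.0.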


@@ -0,0 +1,34 @@
/*
* ParticleResamplingNone.h
*
* Created on: Sep 18, 2013
* Author: Frank Ebner
*/
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGNONE_H_
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGNONE_H_
#include "ParticleFilterResampling.h"
namespace K {
/**
* dummy resampler simply doing nothing
*/
template <typename State>
class ParticleFilterResamplingNone : public ParticleFilterResampling<State> {
public:
// /** dtor */
// ~ParticleFilterResamplingNone() {;}
void resample(std::vector<Particle<State>>& particles) override {
(void) particles;
}
};
}
#endif /* K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGNONE_H_ */


@@ -0,0 +1,94 @@
#ifndef PARTICLEFILTERRESAMPLINGPERCENT_H
#define PARTICLEFILTERRESAMPLINGPERCENT_H
#include <algorithm>
#include "ParticleFilterResampling.h"
#include "../../ParticleAssertions.h"
namespace SMC {
/**
* resamples a fixed percentage of the lowest-weighted particles by cloning randomly drawn higher-weighted ones
*/
template <typename State> class ParticleFilterResamplingPercent : public ParticleFilterResampling<State> {
private:
/** random number generator */
std::minstd_rand gen;
float percent;
public:
/** ctor */
ParticleFilterResamplingPercent(const float percent) : percent(percent) {
gen.seed(1234);
}
void resample(std::vector<Particle<State>>& particles) override {
// comparator (highest first)
static auto comp = [] (const Particle<State>& p1, const Particle<State>& p2) {
return p1.weight > p2.weight;
};
const uint32_t cnt = (uint32_t) particles.size();
// sort particles by weight (highest first)
std::sort(particles.begin(), particles.end(), comp);
// to-be-removed region
const int start = particles.size() * (1-percent);
const int end = particles.size();
std::uniform_int_distribution<int> dist(0, start-1);
// remove by re-drawing
for (uint32_t i = start; i < end; ++i) {
const int rnd = dist(gen);
particles[i] = particles[rnd];
particles[i].weight /= 2;
particles[rnd].weight /= 2;
}
// sanity check: calculate the weight-sum (the redraw keeps the total weight unchanged)
double weightSum = 0;
for (const auto& p : particles) {
weightSum += p.weight;
}
(void) weightSum;
}
private:
/** draw one particle according to its weight from the copy vector */
const Particle<State>& draw(std::vector<Particle<State>>& copy, const double cumWeight) {
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumWeight);
// draw a random value between [0:cumWeight]
const float rand = dist(gen);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (const Particle<State>& s, const float d) {return s.weight < d;};
auto it = std::lower_bound(copy.begin(), copy.end(), rand, comp);
return *it;
}
};
}
#endif // PARTICLEFILTERRESAMPLINGPERCENT_H


@@ -0,0 +1,102 @@
/*
* ParticleResamplingSimple.h
*
* Created on: Sep 18, 2013
* Author: Frank Ebner
*/
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGSIMPLE_H_
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGSIMPLE_H_
#include <algorithm>
#include <random>
#include "ParticleFilterResampling.h"
#include "../../ParticleAssertions.h"
namespace SMC {
/**
* uses simple probability resampling by drawing particles according
* to their current weight.
* O(log(n)) per particle
*/
template <typename State>
class ParticleFilterResamplingSimple : public ParticleFilterResampling<State> {
private:
/** this is a copy of the particle-set to draw from it */
std::vector<Particle<State>> particlesCopy;
/** random number generator */
std::minstd_rand gen;
public:
/** ctor */
ParticleFilterResamplingSimple() {
gen.seed(1234);
}
void resample(std::vector<Particle<State>>& particles) override {
// compile-time sanity checks
// TODO: this solution requires EXPLICIT overloading which is bad...
// static_assert( HasOperatorAssign<State>::value, "your state needs an assignment operator!" );
const uint32_t cnt = (uint32_t) particles.size();
// equal weight for all particles. sums up to 1.0
const double equalWeight = 1.0 / (double) cnt;
// ensure the copy vector has the same size as the real particle vector
particlesCopy.resize(cnt);
// swap both vectors
particlesCopy.swap(particles);
// calculate cumulative weight
double cumWeight = 0;
for (uint32_t i = 0; i < cnt; ++i) {
cumWeight += particlesCopy[i].weight;
particlesCopy[i].weight = cumWeight;
}
// now draw from the copy vector and fill the original one
// with the resampled particle-set
for (uint32_t i = 0; i < cnt; ++i) {
particles[i] = draw(cumWeight);
particles[i].weight = equalWeight;
}
}
private:
/** draw one particle according to its weight from the copy vector */
const Particle<State>& draw(const double cumWeight) {
// draw a random value between [0:cumWeight]
std::uniform_real_distribution<double> dist(0, cumWeight);
const double rand = dist(gen);
// the copied weights are cumulative (ordered) -> use binary search
auto comp = [] (const Particle<State>& s, const double d) {return s.weight < d;};
auto it = std::lower_bound(particlesCopy.begin(), particlesCopy.end(), rand, comp);
// guard against floating-point rounding at the upper end
if (it == particlesCopy.end()) {--it;}
return *it;
}
};
}
#endif /* K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGSIMPLE_H_ */
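The draw() above relies on a cumulative weight vector plus binary search, which gives the stated O(log n) per particle; a self-contained sketch of that idea on plain doubles (independent of the SMC headers):
// cumulative_draw_sketch.cpp -- standalone illustration of the cumulative-weight draw
#include <vector>
#include <numeric>
#include <random>
#include <algorithm>
#include <iostream>

int main() {
    const std::vector<double> weights = {0.1, 0.4, 0.2, 0.3};
    // turn the weights into a cumulative sum (ordered by construction)
    std::vector<double> cum(weights.size());
    std::partial_sum(weights.begin(), weights.end(), cum.begin());
    std::minstd_rand gen(1234);
    std::uniform_real_distribution<double> dist(0.0, cum.back());
    // each draw is a binary search within the cumulative sum -> O(log n)
    for (int i = 0; i < 5; ++i) {
        const double r = dist(gen);
        const auto it = std::lower_bound(cum.begin(), cum.end(), r);
        std::cout << "drew index " << (it - cum.begin()) << "\n";
    }
    return 0;
}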

View File

@@ -0,0 +1,95 @@
#ifndef K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGWHEEL_H_
#define K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGWHEEL_H_
#include <algorithm>
#include <vector>
#include <cstdint>
#include <iostream>
#include "ParticleFilterResampling.h"
#include "../../ParticleAssertions.h"
#include "../../../distribution/Uniform.h"
#include "../../../../os/Time.h"
/**
* sources:
* https://www.udacity.com/course/viewer#!/c-cs373/l-48704330/e-48748082/m-48740082
* https://www.youtube.com/watch?list=PLpUPoM7Rgzi_7YWn14Va2FODh7LzADBSm&feature=player_detailpage&v=kZhOJgooMxI#t=567
*/
namespace SMC {
/**
* uses simple probability resampling by drawing particles according
* to their current weight using a "wheel"
* -> O(n) for all particles
*/
template <typename State>
class ParticleFilterResamplingWheel: public ParticleFilterResampling<State> {
private:
/** this is a copy of the particle-set to draw from it */
std::vector<Particle<State>> particlesCopy;
public:
void resample(std::vector<Particle<State>>& particles) override {
// debug: measure the resampling duration
const uint64_t start = K::Time::getTimeMS();
// compile-time sanity checks
// TODO: this solution requires EXPLICIT overloading which is bad...
//static_assert( HasOperatorAssign<State>::value, "your state needs an assignment operator!" );
const uint32_t cnt = (uint32_t) particles.size();
// equal weight for all particles. sums up to 1.0
const double equalWeight = 1.0 / (double) cnt;
// get the weight of the "heaviest" particle
const auto lambda = [] (const Particle<State>& p1, const Particle<State>& p2) {return p1.weight < p2.weight;};
const Particle<State> max = *std::max_element(particles.begin(), particles.end(), lambda);
// draw from the input particle set by treating it as a wheel
K::UniformDistribution dist(0, 2 * max.weight);
int curIdx = K::UniformDistribution::draw( (int)0, (int)particles.size() - 1);
double curOffset = dist.draw();
// ensure the copy vector has the same size as the real particle vector
particlesCopy.resize(cnt);
// draw the new set of particles
for (uint32_t i = 0; i < cnt; ) {
// found a suitable particle, use it and draw the next random number
if (particles[curIdx].weight >= curOffset) {
particlesCopy[i] = particles[curIdx];
particlesCopy[i].weight = equalWeight;
curOffset += dist.draw();
++i;
// weight too small: subtract the particle's weight and move on to the next particle
} else {
curOffset -= particles[curIdx].weight;
curIdx = (curIdx + 1) % (particles.size());
}
}
// swap both vectors
particlesCopy.swap(particles);
// debug: print the elapsed resampling time (ms)
const uint64_t end = K::Time::getTimeMS();
std::cout << (end-start) << std::endl;
}
};
}
#endif /* K_MATH_FILTER_PARTICLES_PARTICLEFILTERRESAMPLINGWHEEL_H_ */
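The wheel idea referenced by the sources above can be shown without the K:: helpers; a standalone sketch of the same accept/advance loop on plain weights (values made up for illustration):
// resampling_wheel_sketch.cpp -- standalone illustration of the wheel loop
#include <vector>
#include <random>
#include <algorithm>
#include <iostream>

int main() {
    const std::vector<double> weights = {0.05, 0.5, 0.15, 0.3};
    std::mt19937 gen(1234);
    const double maxW = *std::max_element(weights.begin(), weights.end());
    std::uniform_int_distribution<size_t> idxDist(0, weights.size() - 1);
    std::uniform_real_distribution<double> offDist(0.0, 2.0 * maxW);

    size_t curIdx = idxDist(gen);      // random starting position on the wheel
    double curOffset = offDist(gen);   // random offset in [0 : 2*maxW]
    std::vector<size_t> drawn;
    while (drawn.size() < weights.size()) {
        if (weights[curIdx] >= curOffset) {
            // the wheel stopped inside this particle: accept it, then spin again
            drawn.push_back(curIdx);
            curOffset += offDist(gen);
        } else {
            // offset too large: subtract this particle's weight and move on
            curOffset -= weights[curIdx];
            curIdx = (curIdx + 1) % weights.size();
        }
    }
    for (const size_t i : drawn) { std::cout << i << " "; }
    std::cout << "\n";
    return 0;
}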

View File

@@ -0,0 +1,135 @@
#ifndef INTERACTINGMULTIPLEMODELPARTICLEFILTER_H
#define INTERACTINGMULTIPLEMODELPARTICLEFILTER_H
#include <vector>
#include <memory>
#include <eigen3/Eigen/Dense>
#include "mixing/MixingSampler.h"
#include "MarkovTransitionProbability.h"
#include "estimation/JointEstimation.h"
#include "../filtering/ParticleFilterMixing.h"
#include "../../Assertions.h"
namespace SMC {
/**
* the main-class for the IMMPF (interacting multiple model particle filter)
* based on Driessen and Boers
* @param modes vector of particle filters, one per mode
*/
template <typename State, typename Control, typename Observation>
class InteractingMultipleModelParticleFilter {
private:
/** the used particle filters */
std::vector<ParticleFilterMixing<State, Control, Observation>> modes;
/** the mixing function to use */
std::unique_ptr<MixingSampler<State, Control, Observation>> mixing;
/** the function for calculating markov chain transition*/
std::unique_ptr<MarkovTransitionProbability<State, Control, Observation>> transition;
/** the function for calculating a joint estimation */
std::unique_ptr<JointEstimation<State, Control, Observation>> estimation;
/** the transition probability matrix */
Eigen::MatrixXd transitionProbabilityMatrix;
public:
/** ctor
* NOTE: each row of the transition matrix needs to sum to 1!
*/
InteractingMultipleModelParticleFilter(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes, Eigen::MatrixXd transitionProbabilityMatrix) {
// TODO: this is a deep copy of all modes; avoiding it would speed things up considerably
this->modes = modes;
// init the transition probability matrix
this->transitionProbabilityMatrix = transitionProbabilityMatrix;
}
/** dtor */
~InteractingMultipleModelParticleFilter() {
;
}
/** set mixing function */
void setMixingSampler(std::unique_ptr<MixingSampler<State, Control, Observation>> mixing) {
Assert::isNotNull(mixing, "setMixingSampler() MUST not be called with a nullptr!");
this->mixing = std::move(mixing);
}
/** set mode transition function */
void setMarkovTransitionProbability(std::unique_ptr<MarkovTransitionProbability<State, Control, Observation>> transition) {
Assert::isNotNull(transition, "setMarkovTransitionProbability() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** set joint estimation function */
void setJointEstimation(std::unique_ptr<JointEstimation<State, Control, Observation>> estimation) {
Assert::isNotNull(estimation, "setJointEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
const std::vector<ParticleFilterMixing<State, Control, Observation>>& getModes() const{
return modes;
}
/** perform one IMM update: mixing -> update of all particle filters -> transition-matrix update -> posterior probabilities -> joint estimation */
State update(const Control* control, const Observation& observation){
// sanity checks (if enabled)
Assert::isNotNull(mixing, "mixing sampler MUST not be null! call setMixingSampler() first!");
Assert::isNotNull(transition, "transition MUST not be null! call setMarkovTransitionProbability() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setJointEstimation() first!");
// mix the modes depending on the KLD divergence and their calculated
// impact (mode posterior probability) to draw new particles
mixing->mixAndSample(modes, this->transitionProbabilityMatrix);
for(ParticleFilterMixing<State, Control, Observation>& filter : modes){
//update the particle filter and save estimation
filter.update(control, observation);
}
// calculate the transition probability matrix for the Markov chain based on
// the KLD divergence.
this->transitionProbabilityMatrix = transition->update(modes, observation);
// calc posterior probability
this->calcPosteriorProbability();
// calculate current estimation
const State jointEst = estimation->estimate(modes);
//done
return jointEst;
}
private:
void calcPosteriorProbability(){
// calculate the likelihood sum (lambda) over all modes
// the posterior is initialized with P(m_0 | Z_0)
double likelihoodSum = 0.0;
for(ParticleFilterMixing<State, Control, Observation>& filter : modes){
likelihoodSum += filter.getWeightSum() * filter.getPredictedModeProbability();
}
// set the last posterior probability p(m_t-1 | Z_t-1) for all modes
for(ParticleFilterMixing<State, Control, Observation>& filter : modes){
filter.setModePosteriorProbability(likelihoodSum);
}
}
};
}
#endif // INTERACTINGMULTIPLEMODELPARTICLEFILTER_H
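The ctor note above demands a row-stochastic transition matrix; a small Eigen sketch of building and checking one (the two modes and their probabilities are made-up values):
// transition_matrix_sketch.cpp -- building a row-stochastic matrix for the IMMPF ctor
#include <eigen3/Eigen/Dense>
#include <iostream>
#include <cassert>
#include <cmath>

int main() {
    // two modes (e.g. "walking" and "standing"); each row must sum to 1
    Eigen::MatrixXd T(2, 2);
    T << 0.9, 0.1,
         0.2, 0.8;
    for (int r = 0; r < T.rows(); ++r) {
        assert(std::abs(T.row(r).sum() - 1.0) < 1e-9);
    }
    std::cout << "transition matrix:\n" << T << std::endl;
    return 0;
}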

View File

@@ -0,0 +1,30 @@
#ifndef MARKOVTRANSITIONPROBABILITY_H
#define MARKOVTRANSITIONPROBABILITY_H
#include "../filtering/ParticleFilterMixing.h"
#include <eigen3/Eigen/Dense>
namespace SMC {
/**
* interface for all available transition probability calculations
* within the IMMPF
*/
template <typename State, typename Control, typename Observation>
class MarkovTransitionProbability {
public:
/** virtual dtor */
virtual ~MarkovTransitionProbability() {}
/**
* perform the calculation of the transition matrix
* @param modes vector of modes / particle filters
* @param obs the current observation
*/
virtual Eigen::MatrixXd update(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes, const Observation& obs) = 0;
};
}
#endif // MARKOVTRANSITIONPROBABILITY_H

View File

@@ -0,0 +1,25 @@
#ifndef JOINTESTIMATION_H
#define JOINTESTIMATION_H
#include "../../filtering/ParticleFilterMixing.h"
namespace SMC {
/**
* interface for all available joint estimations.
* within the IMMPF we have multiple particle filters;
* the "true" estimate is a joint state over all of them
*/
template <typename State, typename Control, typename Observation>
class JointEstimation {
public:
/** virtual dtor */
virtual ~JointEstimation() {}
// get the current joint state estimation for the given modes
virtual const State estimate(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes) = 0;
};
}
#endif // JOINTESTIMATION_H

View File

@@ -0,0 +1,28 @@
#ifndef JOINTESTIMATIONPOSTERIORONLY_H
#define JOINTESTIMATIONPOSTERIORONLY_H
#include "JointEstimation.h"
#include "../../filtering/ParticleFilterMixing.h"
namespace SMC {
/**
* Use only the posterior distribution (first mode entry) for the joint estimation
*/
template <typename State, typename Control, typename Observation>
class JointEstimationPosteriorOnly
: public JointEstimation<State, Control, Observation> {
public:
// get the current joint state estimation for the given modes
const State estimate(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes) override {
return modes[0].getEstimation();
}
};
}
#endif // JOINTESTIMATIONPOSTERIORONLY_H

View File

@@ -0,0 +1,29 @@
#ifndef MIXINGSAMPLER_H
#define MIXINGSAMPLER_H
#include "../../filtering/ParticleFilterMixing.h"
#include <eigen3/Eigen/Dense>
namespace SMC {
/**
* interface for all available mixing/sampling methods
* within the IMM particle filter
*/
template <typename State, typename Control, typename Observation>
class MixingSampler {
public:
/** virtual dtor */
virtual ~MixingSampler() {}
/**
* perform mixing of the modes and sample according to the mode probabilities
* @param modes the vector of all modes (particle filters) to mix
* @param transitionProbabilityMatrix the Markov-chain transition probability matrix
*/
virtual void mixAndSample(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes, Eigen::MatrixXd transitionProbabilityMatrix) = 0;
};
}
#endif // MIXINGSAMPLER_H

View File

@@ -0,0 +1,157 @@
#ifndef MIXINGSAMPLERDIVERGENCY_H
#define MIXINGSAMPLERDIVERGENCY_H
#include "MixingSampler.h"
#include <algorithm>
#include <random>
#include <chrono>
#include <eigen3/Eigen/Dense>
namespace SMC {
/**
* Using the direct sampling approach of Driessen and Boers in
* "Efficient particle filter for jump Markov nonlinear systems".
* as transition probability matrix for the Markov chain we use
* a divergence measure based on the Jensen-Shannon divergence
*/
template <typename State, typename Control, typename Observation> class MixingSamplerDivergency
: public MixingSampler<State, Control, Observation> {
/** random number generator */
std::minstd_rand genNorm;
std::minstd_rand genPart;
/** copy of the modes with cumulative probabilities for easy drawing*/
std::vector<ParticleFilterMixing<State, Control, Observation>> copyModes;
public:
void mixAndSample(std::vector<ParticleFilterMixing<State, Control, Observation>>& modes, Eigen::MatrixXd transitionProbabilityMatrix) override{
genNorm.seed(std::chrono::system_clock::now().time_since_epoch().count());
genPart.seed(std::chrono::system_clock::now().time_since_epoch().count() + 233);
// set copyModes for later drawing
copyModes = modes;
// create cumulative particlesets for the copy
// Note: in most cases, the particles are already resampled within the filtering stage
// but in some cases they are not and therefore we need to draw cumulatively and not equally
for(ParticleFilterMixing<State, Control, Observation>& copyFilter : copyModes){
double cumWeight = 0;
std::vector<Particle<State>> copyParticles;
copyParticles = copyFilter.getParticles();
for(size_t i = 0; i < copyParticles.size(); ++i){
cumWeight += copyParticles[i].weight;
copyParticles[i].weight = cumWeight;
}
copyFilter.setParticles(copyParticles);
}
// calculate the new predicted mode prob P(m_t|Z_t-1) and P(m_t-1 | m_t, Z_t-1)
int m = 0; //this is m_t
for(ParticleFilterMixing<State, Control, Observation>& focusedFilter : modes){
// P(m_t|Z_t-1) = sum(P(m_t | m_t-1) P(m_t-1 | Z_t-1))
int i = 0; //these are all possible m_t-1
double predictedModeProbability = 0;
for(ParticleFilterMixing<State, Control, Observation>& filter : modes){
predictedModeProbability += transitionProbabilityMatrix(m,i) * filter.getModePosteriorProbability();
++i;
}
//Assert::isNotNull(predictedModeProbability, "predictedModeProbability is zero.. thats not possible!");
focusedFilter.setPredictedModeProbability(predictedModeProbability);
// cumulative sum of TransitionModeProbabilities for drawing modes from the perspective of ONE filter!
// this means the transition mode probabilities are re-calculated for each filter!
// calculate P(m_t-1 | m_t, Z_t-1) = P(m_t | m_t-1) * p(m_t-1 | Z_t-1) / P(m_t|Z_t-1)
double cumTransitionModeProbability = 0;
i = 0;
for(ParticleFilterMixing<State, Control, Observation>& filter : modes){
double prob = (transitionProbabilityMatrix(m,i) * filter.getModePosteriorProbability()) / focusedFilter.getPredictedModeProbability();
filter.setTransitionModeProbability(prob);
cumTransitionModeProbability += prob;
copyModes[i].setTransitionModeProbability(cumTransitionModeProbability);
//std::cout << "Draw Mode Probability from mode " << m << i << " : " << prob << std::endl;
++i;
}
// draw new modes and particles
// Note: in most cases, the particles are already resampled within the filtering stage
// but in some cases they are not and therefore we need to draw cumulatively and not equally
// TODO: make the number of particles dynamic, e.g. depending on the KLD
// number of particles for this timestep
int numParticles = focusedFilter.getParticles().size();
std::vector<Particle<State>> newParticles;
newParticles.resize(numParticles);
double equalWeight = 1.0 / numParticles;
// draw modes cumulative
for(int k = 0; k < numParticles; ++k){
auto mode = drawMode(cumTransitionModeProbability);
newParticles[k] = drawParticle(mode);
newParticles[k].weight = equalWeight;
}
focusedFilter.setParticles(newParticles);
//iter
++m;
}
}
private:
/** draw a mode depending upon the transition mode probabilities */
ParticleFilterMixing<State, Control, Observation>& drawMode(const double cumTransitionModeProbabilities){
// generate random values between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, cumTransitionModeProbabilities);
// draw a random value between [0:cumWeight]
const float rand = dist(genNorm);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (ParticleFilterMixing<State, Control, Observation>& filter, const float d) {return filter.getTransitionModeProbability() < d;};
auto it = std::lower_bound(copyModes.begin(), copyModes.end(), rand, comp);
return *it;
}
/** draw one particle according to its weight from the copy vector of a given mode */
const Particle<State>& drawParticle(ParticleFilterMixing<State, Control, Observation>& filter) {
// the particle weights are cumulative -> the last entry holds the total weight
// generate a random value between [0:cumWeight]
std::uniform_real_distribution<float> dist(0, filter.getParticles().back().weight);
// draw a random value between [0:cumWeight]
const float rand = dist(genPart);
// search comparator (cumWeight is ordered -> use binary search)
auto comp = [] (const Particle<State>& s, const float d) {return s.weight < d;};
auto it = std::lower_bound(filter.getParticles().begin(), filter.getParticles().end(), rand, comp);
return *it;
}
};
}
#endif // MIXINGSAMPLERDIVERGENCY_H
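The predicted mode probability computed per mode in mixAndSample(), P(m_t | Z_t-1) = sum_i P(m_t | m_t-1 = i) * P(m_t-1 = i | Z_t-1), is a plain matrix-vector product; a small Eigen sketch with made-up numbers:
// predicted_mode_probability_sketch.cpp -- the inner sum of mixAndSample() as a matrix-vector product
#include <eigen3/Eigen/Dense>
#include <iostream>

int main() {
    // T(m, i) = P(m_t = m | m_t-1 = i), using the same indexing as mixAndSample()
    Eigen::MatrixXd T(2, 2);
    T << 0.9, 0.1,
         0.1, 0.9;
    // P(m_t-1 = i | Z_t-1), i.e. the mode posterior probabilities of the previous step
    Eigen::VectorXd posterior(2);
    posterior << 0.7, 0.3;
    // predicted(m) = sum_i T(m, i) * posterior(i)
    const Eigen::VectorXd predicted = T * posterior;
    std::cout << "P(m_t | Z_t-1) = " << predicted.transpose() << std::endl;
    return 0;
}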

View File

@@ -0,0 +1,100 @@
#ifndef CUMULATIVESAMPLER_H
#define CUMULATIVESAMPLER_H
#include <vector>
#include <memory>
#include <algorithm>
#include <cstdlib>
#include <cstdint>
#include "../Particle.h"
#include "ParticleTrajectorieSampler.h"
namespace SMC {
/**
* drawing trajectories using a cumulative drawer
*/
template <typename State>
class CumulativeSampler : public ParticleTrajectorieSampler<State> {
public:
/** ctor */
CumulativeSampler(){
}
/** draw a single particle */
Particle<State> drawSingleParticle(std::vector<Particle<State>> const& particles){
// create a copy of the particles (its weights will be turned into cumulative weights)
std::vector<Particle<State>> particlesCopy = particles;
// calculate cumulative weight
double cumWeight = 0;
for (uint32_t i = 0; i < particles.size(); ++i) {
cumWeight += particlesCopy[i].weight;
particlesCopy[i].weight = cumWeight;
}
return draw(cumWeight, particlesCopy, particles);
}
/** draw a trajectory of n particles */
std::vector<Particle<State>> drawTrajectorie(std::vector<Particle<State>> const& particles, const int numRealizations){
// create a copy of the particles (its weights will be turned into cumulative weights)
std::vector<Particle<State>> particlesCopy = particles;
// calculate cumulative weight
double cumWeight = 0;
for (uint32_t i = 0; i < particles.size(); ++i) {
cumWeight += particlesCopy[i].weight;
particlesCopy[i].weight = cumWeight;
}
// now draw numRealizations particles according to their (cumulative) weights
std::vector<Particle<State>> trajectorie;
trajectorie.reserve(numRealizations);
for (int i = 0; i < numRealizations; ++i) {
trajectorie.push_back(draw(cumWeight, particlesCopy, particles));
}
return trajectorie;
}
private:
/** function for drawing particles */
Particle<State> draw(const double cumWeight, std::vector<Particle<State>> const& cumParticles, std::vector<Particle<State>> const& origParticles){
// random value between [0;1]
const double rand01 = double(rand()) / double(RAND_MAX);
// random value between [0; cumulativeWeight]
const double rand = rand01 * cumWeight;
// search comparator
auto comp = [] (const Particle<State>& s, const double d) {return s.weight < d;};
auto it = std::lower_bound(cumParticles.begin(), cumParticles.end(), rand, comp);
//get the idx for the iterator it. this is the same as std::distance(..,..)
int idx = it - cumParticles.begin();
//get the original weight instead of the cumulative weight
Particle<State> drawnParticle = *it;
drawnParticle.weight = origParticles[idx].weight;
return drawnParticle;
}
};
}
#endif // CUMULATIVESAMPLER_H
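A minimal usage sketch for the sampler above; DemoState and the include paths are illustrative assumptions:
// example_cumulative_sampler.cpp -- illustrative sketch only
#include <vector>
#include <iostream>
#include "smc/Particle.h"                    // assumed include path
#include "smc/sampling/CumulativeSampler.h"  // assumed include path

struct DemoState { double x; };

int main() {
    std::vector<SMC::Particle<DemoState>> particles;
    particles.push_back(SMC::Particle<DemoState>(DemoState{0.0}, 0.2));
    particles.push_back(SMC::Particle<DemoState>(DemoState{1.0}, 0.5));
    particles.push_back(SMC::Particle<DemoState>(DemoState{2.0}, 0.3));

    SMC::CumulativeSampler<DemoState> sampler;
    // draw 5 particles according to their weights (e.g. as the start of a backward trajectory)
    std::vector<SMC::Particle<DemoState>> traj = sampler.drawTrajectorie(particles, 5);
    for (const auto& p : traj) { std::cout << p.state.x << " "; }
    std::cout << std::endl;
    return 0;
}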

View File

@@ -0,0 +1,31 @@
#ifndef PARTICLETRAJECTORIESAMPLER_H
#define PARTICLETRAJECTORIESAMPLER_H
#include <vector>
#include "..//Particle.h"
namespace SMC {
/**
* interface for Sampling Trajectories of Particles
*/
template <typename State>
class ParticleTrajectorieSampler {
public:
/** virtual dtor */
virtual ~ParticleTrajectorieSampler() {}
/** draw a single particle */
virtual Particle<State> drawSingleParticle(std::vector<Particle<State>> const& particles) = 0;
/** draw a trajectory from the incoming particles (similar to resampling) */
virtual std::vector<Particle<State>> drawTrajectorie(std::vector<Particle<State>>const& particles, const int num) = 0;
private:
/** function for drawing particles */
virtual Particle<State> draw(const double cumWeight, std::vector<Particle<State>> const& cumParticles, std::vector<Particle<State>> const& origParticles) = 0;
};
}
#endif // PARTICLETRAJECTORIESAMPLER_H

View File

@@ -0,0 +1,24 @@
#ifndef ARTIFICIALDISTRIBUTION_H
#define ARTIFICIALDISTRIBUTION_H
#include <vector>
#include "../Particle.h"
namespace SMC {
/**
* interface for artificial distributions
*/
template <typename State>
class ArtificialDistribution {
public:
/** virtual dtor */
virtual ~ArtificialDistribution() {}
/** calculate the probability/density*/
virtual double calculate(Particle<State> const& particle) = 0;
};
}
#endif // ARTIFICIALDISTRIBUTION_H
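A trivial implementation of this interface might return a constant density of 1, i.e. no artificial correction at all; a sketch, with the class name and include path being assumptions:
// uniform_artificial_distribution_sketch.h -- illustrative sketch only
#include "smc/smoothing/ArtificialDistribution.h"  // assumed include path

template <typename State>
class UniformArtificialDistribution : public SMC::ArtificialDistribution<State> {
public:
    /** constant density for every particle */
    double calculate(SMC::Particle<State> const& particle) override {
        (void) particle;
        return 1.0;
    }
};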

View File

@@ -0,0 +1,57 @@
#ifndef BACKWARDFILTER_H
#define BACKWARDFILTER_H
#include <vector>
#include <memory>
#include "BackwardFilterTransition.h"
#include "../sampling/ParticleTrajectorieSampler.h"
#include "../Particle.h"
#include "../filtering/resampling/ParticleFilterResampling.h"
#include "../filtering/estimation/ParticleFilterEstimation.h"
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../../Assertions.h"
namespace SMC {
template <typename State, typename Control, typename Observation>
class BackwardFilter {
public:
/** virtual dtor */
virtual ~BackwardFilter() {}
virtual State update(std::vector<Particle<State>> const& forwardParticles) = 0;
/** access to all backward / smoothed particles */
virtual const std::vector<std::vector<Particle<State>>>& getbackwardParticles() = 0;
/** set the estimation method to use */
virtual void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) = 0;
/** set the transition method to use */
virtual void setTransition(std::unique_ptr<BackwardFilterTransition<State, Control>> transition) = 0;
/** set the resampling method to use */
virtual void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) = 0;
/** set the resampling threshold as the percentage of efficient particles */
virtual void setNEffThreshold(const double thresholdPercent) = 0;
/** set sampler */
virtual void setSampler(std::unique_ptr<ParticleTrajectorieSampler<State>> sampler) { (void) sampler; };
/** get the used transition method */
virtual BackwardFilterTransition<State, Control>* getTransition() = 0;
/** reset */
virtual void reset() {};
};
}
#endif // BACKWARDFILTER_H

View File

@@ -0,0 +1,33 @@
#ifndef BACKWARDFILTERTRANSITION_H
#define BACKWARDFILTERTRANSITION_H
#include <vector>
#include "../Particle.h"
namespace SMC {
/**
* interface for the user-defined backward filter transition.
* the transition describes the probability of a state change during the transition phase p(q_t+1 | q_t)
*/
template <typename State, typename Control>
class BackwardFilterTransition {
public:
/** virtual dtor */
virtual ~BackwardFilterTransition() {}
/**
* @brief perform the transition p(q_t+1 | q_t) for all particles and possibilities
* if you do not implement this overload, make sure to throw an error when it is called
*/
virtual std::vector<std::vector<double>> transition(std::vector<Particle<State>> const& toBeSmoothedParticles_qt, std::vector<Particle<State>>const& alreadySmoothedParticles_qt1) = 0;
/**
* @brief perform a forward transition based on the to-be-smoothed particles at time q_t and sample particles at time q_t+1; also gets a vector with the controls c_1:T
* if you do not implement this overload, make sure to throw an error when it is called
*/
virtual std::vector<Particle<State>> transition(std::vector<Particle<State>> const& toBeSmoothedParticles_qt, std::vector<Control> const& controls_1T) = 0;
};
}
#endif // BACKWARDFILTERTRANSITION_H
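As an illustration of this interface, a 1-D Gaussian random-walk transition could implement both overloads roughly as follows; DemoState, DemoControl, the noise sigma and the include path are assumptions for the sketch:
// gaussian_random_walk_transition_sketch.h -- illustrative sketch only
#include <vector>
#include <random>
#include <cmath>
#include "smc/smoothing/BackwardFilterTransition.h"  // assumed include path

struct DemoState   { double x; };
struct DemoControl { double dx; };

class RandomWalkTransition : public SMC::BackwardFilterTransition<DemoState, DemoControl> {
private:
    const double sigma = 1.0;  // assumed step noise
    std::mt19937 gen{1234};
public:
    /** p(q_t+1 | q_t): unnormalized Gaussian density for every pair of particles */
    std::vector<std::vector<double>> transition(std::vector<SMC::Particle<DemoState>> const& toBeSmoothed_qt,
                                                std::vector<SMC::Particle<DemoState>> const& alreadySmoothed_qt1) override {
        std::vector<std::vector<double>> probs(alreadySmoothed_qt1.size(), std::vector<double>(toBeSmoothed_qt.size()));
        for (size_t j = 0; j < alreadySmoothed_qt1.size(); ++j) {
            for (size_t i = 0; i < toBeSmoothed_qt.size(); ++i) {
                const double d = alreadySmoothed_qt1[j].state.x - toBeSmoothed_qt[i].state.x;
                probs[j][i] = std::exp(-0.5 * d * d / (sigma * sigma));
            }
        }
        return probs;
    }
    /** forward transition: apply the last control plus Gaussian noise to every particle */
    std::vector<SMC::Particle<DemoState>> transition(std::vector<SMC::Particle<DemoState>> const& toBeSmoothed_qt,
                                                     std::vector<DemoControl> const& controls_1T) override {
        std::normal_distribution<double> noise(0.0, sigma);
        const double dx = controls_1T.empty() ? 0.0 : controls_1T.back().dx;
        std::vector<SMC::Particle<DemoState>> next = toBeSmoothed_qt;
        for (auto& p : next) { p.state.x += dx + noise(gen); }
        return next;
    }
};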

View File

@@ -0,0 +1,258 @@
/*
* BackwardSimulation.h
*
* Created on: Jun 23, 2015
* Author: Toni Fetzer
*/
#ifndef BACKWARDSIMULATION_H_
#define BACKWARDSIMULATION_H_
#include <vector>
#include <memory>
#include <algorithm>
#include <numeric>
#include <iostream>
#include "BackwardFilterTransition.h"
#include "BackwardFilter.h"
#include "../Particle.h"
#include "../filtering/resampling/ParticleFilterResampling.h"
#include "../filtering/estimation/ParticleFilterEstimation.h"
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../sampling/ParticleTrajectorieSampler.h"
#include "../../Assertions.h"
namespace SMC {
/**
* the main-class for the Backward Simulation Filter
* running "backwards" in time, generates multiple backwards trajectories
* (Realizations) by repeating the backward simulation M time.
* it can be started at a random time T of any forward particle filter
* [Monte Carlo smoothing for non-linear time series Godsill et al. '03]
* @param State the (user-defined) state for each particle
* @param numRealizations the number of backward trajectories (realizations) to generate
*/
template <typename State, typename Control, typename Observation>
class BackwardSimulation : public BackwardFilter<State, Control, Observation>{
private:
/** all smoothed particles T -> 1*/
std::vector<std::vector<Particle<State>>> backwardParticles;
/** container for particles */
std::vector<Particle<State>> smoothedParticles;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<BackwardFilterTransition<State, Control>> transition;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** the sampler for drawing trajectories */
std::unique_ptr<ParticleTrajectorieSampler<State>> sampler;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
/** number of realizations to be calculated */
int numRealizations;
/** is update called the first time? */
bool firstFunctionCall;
public:
/** ctor */
BackwardSimulation(int numRealizations) {
this->numRealizations = numRealizations;
backwardParticles.reserve(numRealizations);
smoothedParticles.reserve(numRealizations);
firstFunctionCall = true;
}
/** dtor */
~BackwardSimulation() {
;
}
/** reset **/
void reset(){
// numRealizations stays as configured in the ctor
backwardParticles.clear();
backwardParticles.reserve(numRealizations);
smoothedParticles.clear();
smoothedParticles.reserve(numRealizations);
firstFunctionCall = true;
}
/** access to all backward / smoothed particles */
const std::vector<std::vector<Particle<State>>>& getbackwardParticles() {
return backwardParticles;
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<BackwardFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the sampler method to use */
void setSampler(std::unique_ptr<ParticleTrajectorieSampler<State>> sampler){
Assert::isNotNull(sampler, "setSampler() MUST not be called with a nullptr!");
this->sampler = std::move(sampler);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** get the used transition method */
BackwardFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/**
* perform update: transition -> correction -> approximation
* gets the weighted sample set of a standard condensation
* particle filter in REVERSED order!
*/
State update(std::vector<Particle<State>> const& forwardParticles) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
Assert::isNotNull(sampler, "sampler MUST not be null! call setSampler() first!");
//storage for single trajectories / smoothed particles
smoothedParticles.clear();
// Choose \tilde x_T = x^(i)_T with probability w^(i)_T
// Therefore sample independently from the categorical distribution of weights.
if(firstFunctionCall){
smoothedParticles = sampler->drawTrajectorie(forwardParticles, numRealizations);
firstFunctionCall = false;
backwardParticles.push_back(smoothedParticles);
const State es = estimation->estimate(smoothedParticles);
return es;
}
// compute weights using the transition model
// transitionWeights[numRealizations][numParticles]
std::vector<std::vector<double>> transitionWeights = transition->transition(forwardParticles, backwardParticles.back());
// get the next trajectory point for each realization
for(int j = 0; j < numRealizations; ++j){
// particles holding the smoothed weights at time t (start from the forward particles)
std::vector<Particle<State>> smoothedWeights = forwardParticles;
//check if all transitionWeights are zero
double weightSumTransition = std::accumulate(transitionWeights[j].begin(), transitionWeights[j].end(), 0.0);
Assert::isNot0(weightSumTransition, "all transition weights for smoothing are zero");
int i = 0;
for (auto& w : transitionWeights.at(j)) {
// multiply the weight of the particle at time t with its transition weight (normalization follows below)
smoothedWeights.at(i).weight = (smoothedWeights.at(i).weight * w);
if(smoothedWeights.at(i).weight != smoothedWeights.at(i).weight) {throw "detected NaN";}
// iter
++i;
}
//get the sum of all weights
auto lambda = [](double current, const Particle<State>& a){return current + a.weight; };
double weightSumSmoothed = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
//normalize the weights
if(weightSumSmoothed != 0.0){
for (size_t k = 0; k < smoothedWeights.size(); ++k){
smoothedWeights.at(k).weight /= weightSumSmoothed;
}
//check if normalization worked
double normWeightSum = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
Assert::isNear(normWeightSum, 1.0, 0.001, "Smoothed weights do not sum to 1");
}
// draw the next trajectory point at time t for this realization and save it
smoothedParticles.push_back(sampler->drawSingleParticle(smoothedWeights));
// throw if the weight of the smoothed particle is zero.
// in practice this can happen if a particle is completely separated from the rest and is therefore
// weighted zero or extremely low.
Assert::isNot0(smoothedParticles.back().weight, "smoothed particle has zero weight");
}
if(resampler)
{
//TODO - does this even make sense?
std::cout << "Warning - Resampling is not yet implemented!" << std::endl;
// //resampling if necessery
// double sum = 0.0;
// double weightSum = std::accumulate(smoothedParticles.begin().weight, smoothedParticles.end().weight, 0.0);
// for (auto& p : smoothedParticles) {
// p.weight /= weightSum;
// sum += (p.weight * p.weight);
// }
// const double neff = 1.0/sum;
// if (neff != neff) {throw "detected NaN";}
// // if the number of efficient particles is too low, perform resampling
// if (neff < smoothedParticles.size() * nEffThresholdPercent) { resampler->resample(smoothedParticles); }
}
// push_back the smoothedParticles
backwardParticles.push_back(smoothedParticles);
// estimate the current state
const State est = estimation->estimate(smoothedParticles);
// done
return est;
}
};
}
#endif /* BACKWARDSIMULATION_H_ */
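A rough driver sketch for the smoother above: the forward particle sets of times 1..T are fed in reversed order, as the update() documentation demands (the include path is an assumption; sampler, transition and estimation must already be set on the smoother):
// backward_pass_sketch.h -- illustrative driver for BackwardSimulation
#include <vector>
#include "smc/smoothing/BackwardSimulation.h"  // assumed include path

// run the backward pass over the stored forward particle sets (times 1..T)
template <typename State, typename Control, typename Observation>
std::vector<State> runBackwardPass(SMC::BackwardSimulation<State, Control, Observation>& smoother,
                                   const std::vector<std::vector<SMC::Particle<State>>>& forwardSets) {
    std::vector<State> smoothedEstimates;
    smoothedEstimates.reserve(forwardSets.size());
    // the smoother expects the forward particle sets in REVERSED order (T -> 1)
    for (auto it = forwardSets.rbegin(); it != forwardSets.rend(); ++it) {
        smoothedEstimates.push_back(smoother.update(*it));
    }
    return smoothedEstimates;
}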

View File

@@ -0,0 +1,226 @@
/*
* CondensationBackwardFilter.h
*
* Created on: Jun 23, 2015
* Author: Toni Fetzer
*/
#ifndef CONDENSATIONBACKWARDFILTER_H_
#define CONDENSATIONBACKWARDFILTER_H_
#include <vector>
#include <memory>
#include <iostream>
#include "BackwardFilterTransition.h"
#include "BackwardFilter.h"
#include "../Particle.h"
#include "../filtering/resampling/ParticleFilterResampling.h"
#include "../filtering/estimation/ParticleFilterEstimation.h"
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../../Assertions.h"
static long long count = 0; // debug counter for zero-gamma events
namespace SMC {
/**
* the main-class for the Condensation Backward Filter
* running "backwards" in time, updating every timestep, no resampling
* it can be started at a random time T of an forward particle filter
* [Smoothing filter for condensation by Isard and Blake '98]
* @param State the (user-defined) state for each particle
*/
template <typename State, typename Control, typename Observation>
class CondensationBackwardFilter : public BackwardFilter<State, Control, Observation> {
private:
/** all smoothed particles 1 -> T*/
std::vector<std::vector<Particle<State>>> backwardParticles;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<BackwardFilterTransition<State, Control>> transition;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
/** is update called the first time? */
bool firstFunctionCall = true;
public:
/** ctor */
CondensationBackwardFilter() {
}
/** dtor */
~CondensationBackwardFilter() {
;
}
/** access to all backward / smoothed particles */
const std::vector<std::vector<Particle<State>>>& getbackwardParticles() {
return backwardParticles;
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<BackwardFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** get the used transition method */
BackwardFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/**
* perform update: transition -> correction -> approximation
* gets the weighted sample set of a standard condensation
* particle filter in REVERSED order!
*/
State update(std::vector<Particle<State>> const& forwardParticles) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// since the algorithm starts at T-1 we need to initialize with the first set of forwardParticles
// psi_T = pi_T
if(firstFunctionCall){
backwardParticles.push_back(forwardParticles);
firstFunctionCall = false;
std::vector<Particle<State>> tt = forwardParticles;
const State es = estimation->estimate(tt);
return es;
}
// weight sum for normalization
double weightSum = 0.0;
// perform the transition step p(x_t+1|x_t)
std::vector<std::vector<double>> predictionProbabilities = transition->transition(forwardParticles, backwardParticles.back());
// calculate the correction factors
std::vector<double> correctionFactors;
for(int m = 0; m < forwardParticles.size(); ++m){
double gamma = 0.0;
for(int k = 0; k < forwardParticles.size(); ++k){
// gamma(m) = sum(pi(k) * alpha(m,k))
gamma += forwardParticles[k].weight * predictionProbabilities[m][k];
if (gamma != gamma) {throw "detected NaN";}
}
correctionFactors.push_back(gamma);
}
// approximate backward variables
std::vector<Particle<State>> smoothedParticles = forwardParticles;
for(int n = 0; n < forwardParticles.size(); ++n){
double delta = 0.0;
for(int m = 0; m < forwardParticles.size(); ++m){
// delta(n) = sum(psi(m) * alpha(m,n) / gamma(m))
//!! THIS IS A HACK !! gamma can become zero if the probability is extremely low, which would result in NaN
//!! therefore we set alpha(m,n) / gamma(m) = 1.0;
if(correctionFactors[m] == 0.0){
delta += backwardParticles.back().at(m).weight;
std::cout << "Gamma is Zero" << count ++ << std::endl;
}
else
delta += backwardParticles.back().at(m).weight * (predictionProbabilities[m][n] / correctionFactors[m]);
if (delta != delta) {throw "detected NaN";}
}
// Evaluate smoothing weights
// psi(n) = pi(n) * delta(n)
double weight = delta * forwardParticles[n].weight;
smoothedParticles[n].weight = weight;
// fill weightsum
weightSum += weight;
if (forwardParticles[n].weight != forwardParticles[n].weight) {throw "detected NaN";}
if (delta != delta) {throw "detected NaN";}
if (weight != weight) {throw "detected NaN";}
if (weightSum != weightSum) {throw "detected NaN";}
}
// normalize the particle weights and thereby calculate N_eff
double sum = 0.0;
for (auto& p : smoothedParticles) {
p.weight /= weightSum;
sum += (p.weight * p.weight);
// sanity check
// if (p.state.heading != p.state.heading) {throw "detected NaN";}
// if (p.state.z_nr != p.state.z_nr) {throw "detected NaN";}
// if (p.state.x_cm != p.state.x_cm) {throw "detected NaN";}
// if (p.state.y_cm != p.state.y_cm) {throw "detected NaN";}
// if (p.weight != p.weight) {throw "detected NaN";}
}
const double neff = 1.0/sum;
if (neff != neff) {throw "detected NaN";}
// estimate the current state
const State est = estimation->estimate(smoothedParticles);
// if (est.heading != est.heading) {throw "detected NaN";}
// if (est.z_nr != est.z_nr) {throw "detected NaN";}
// if (est.x_cm != est.x_cm) {throw "detected NaN";}
// if (est.y_cm != est.y_cm) {throw "detected NaN";}
if(resampler)
{
// if the number of efficient particles is too low, perform resampling
if (neff < smoothedParticles.size() * nEffThresholdPercent) { resampler->resample(smoothedParticles); }
}
// push_back the smoothedParticles
backwardParticles.push_back(smoothedParticles);
// done
return est;
}
};
}
#endif /* CONDENSATIONBACKWARDFILTER_H_ */
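The weight algebra inside update() (gamma(m) = sum_k pi(k) * alpha(m,k); delta(n) = sum_m psi(m) * alpha(m,n) / gamma(m); psi_new(n) = pi(n) * delta(n), followed by normalization) can be checked on a toy case; a standalone sketch with two particles and made-up numbers:
// condensation_smoothing_weights_sketch.cpp -- the weight algebra of update() on a 2-particle toy case
#include <vector>
#include <iostream>

int main() {
    const std::vector<double> pi  = {0.6, 0.4};                  // forward weights pi(k)
    const std::vector<double> psi = {0.5, 0.5};                  // previous backward weights psi(m)
    const std::vector<std::vector<double>> alpha = {{0.7, 0.3},  // alpha(m, k) = p(x_t+1 = m | x_t = k)
                                                    {0.2, 0.8}};
    const size_t N = pi.size();

    // gamma(m) = sum_k pi(k) * alpha(m, k)
    std::vector<double> gamma(N, 0.0);
    for (size_t m = 0; m < N; ++m)
        for (size_t k = 0; k < N; ++k) gamma[m] += pi[k] * alpha[m][k];

    // psi_new(n) = pi(n) * sum_m psi(m) * alpha(m, n) / gamma(m), then normalize
    std::vector<double> psiNew(N, 0.0);
    double sum = 0.0;
    for (size_t n = 0; n < N; ++n) {
        double delta = 0.0;
        for (size_t m = 0; m < N; ++m) delta += psi[m] * alpha[m][n] / gamma[m];
        psiNew[n] = pi[n] * delta;
        sum += psiNew[n];
    }
    for (double& w : psiNew) w /= sum;
    std::cout << "smoothed weights: " << psiNew[0] << " " << psiNew[1] << std::endl;
    return 0;
}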

View File

@@ -0,0 +1,142 @@
#ifndef FORWARDFILTERHISTORY_H
#define FORWARDFILTERHISTORY_H
#include <vector>
#include "../Particle.h"
#include "../../data/Timestamp.h"
#include "../../Assertions.h"
namespace SMC {
/**
* @brief provides a data structure for the data available at a specific timestamp of the forward filtering procedure:
* timestamp; particle set (after transition and update); control; observation
*/
template <typename State, typename Control, typename Observation>
class ForwardFilterHistory {
private:
// NOTE: it would be possible to make some kind of struct for this, but in many upcoming functions and methods I am not able
// to use all of this information. sometimes I have something like p(q_t+1| q_t, o_t) or p(o_t | q_t, c_t). So keep it flexible!
std::vector<Timestamp> timestamps;
std::vector<std::vector<Particle<State>>> particleSets;
std::vector<Control> controls;
std::vector<Observation> observations;
public:
ForwardFilterHistory(){
//empty ctor
}
void add(Timestamp time, std::vector<Particle<State>> set, Control control, Observation obs){
// Is empty? Null? etc.
Assert::isNotNull(time, "Timestamp is Null");
Assert::isNotNull(set, "Particle Set is Null");
Assert::isNotNull(control, "Control is Null");
Assert::isNotNull(obs, "Observation is Null");
timestamps.push_back(time);
particleSets.push_back(set);
controls.push_back(control);
observations.push_back(obs);
}
void removeLatest(){
timestamps.pop_back();
particleSets.pop_back();
controls.pop_back();
observations.pop_back();
}
void removeFirst(){
timestamps.erase(timestamps.begin());
particleSets.erase(particleSets.begin());
controls.erase(controls.begin());
observations.erase(observations.begin());
}
/**
* @brief return the particle set from [latestFilterUpdate - idx]
* @return vector of particles. note: C++11 move semantics apply to the returned vector
*/
std::vector<Particle<State>> getParticleSet(const int idx = 0){
return particleSets.at(particleSets.size() - 1 - idx);
}
/**
* @brief get the control from [latestFilterUpdate - idx]
* @return const control object
*/
const Control getControl(const int idx = 0){
return controls.at(controls.size() - 1 - idx);
}
/**
* @brief get the observation from [latestFilterUpdate - idx]
* @return const observation object
*/
const Observation getObservation(const int idx = 0){
return observations.at(observations.size() - 1 - idx);
}
/**
* @brief return the timestamp from [latestFilterUpdate - idx]
* @return a Timestamp object
*/
const Timestamp getTimestamp(const int idx = 0){
return timestamps.at(timestamps.size() - 1 - idx);
}
/**
* @brief getLatestFilterUpdateNum
* @return the number of stored filter updates (size of particleSets)
*/
const int getLatestFilterUpdateNum(){
return (int) particleSets.size();
}
/**
* @brief getLatestParticleSet
* @return the latest particle set. note: C++11 move semantics apply to the returned vector
*/
std::vector<Particle<State>> getLatestParticleSet(){
return particleSets.back();
}
/**
* @brief getLatestControls
* @return const control object
*/
const Control getLatestControls(){
return controls.back();
}
/**
* @brief getLatestObservation
* @return const observation object
*/
const Observation getLatestObservation(){
return observations.back();
}
/**
* @brief getLatestTimestamp
* @return const Timestamp object
*/
const Timestamp getLatestTimestamp(){
return timestamps.back();
}
};
}
#endif // FORWARDFILTERHISTORY_H

View File

@@ -0,0 +1,187 @@
/*
* TwoFilterSmoothing.h
*
* Created on: Jun 23, 2015
* Author: Toni Fetzer
*/
#ifndef TWOFILTERSMOOTHING_H_
#define TWOFILTERSMOOTHING_H_
#include <vector>
#include <memory>
#include "BackwardFilterTransition.h"
#include "BackwardFilter.h"
#include "ArtificialDistribution.h"
#include "../Particle.h"
#include "../filtering/resampling/ParticleFilterResampling.h"
#include "../filtering/estimation/ParticleFilterEstimation.h"
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../../Assertions.h"
namespace SMC {
/**
* Smoothing Forward and Backward Filter together.
* Call the Update Function.
* Algorithm taken from [Briers04] Smoothing Algorithms for State-Space Models
*/
template <typename State, typename Control>
class TwoFilterSmoothing {
private:
/** all smoothed particles 1 -> T*/
std::vector<std::vector<Particle<State>>> smoothedParticles;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<BackwardFilterTransition<State, Control>> transition;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** artificial distribution */
std::unique_ptr<ArtificialDistribution<State>> artificialDistribution;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
public:
/** ctor */
TwoFilterSmoothing() {
}
/** dtor */
~TwoFilterSmoothing() {
;
}
/** access to all backward / smoothed particles */
const std::vector<std::vector<Particle<State>>>& getsmoothedParticles() {
return smoothedParticles;
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<BackwardFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
void setArtificialDistribution(std::unique_ptr<ArtificialDistribution<State>> artificialDistribution){
Assert::isNotNull(artificialDistribution, "setArtificialDistribution() MUST not be called with a nullptr!");
this->artificialDistribution = std::move(artificialDistribution);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** get the used transition method */
BackwardFilterTransition<State, Control>* getTransition() {
return this->transition.get();
}
/**
* perform update: transition -> correction -> approximation
* particles from a forwards filter are used to re-weight those from a backwards filter
* so that they represent the target distribution.
* @param forwardParticles the forward-filter particles at t-1
* @param backwardParticles the backward-filter particles at t
*/
State update(std::vector<Particle<State>> const& forwardParticles, std::vector<Particle<State>> const& backwardParticles) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// perform the transition step p(backward_x_t|forward_x_t-1)
std::vector<std::vector<double>> predictionProbabilities = transition->transition(forwardParticles, backwardParticles);
// we are using the forward particles to re-weight the backward filter (the other direction would also be possible)
std::vector<Particle<State>> currentParticles = backwardParticles;
double weightSum = 0.0;
// calculate the correction factors
for(int j = 0; j < backwardParticles.size(); ++j){
double alpha = 0.0;
for(int i = 0; i < backwardParticles.size(); ++i){
// alpha(j) = sum(forward_weight_t-1 * prediction)
alpha += forwardParticles[i].weight * predictionProbabilities[j][i];
if (alpha != alpha) {throw "detected NaN";}
}
double gamma = 1.0;
if(artificialDistribution){
gamma = artificialDistribution->calculate(backwardParticles[j]);
}
//calc the weight
double weight = (currentParticles[j].weight / gamma) * alpha;
currentParticles[j].weight = weight;
weightSum += weight;
}
// normalize the particle weights and thereby calculate N_eff
double sum = 0.0;
for (auto& p : currentParticles) {
p.weight /= weightSum;
sum += (p.weight * p.weight);
}
double neff = 1.0/sum;
if (neff != neff) {neff = 1.0;}
// estimate the current state
const State est = estimation->estimate(currentParticles);
if(resampler)
{
// if the number of efficient particles is too low, perform resampling
if (neff < currentParticles.size() * nEffThresholdPercent) { resampler->resample(currentParticles); }
}
// push_back the smoothedParticles
smoothedParticles.push_back(currentParticles);
// done
return est;
}
};
}
#endif /* TWOFILTERSMOOTHING_H_ */