added Kullback-Leibler divergence for Gaussian cases

This commit is contained in:
toni
2017-03-09 18:57:47 +01:00
parent 62087fe072
commit e48d3bafcd
9 changed files with 374 additions and 8 deletions

View File

@@ -29,7 +29,7 @@ int main(int argc, char** argv) {
//::testing::GTEST_FLAG(filter) = "*WiFiOptimizer*";
::testing::GTEST_FLAG(filter) = "*MotionDetection*";
::testing::GTEST_FLAG(filter) = "*KullbackLeibler*";
//::testing::GTEST_FLAG(filter) = "*Barometer*";
//::testing::GTEST_FLAG(filter) = "*GridWalk2RelPressure*";

View File

@@ -8,5 +8,6 @@
#include "distribution/VonMises.h"
#include "distribution/Region.h"
#include "distribution/Triangle.h"
#include "distribution/NormalN.h"
#endif // DISTRIBUTIONS_H

View File

@@ -44,6 +44,15 @@ namespace Distribution {
gen.seed(seed);
}
/** get the mean value */
T getMu() const {
return this->mu;
}
/** get the standard deviation */
T getSigma() const {
return this->sigma;
}
/** get the probability for the given value */
static T getProbability(const T mu, const T sigma, const T val) {
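
For reference, a hypothetical usage sketch of the new getters together with the static helper above (assuming getProbability() evaluates the usual univariate Gaussian density; 1/sqrt(2*pi) ≈ 0.3989 is the density of N(0,1) at its mean):

Distribution::Normal<float> norm(0, 1);
const float mu = norm.getMu();       // 0
const float sigma = norm.getSigma(); // 1
// density of N(0,1) at x = 0: 1/sqrt(2*pi) ~ 0.3989
const float p = Distribution::Normal<float>::getProbability(mu, sigma, 0.0f);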

View File

@@ -0,0 +1,50 @@
#ifndef NORMALN_H
#define NORMALN_H
#include <cmath>
#include <eigen3/Eigen/Dense>
namespace Distribution {
	/** multivariate (N-dimensional) normal distribution */
	class NormalDistributionN {
	private:
		const Eigen::VectorXd mu;
		const Eigen::MatrixXd sigma;
		const double _a;
		const Eigen::MatrixXd _sigmaInv;
	public:
		/** ctor. normalization constant _a = 1 / sqrt( (2*pi)^k * det(sigma) ) */
		NormalDistributionN(const Eigen::VectorXd& mu, const Eigen::MatrixXd& sigma) :
			mu(mu), sigma(sigma), _a( 1.0 / std::sqrt( (sigma * 2.0 * M_PI).determinant() ) ), _sigmaInv(sigma.inverse()) {
		}
		/** get the probability density for the given value */
		double getProbability(const Eigen::VectorXd& val) const {
			const double b = ((val-mu).transpose() * _sigmaInv * (val-mu));
			return _a * std::exp(-b/2.0);
		}
		/** get the mean vector */
		const Eigen::VectorXd& getMu() const {
			return this->mu;
		}
		/** get the covariance matrix */
		const Eigen::MatrixXd& getSigma() const {
			return this->sigma;
		}
		/** get the precomputed inverse of the covariance matrix */
		const Eigen::MatrixXd& getSigmaInv() const {
			return this->_sigmaInv;
		}
	};
}
#endif // NORMALN_H
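
A minimal usage sketch of the new class (only the header above is assumed; for a 2D standard normal the density at the mean is 1/(2*pi) ≈ 0.159):

#include "distribution/NormalN.h"
#include <iostream>
int main() {
	// 2D standard normal: zero mean, identity covariance
	const Eigen::VectorXd mu = Eigen::VectorXd::Zero(2);
	const Eigen::MatrixXd cov = Eigen::MatrixXd::Identity(2, 2);
	const Distribution::NormalDistributionN norm(mu, cov);
	std::cout << norm.getProbability(mu) << std::endl; // ~0.159155 = 1/(2*pi)
	return 0;
}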

View File

@@ -0,0 +1,90 @@
#ifndef KULLBACKLEIBLER_H
#define KULLBACKLEIBLER_H
#include "../distribution/Normal.h"
#include "../distribution/NormalN.h"
#include "../../Assertions.h"
#include <cmath>
#include <string>
namespace Divergence {
	template <typename Scalar> class KullbackLeibler {
	public:
		/** calculate the Kullback-Leibler divergence KL(norm1 || norm2) for univariate Gaussians.
		 * note: the KL divergence is asymmetric, see getUnivariateGaussSymmetric().
		 * info: https://tgmstat.wordpress.com/2013/07/10/kullback-leibler-divergence/
		 */
		static inline Scalar getUnivariateGauss(const Distribution::Normal<Scalar>& norm1, const Distribution::Normal<Scalar>& norm2) {
			const Scalar sigma1Quad = norm1.getSigma() * norm1.getSigma();
			const Scalar sigma2Quad = norm2.getSigma() * norm2.getSigma();
			const Scalar mu12Quad = (norm1.getMu() - norm2.getMu()) * (norm1.getMu() - norm2.getMu());
			// kld = log(sigma_2 / sigma_1) + (sigma_1^2 + (mu_1 - mu_2)^2) / (2 * sigma_2^2) - 0.5
			const double kld = (std::log(norm2.getSigma()) - std::log(norm1.getSigma())) + ((sigma1Quad + mu12Quad) / (2.0 * sigma2Quad)) - 0.5;
			// the KL divergence is always >= 0
			if (kld < 0.0) {
				Assert::doThrow("The Kullback-Leibler divergence is < 0! That is not possible");
			}
			return kld;
		}
		/** symmetrized variant: KL(norm1 || norm2) + KL(norm2 || norm1) */
		static inline Scalar getUnivariateGaussSymmetric(const Distribution::Normal<Scalar>& norm1, const Distribution::Normal<Scalar>& norm2) {
			return getUnivariateGauss(norm1, norm2) + getUnivariateGauss(norm2, norm1);
		}
		/** calculate the Kullback-Leibler divergence KL(norm1 || norm2) for multivariate Gaussians */
		static inline Scalar getMultivariateGauss(const Distribution::NormalDistributionN& norm1, const Distribution::NormalDistributionN& norm2) {
			// both Gaussians must have the same dimension
			Assert::equal(norm1.getMu().rows(), norm2.getMu().rows(), "mean vectors do not have the same dimension");
			Assert::equal(norm1.getSigma().rows(), norm2.getSigma().rows(), "covariance matrices do not have the same dimension");
			Assert::equal(norm1.getSigma().cols(), norm2.getSigma().cols(), "covariance matrices do not have the same dimension");
			// log of the determinant ratio
			const double logDetRatio = std::log(norm2.getSigma().determinant()) - std::log(norm1.getSigma().determinant());
			// trace of (cov_2^-1 * cov_1)
			const double trace = (norm2.getSigmaInv() * norm1.getSigma()).trace();
			// Mahalanobis term (mu_2 - mu_1)^T * cov_2^-1 * (mu_2 - mu_1)
			const Eigen::VectorXd diff = norm2.getMu() - norm1.getMu();
			const double mahalanobis = diff.transpose() * norm2.getSigmaInv() * diff;
			const double dimension = norm1.getMu().rows();
			// kld = 0.5 * (log(det(cov_2) / det(cov_1)) + tr(cov_2^-1 * cov_1) + (mu_2 - mu_1)^T * cov_2^-1 * (mu_2 - mu_1) - k)
			const double kld = 0.5 * (logDetRatio + trace + mahalanobis - dimension);
			// the KL divergence is always >= 0
			if (kld < 0.0) {
				Assert::doThrow("The Kullback-Leibler divergence is < 0! That is not possible");
			}
			return kld;
		}
		/** symmetrized variant: KL(norm1 || norm2) + KL(norm2 || norm1) */
		static inline Scalar getMultivariateGaussSymmetric(const Distribution::NormalDistributionN& norm1, const Distribution::NormalDistributionN& norm2) {
			return getMultivariateGauss(norm1, norm2) + getMultivariateGauss(norm2, norm1);
		}
	};
}
#endif // KULLBACKLEIBLER_H
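
A quick worked example (a sketch; the include path is an assumption): KL(N(0,1) || N(1,1)) = log(1/1) + (1 + 1)/(2 * 1) - 0.5 = 0.5, and the symmetric variant doubles it since both directions are equal here:

#include "math/divergence/KullbackLeibler.h"
#include <iostream>
int main() {
	const Distribution::Normal<float> p(0, 1);
	const Distribution::Normal<float> q(1, 1);
	std::cout << Divergence::KullbackLeibler<float>::getUnivariateGauss(p, q) << std::endl;          // 0.5
	std::cout << Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(p, q) << std::endl; // 1.0
	return 0;
}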

View File

@@ -57,7 +57,9 @@ public:
const float modelRSSI = model.getRSSI(entry.getBeacon().getMAC(), pos_m);
// NaN? -> AP not known to the model -> skip
if (modelRSSI != modelRSSI) {continue;}
if (modelRSSI != modelRSSI) {
continue;
}
// the scan's RSSI

View File

@@ -58,7 +58,7 @@ public:
const Timestamp age = curTime - entry.getTimestamp();
Assert::isTrue(age.ms() >= 0, "found a negative wifi measurement age. this does not make sense");
Assert::isTrue(age.ms() <= 40000, "found a 40 second old wifi measurement. maybe there is a coding error?");
//Assert::isTrue(age.ms() <= 40000, "found a 40 second old wifi measurement. maybe there is a coding error?");
// sigma grows with measurement age
const float sigma = this->sigma + this->sigmaPerSecond * age.sec();
@@ -72,14 +72,15 @@ public:
}
// sanity check
Assert::isTrue(numMatchingAPs > 0, "not a single measured AP was matched against known ones. coding error? model error?");
//Assert::isTrue(numMatchingAPs > 0, "not a single measured AP was matched against known ones. coding error? model error?");
return prob;
}
template <typename Node> double getProbability(const Node& n, const Timestamp curTime, const WiFiMeasurements& obs, const int age_ms = 0) const {
throw Exception("todo??");
return this->getProbability(n.inMeter() + Point3(0,0,1.3), curTime, obs);
}
};
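
For intuition: with hypothetical values sigma = 4 and sigmaPerSecond = 0.5, a 6 s old measurement is evaluated with sigma = 4 + 0.5 * 6 = 7, so stale scans contribute a wider and therefore less confident likelihood.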

View File

@@ -73,7 +73,7 @@ public:
const Timestamp age = curTime - measurement.getTimestamp();
// sigma grows with measurement age
float sigma = this->sigma + this->sigmaPerSecond * age.sec();
// the RSSI from the scan
const float measuredRSSI = measurement.getRSSI();
@@ -102,8 +102,7 @@ public:
}
// sanity check
// Assert::isTrue(numMatchingAPs > 0, "not a single measured AP was matched against known ones. coding error? model error?");
// if (numMatchingAPs == 0) {return 0;}
//Assert::isTrue(numMatchingAPs > 0, "not a single measured AP was matched against known ones. coding error? model error?");
// as not every node has the same number of visible/matching APs
// we MUST return something like the average probability

View File

@@ -0,0 +1,214 @@
#ifdef WITH_TESTS
#include "../../Tests.h"
#include "../../../math/divergence/KullbackLeibler.h"
#include "../../../math/Distributions.h"
#include <cstdlib>
#include <iostream>
TEST(KullbackLeibler, univariateGaussEQ) {
//if the distributions are equal, kld is 0
Distribution::Normal<float> norm1(0,1);
Distribution::Normal<float> norm2(0,1);
ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));
}
TEST(KullbackLeibler, univariateGaussGEmu) {
//a larger difference between the means results in a larger kld
Distribution::Normal<float> norm1(0,1);
Distribution::Normal<float> norm2(0,1);
Distribution::Normal<float> norm3(0,1);
Distribution::Normal<float> norm4(1,1);
ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm3, norm4), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm3, norm4), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));
}
TEST(KullbackLeibler, univariateGaussGEsigma) {
//a larger sigma mismatch results in a larger kld
Distribution::Normal<float> norm1(0,1);
Distribution::Normal<float> norm2(0,1);
Distribution::Normal<float> norm5(0,1);
Distribution::Normal<float> norm6(0,3);
ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm5, norm6), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm5, norm6), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));
}
TEST(KullbackLeibler, univariateGaussRAND) {
	// the mean difference of (norm9, norm10) is guaranteed to exceed the one of
	// (norm7, norm8), so its kld must be larger as well
	for(int i = 0; i < 20; i++){
		const float randMu1 = rand() % 100;			// [0, 99]
		const float randMu2 = rand() % 100 + 100;	// [100, 199] -> difference in [1, 199]
		const float randMu3 = rand() % 100;			// [0, 99]
		const float randMu4 = rand() % 100 + 300;	// [300, 399] -> difference in [201, 399]
		Distribution::Normal<float> norm7(randMu1, 1);
		Distribution::Normal<float> norm8(randMu2, 1);
		Distribution::Normal<float> norm9(randMu3, 1);
		Distribution::Normal<float> norm10(randMu4, 1);
		ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm9, norm10), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm8, norm7));
		ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm9, norm10), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm8, norm7));
	}
}
TEST(KullbackLeibler, multivariateGaussEQ) {
	// if the distributions are equal, the kld is 0
	Eigen::VectorXd mu1(2);		mu1 << 1.0, 1.0;
	Eigen::VectorXd mu2(2);		mu2 << 1.0, 1.0;
	Eigen::MatrixXd cov1(2,2);	cov1 << 1.0, 0.0, 0.0, 1.0;
	Eigen::MatrixXd cov2(2,2);	cov2 << 1.0, 0.0, 0.0, 1.0;
	Distribution::NormalDistributionN norm1(mu1, cov1);
	Distribution::NormalDistributionN norm2(mu2, cov2);
	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2));
	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2));
}
TEST(KullbackLeibler, multivariateGaussGeMu) {
	// a larger mean difference results in a larger kld
	Eigen::VectorXd mu1(2);		mu1 << 1.0, 1.0;
	Eigen::VectorXd mu2(2);		mu2 << 1.0, 1.0;
	Eigen::VectorXd mu3(2);		mu3 << 1.0, 1.0;
	Eigen::VectorXd mu4(2);		mu4 << 1.0, 3.0;
	// identical identity covariance for all four distributions
	Eigen::MatrixXd cov(2,2);	cov << 1.0, 0.0, 0.0, 1.0;
	Distribution::NormalDistributionN norm1(mu1, cov);
	Distribution::NormalDistributionN norm2(mu2, cov);
	Distribution::NormalDistributionN norm3(mu3, cov);
	Distribution::NormalDistributionN norm4(mu4, cov);
	double kld12 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2);
	double kld34 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm3, norm4);
	std::cout << kld34 << " > " << kld12 << std::endl;
	double kld12sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2);
	double kld34sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm3, norm4);
	std::cout << kld34sym << " > " << kld12sym << std::endl;
	ASSERT_GE(kld34, kld12);
	ASSERT_GE(kld34sym, kld12sym);
}
TEST(KullbackLeibler, multivariateGaussGeCov) {
	// a larger covariance mismatch results in a larger kld
	// identical mean for all four distributions
	Eigen::VectorXd mu(2);		mu << 1.0, 1.0;
	Eigen::MatrixXd cov1(2,2);	cov1 << 1.0, 0.0, 0.0, 1.0;
	Eigen::MatrixXd cov2(2,2);	cov2 << 1.0, 0.0, 0.0, 1.0;
	Eigen::MatrixXd cov3(2,2);	cov3 << 1.0, 0.0, 0.0, 1.0;
	Eigen::MatrixXd cov4(2,2);	cov4 << 3.0, 0.0, 0.0, 1.0;
	Distribution::NormalDistributionN norm1(mu, cov1);
	Distribution::NormalDistributionN norm2(mu, cov2);
	Distribution::NormalDistributionN norm3(mu, cov3);
	Distribution::NormalDistributionN norm4(mu, cov4);
	double kld12 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2);
	double kld34 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm3, norm4);
	std::cout << kld34 << " > " << kld12 << std::endl;
	double kld12sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2);
	double kld34sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm3, norm4);
	std::cout << kld34sym << " > " << kld12sym << std::endl;
	ASSERT_GE(kld34, kld12);
	ASSERT_GE(kld34sym, kld12sym);
}
#endif
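
A possible follow-up test (a sketch, not part of the commit; ASSERT_NEAR is standard gtest, and the relative include paths mirror the file above): for 1x1 covariance matrices the multivariate formula must reduce to the univariate closed form, which cross-checks the two implementations against each other.

#ifdef WITH_TESTS
#include "../../Tests.h"
#include "../../../math/divergence/KullbackLeibler.h"
#include "../../../math/Distributions.h"
TEST(KullbackLeibler, univariateMatchesMultivariate1D) {
	// N(0, 1) vs N(2, 3): closed form gives log(3) + (1 + 4)/(2 * 9) - 0.5
	Distribution::Normal<double> n1(0, 1);
	Distribution::Normal<double> n2(2, 3);
	Eigen::VectorXd mu1(1);		mu1 << 0.0;
	Eigen::VectorXd mu2(1);		mu2 << 2.0;
	Eigen::MatrixXd cov1(1,1);	cov1 << 1.0;	// sigma^2 = 1^2
	Eigen::MatrixXd cov2(1,1);	cov2 << 9.0;	// sigma^2 = 3^2
	Distribution::NormalDistributionN m1(mu1, cov1);
	Distribution::NormalDistributionN m2(mu2, cov2);
	ASSERT_NEAR(Divergence::KullbackLeibler<double>::getUnivariateGauss(n1, n2),
	            Divergence::KullbackLeibler<double>::getMultivariateGauss(m1, m2), 1e-9);
}
#endif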