#ifdef WITH_TESTS
#ifdef WITH_EIGEN

#include "../../Tests.h"
#include "../../../math/divergence/KullbackLeibler.h"
#include "../../../math/Distributions.h"

#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <random>

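// The univariate tests below only rely on two properties of the Gaussian KLD: it is 0 for
// identical parameters, and it grows with the mean gap (for equal sigmas) respectively with
// the sigma mismatch. As a hedged reference, this sketch writes down the standard closed form
//   KL(N(mu1, s1^2) || N(mu2, s2^2)) = ln(s2/s1) + (s1^2 + (mu1 - mu2)^2) / (2*s2^2) - 1/2
// The helper name is illustrative only and the exact convention inside KullbackLeibler.h is
// not taken from the library; this is merely the textbook formula the assertions rely on.
inline float referenceUnivariateGaussKLD(float mu1, float sigma1, float mu2, float sigma2) {
	const float dMu = mu1 - mu2;
	return std::log(sigma2 / sigma1) + (sigma1 * sigma1 + dMu * dMu) / (2.0f * sigma2 * sigma2) - 0.5f;
}
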
TEST(KullbackLeibler, univariateGaussEQ) {

	// if the distributions are equal, the KLD is 0
	Distribution::Normal<float> norm1(0, 1);
	Distribution::Normal<float> norm2(0, 1);

	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));

}

TEST(KullbackLeibler, univariateGaussGEmu) {

	// a larger difference in mu means a larger KLD
	Distribution::Normal<float> norm1(0, 1);
	Distribution::Normal<float> norm2(0, 1);
	Distribution::Normal<float> norm3(0, 1);
	Distribution::Normal<float> norm4(1, 1);

	ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm3, norm4), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
	ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm3, norm4), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));

}

TEST(KullbackLeibler, univariateGaussGEsigma) {

	// a larger difference in sigma means a larger KLD
	Distribution::Normal<float> norm1(0, 1);
	Distribution::Normal<float> norm2(0, 1);
	Distribution::Normal<float> norm5(0, 1);
	Distribution::Normal<float> norm6(0, 3);

	ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm5, norm6), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm1, norm2));
	ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm5, norm6), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm1, norm2));

}

TEST(KullbackLeibler, univariateGaussRAND) {

	// with equal sigmas, a larger gap between the means must yield a larger KLD.
	// the offsets are chosen so that the second pair's mean gap (at least 201)
	// always exceeds the first pair's mean gap (at most 199).
	for (int i = 0; i < 20; i++) {

		auto randMu1 = rand() % 100;
		auto randMu2 = rand() % 100 + 100;

		auto randMu3 = rand() % 100;
		auto randMu4 = rand() % 100 + 300;

		Distribution::Normal<float> norm7(randMu1, 1);
		Distribution::Normal<float> norm8(randMu2, 1);

		Distribution::Normal<float> norm9(randMu3, 1);
		Distribution::Normal<float> norm10(randMu4, 1);

		ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGauss(norm9, norm10), Divergence::KullbackLeibler<float>::getUnivariateGauss(norm8, norm7));
		ASSERT_GE(Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm9, norm10), Divergence::KullbackLeibler<float>::getUnivariateGaussSymmetric(norm8, norm7));

	}

}

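// The multivariate tests rely on the same two properties. As a hedged reference (the helper
// name is illustrative and the convention inside KullbackLeibler.h is an assumption, not
// taken from the library), the standard closed form for two k-dimensional Gaussians
// N0(mu0, S0) and N1(mu1, S1) is
//   KL(N0 || N1) = 1/2 * ( tr(S1^-1 S0) + (mu1 - mu0)^T S1^-1 (mu1 - mu0) - k + ln(det S1 / det S0) )
inline double referenceMultivariateGaussKLD(const Eigen::VectorXd& mu0, const Eigen::MatrixXd& S0,
                                            const Eigen::VectorXd& mu1, const Eigen::MatrixXd& S1) {
	const double k = static_cast<double>(mu0.size());
	const Eigen::MatrixXd S1inv = S1.inverse();
	const Eigen::VectorXd d = mu1 - mu0;
	return 0.5 * ((S1inv * S0).trace() + d.dot(S1inv * d) - k + std::log(S1.determinant() / S0.determinant()));
}
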
TEST(KullbackLeibler, multivariateGaussEQ) {

	// if the distributions are equal, the KLD is 0
	Eigen::VectorXd mu1(2);
	mu1 << 1.0, 1.0;

	Eigen::VectorXd mu2(2);
	mu2 << 1.0, 1.0;

	Eigen::MatrixXd cov1(2, 2);
	cov1 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov2(2, 2);
	cov2 << 1.0, 0.0,
	        0.0, 1.0;

	Distribution::NormalDistributionN norm1(mu1, cov1);
	Distribution::NormalDistributionN norm2(mu2, cov2);

	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2));
	ASSERT_EQ(0.0f, Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2));

}

TEST(KullbackLeibler, multivariateGaussGeMu) {

	// a larger difference in mu means a larger KLD
	Eigen::VectorXd mu1(2);
	mu1 << 1.0, 1.0;

	Eigen::VectorXd mu2(2);
	mu2 << 1.0, 1.0;

	Eigen::VectorXd mu3(2);
	mu3 << 1.0, 1.0;

	Eigen::VectorXd mu4(2);
	mu4 << 1.0, 3.0;

	Eigen::MatrixXd cov1(2, 2);
	cov1 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov2(2, 2);
	cov2 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov3(2, 2);
	cov3 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov4(2, 2);
	cov4 << 1.0, 0.0,
	        0.0, 1.0;

	Distribution::NormalDistributionN norm1(mu1, cov1);
	Distribution::NormalDistributionN norm2(mu2, cov2);
	Distribution::NormalDistributionN norm3(mu3, cov3);
	Distribution::NormalDistributionN norm4(mu4, cov4);

	double kld12 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2);
	double kld34 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm3, norm4);
	std::cout << kld34 << " > " << kld12 << std::endl;

	double kld12sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2);
	double kld34sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm3, norm4);
	std::cout << kld34sym << " > " << kld12sym << std::endl;

	ASSERT_GE(kld34, kld12);
	ASSERT_GE(kld34sym, kld12sym);

}

TEST(KullbackLeibler, multivariateGaussGeCov) {

	// a larger difference in the covariance means a larger KLD
	Eigen::VectorXd mu1(2);
	mu1 << 1.0, 1.0;

	Eigen::VectorXd mu2(2);
	mu2 << 1.0, 1.0;

	Eigen::VectorXd mu3(2);
	mu3 << 1.0, 1.0;

	Eigen::VectorXd mu4(2);
	mu4 << 1.0, 1.0;

	Eigen::MatrixXd cov1(2, 2);
	cov1 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov2(2, 2);
	cov2 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov3(2, 2);
	cov3 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov4(2, 2);
	cov4 << 3.0, 0.0,
	        0.0, 1.0;

	Distribution::NormalDistributionN norm1(mu1, cov1);
	Distribution::NormalDistributionN norm2(mu2, cov2);
	Distribution::NormalDistributionN norm3(mu3, cov3);
	Distribution::NormalDistributionN norm4(mu4, cov4);

	double kld12 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm1, norm2);
	double kld34 = Divergence::KullbackLeibler<float>::getMultivariateGauss(norm3, norm4);
	std::cout << kld34 << " > " << kld12 << std::endl;

	double kld12sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm1, norm2);
	double kld34sym = Divergence::KullbackLeibler<float>::getMultivariateGaussSymmetric(norm3, norm4);
	std::cout << kld34sym << " > " << kld12sym << std::endl;

	ASSERT_GE(kld34, kld12);
	ASSERT_GE(kld34sym, kld12sym);

}

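// getGeneralFromSamples is fed plain vectors of density values. As a hedged reference (the
// exact normalisation and log-base handling inside KullbackLeibler.h are assumptions here,
// and the helper name is illustrative only), a discrete KL estimate over two equally long
// vectors of positive values p and q is
//   KL(p || q) ~ sum_i p_i * ln(p_i / q_i)
inline double referenceKLDFromSamples(const Eigen::VectorXd& p, const Eigen::VectorXd& q) {
	double sum = 0.0;
	for (Eigen::Index i = 0; i < p.size(); ++i) {
		sum += p[i] * std::log(p[i] / q[i]);
	}
	return sum;
}
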
TEST(KullbackLeibler, generalFromSamples) {

	// same setup as the GeCov test: norm1..norm3 are identical, norm4 has a larger
	// covariance. the KLD is estimated from density values evaluated at random points,
	// so kld34 must again be >= kld12.
	Eigen::VectorXd mu1(2);
	mu1 << 1.0, 1.0;

	Eigen::VectorXd mu2(2);
	mu2 << 1.0, 1.0;

	Eigen::VectorXd mu3(2);
	mu3 << 1.0, 1.0;

	Eigen::VectorXd mu4(2);
	mu4 << 1.0, 1.0;

	Eigen::MatrixXd cov1(2, 2);
	cov1 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov2(2, 2);
	cov2 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov3(2, 2);
	cov3 << 1.0, 0.0,
	        0.0, 1.0;

	Eigen::MatrixXd cov4(2, 2);
	cov4 << 3.0, 0.0,
	        0.0, 1.0;

	Distribution::NormalDistributionN norm1(mu1, cov1);
	Distribution::NormalDistributionN norm2(mu2, cov2);
	Distribution::NormalDistributionN norm3(mu3, cov3);
	Distribution::NormalDistributionN norm4(mu4, cov4);

	int size = 10000;
	Eigen::VectorXd samples1(size);
	Eigen::VectorXd samples2(size);
	Eigen::VectorXd samples3(size);
	Eigen::VectorXd samples4(size);

	// initialize the random number generator with a time-dependent seed
	std::mt19937_64 rng;
	uint64_t timeSeed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
	std::seed_seq ss{uint32_t(timeSeed & 0xffffffff), uint32_t(timeSeed >> 32)};
	rng.seed(ss);

	// uniform distribution over [-9, 10)
	std::uniform_real_distribution<double> unif(-9, 10);

	// evaluate the densities at random points; the sample vectors hold density values, not draws
	for (int i = 0; i < size; ++i) {

		double r1 = unif(rng);
		double r2 = unif(rng);
		Eigen::VectorXd v(2);
		v << r1, r2;

		samples1[i] = norm1.getProbability(v);
		samples2[i] = norm2.getProbability(v);
		samples3[i] = norm3.getProbability(v);
		samples4[i] = norm4.getProbability(v);

	}

	double kld12 = Divergence::KullbackLeibler<float>::getGeneralFromSamples(samples1, samples2, Divergence::LOGMODE::NATURALIS);
	double kld34 = Divergence::KullbackLeibler<float>::getGeneralFromSamples(samples3, samples4, Divergence::LOGMODE::NATURALIS);
	std::cout << kld34 << " > " << kld12 << std::endl;

	double kld12sym = Divergence::KullbackLeibler<float>::getGeneralFromSamplesSymmetric(samples1, samples2, Divergence::LOGMODE::NATURALIS);
	double kld34sym = Divergence::KullbackLeibler<float>::getGeneralFromSamplesSymmetric(samples3, samples4, Divergence::LOGMODE::NATURALIS);
	std::cout << kld34sym << " > " << kld12sym << std::endl;

	ASSERT_GE(kld34, kld12);
	ASSERT_GE(kld34sym, kld12sym);

}

#endif
#endif