ref #39 smoothing is refactored

Wrote the KDE smoothing algorithmically; still needs testing
This commit is contained in:
toni
2017-12-06 17:37:14 +01:00
parent 95a5c8f34f
commit 74981c6a45
14 changed files with 1303 additions and 127 deletions

View File

@@ -52,7 +52,7 @@ namespace Assert {
}
template <typename T, typename STR> static inline void isNotNull(const T& v, const STR err) {
if (v == nullptr) {doThrow(err);}
if (v == nullptr) {doThrow(err);}
}
template <typename T, typename STR> static inline void isNotNaN(const T v, const STR err) {

View File

@@ -52,7 +52,7 @@ public:
state.heading.direction += ctrl->turnSinceLastTransition_rad + var;
//set the concentration parameter kappa of the von Mises distribution
float kappa = 5 / std::exp(2 * std::abs(ctrl->motionDeltaAngle_rad));
float kappa = 600 / std::exp(2 * std::abs(ctrl->motionDeltaAngle_rad));
dist.setKappa(kappa);
}

127
math/boxkde/BoxGaus.h Normal file
View File

@@ -0,0 +1,127 @@
#pragma once
#include <cmath>
#include <vector>
#include "BoxSizes.h"
template <class T>
struct BoxGaus
{
void boxfilter(std::vector<T>& input, size_t w, size_t h, unsigned filterSize)
{
assertMsg((filterSize % 2) == 1, "filterSize must be odd");
unsigned radius = filterSize / 2;
std::vector<T> buffer(input.size());
boxBlur(input, buffer, w, h, radius);
boxBlur(buffer, input, w, h, radius);
}
void approxGaus(Image2D<T>& input, T sigmaX, T sigmaY, unsigned nFilt)
{
approxGaus(input.data(), input.width, input.height, sigmaX, sigmaY, nFilt);
}
void approxGaus(std::vector<T>& input, size_t w, size_t h, T sigmaX, T sigmaY, unsigned nFilt)
{
BoxSizes<T> bsX(sigmaX, nFilt);
BoxSizes<T> bsY(sigmaY, nFilt);
std::vector<T> buffer(input.size());
assertMsg((2 * bsX.wl + 1 < w) && (2 * bsX.wl + 1 < h), "Box-Filter size in X direction is too big");
assertMsg((2 * bsX.wu + 1 < w) && (2 * bsX.wu + 1 < h), "Box-Filter size in X direction is too big");
assertMsg((2 * bsY.wl + 1 < w) && (2 * bsY.wl + 1 < h), "Box-Filter size in Y direction is too big");
assertMsg((2 * bsY.wu + 1 < w) && (2 * bsY.wu + 1 < h), "Box-Filter size in Y direction is too big");
// if equal, we can save some conditionals inside the loop
if (bsX.m == bsY.m)
{
const size_t m = bsX.m;
for (size_t i = 0; i < m; i++)
{
boxBlur(input, buffer, w, h, bsY.wl);
boxBlur(buffer, input, w, h, bsX.wl);
}
for (size_t i = 0; i < nFilt - m; i++)
{
boxBlur(input, buffer, w, h, bsY.wu);
boxBlur(buffer, input, w, h, bsX.wu);
}
}
else
{
for (size_t i = 0; i < nFilt; i++)
{
boxBlur(input, buffer, w, h, (i < bsY.m ? bsY.wl : bsY.wu) );
boxBlur(buffer, input, w, h, (i < bsX.m ? bsX.wl : bsX.wu));
}
}
}
private:
void boxBlur(const std::vector<T> &src, std::vector<T> &dst, size_t w, size_t h, size_t r)
{
T iarr = (T)1.0 / (r + r + 1);
for (size_t i = 0; i < w; i++)
{
// Init indexes
size_t ti = i;
size_t li = ti;
size_t ri = ti + r*w;
// Init values
T fv = src[ti]; // first values
T lv = src[ti + w*(h - 1)]; // last values
T val = fv * (r + 1); // overhang over image border
for (size_t j = 0; j < r; j++)
{
val += src[ti + j*w]; // col sum
}
// overhang region to the left of the image
for (size_t j = 0; j <= r; j++)
{
val += src[ri] - fv;
dst[j + i*w] = val * iarr;
ri += w;
ti += w;
}
// interior image region
for (size_t j = r + 1; j < h - r; j++)
{
val += src[ri] - src[li];
dst[j + i*w] = val * iarr;
li += w;
ri += w;
ti += w;
}
// overhang region to the right of the image
for (size_t j = h - r; j < h; j++)
{
val += lv - src[li];
dst[j + i*w] = val * iarr;
li += w;
ti += w;
}
}
}
};
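A minimal usage sketch for the box-filter approximation above (image size and sigma values are arbitrary; it mirrors the call made later in FastKDESmoothing):
Image2D<float> img(256, 256); // density grid
img(128, 128) = 1.0f; // single impulse
BoxGaus<float> boxGaus;
// sigma is given in bins; three box passes per axis approximate the Gaussian
boxGaus.approxGaus(img, 4.0f, 4.0f, 3);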

85
math/boxkde/BoxSizes.h Normal file
View File

@@ -0,0 +1,85 @@
#pragma once
#include "DataStructures.h"
template<typename T>
struct BoxSizes
{
static_assert(std::is_floating_point<T>::value, "This class only works with floats.");
T sigma, sigmaActual;
T wIdeal, mIdeal;
unsigned n, m, wl, wu;
BoxSizes(T sigma, unsigned n)
: sigma(sigma), n(n)
{
assertMsg(sigma >= 0.8, "Sigma values below about 0.8 cannot be represented");
wIdeal = sqrt(12 * sigma*sigma / n + 1); // Ideal averaging filter width
// wl is the largest odd integer not greater than wIdeal
wl = (unsigned)floor(wIdeal);
if (wl % 2 == 0)
wl = wl - 1;
// wu is the next odd value > wl
wu = wl + 2;
// Compute m. Refer to the tech note for the derivation of this formula
mIdeal = (12 * sigma*sigma - n*wl*wl - 4 * n*wl - 3 * n) / (-4 * wl - 4);
m = (unsigned)round(mIdeal);
assertMsg(!(m > n || m < 0), "calculation of m has failed");
// Compute actual sigma that will be achieved
sigmaActual = sqrt((m*wl*wl + (n - m)*wu*wu - n) / (T)12.0);
}
};
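// Worked example (sketch) of the formulas above, for sigma = 2 and n = 3 passes:
//   wIdeal = sqrt(12*4/3 + 1) = sqrt(17) ~ 4.12  ->  wl = 3, wu = 5
//   mIdeal = (48 - 27 - 36 - 9) / (-12 - 4) = -24 / -16 = 1.5  ->  m = 2
//   sigmaActual = sqrt((2*9 + 1*25 - 3) / 12) ~ 1.83
// i.e. two box passes of width 3 followed by one pass of width 5 approximate sigma = 2.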
template<typename T>
struct ExBoxSizes
{
static_assert(std::is_floating_point<T>::value, "This class only works with floats.");
T sigma, sigma_actual;
unsigned n, r;
T alpha, c, c1, c2;
T r_f;
// Special case for h == 1
ExBoxSizes(T sigma, unsigned n)
: sigma(sigma), n(n)
{
T var = sigma*sigma;
r_f = 0.5*sqrt((12 * var) / n + 1) - T(0.5);
r = (unsigned)std::floor(r_f);
alpha = (2 * r + 1) * ( (r*r + r - 3*var/n) / (6 * (var/n - (r+1)*(r+1))) );
c1 = alpha / (2*alpha + 2*r + 1);
c2 = (1 - alpha) / (2 * alpha + 2 * r + 1);
c = c1 + c2;
}
ExBoxSizes(T sigma, unsigned d, T h)
: sigma(sigma), n(d)
{
T v = sigma*sigma;
r_f = sqrt((12 * v) / n + 1)/(2*h) - T(0.5); // (7)
r = (unsigned)std::floor(std::max(T(0), r_f));
alpha = (2 * r + 1) * (((r*r + r) - (v*3*h)/(d*h*h*h)) / (6 * ( v/(d*h*h) - (r+1)*(r+1)) )); // (8) (14)
c1 = alpha / (h*(2 * r + 2 * alpha + 1)); // (8) (13)
c2 = (1 - alpha) / (h*(2 * r + 2 * alpha + 1)); // (8) (13)
c = c1 + c2;
T lambda = h*(2*r + 1 + 2*alpha); // (8)
T variance_actual = (d*h*h*h) / (3 * lambda) * (2*r*r*r + 3*r*r + r + 6*alpha*(r+1)*(r+1)); // (14)
sigma_actual = sqrt(variance_actual);
}
};
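// Worked example (sketch) for the h == 1 constructor above, with sigma = 2 and n = 3:
//   r_f = 0.5*sqrt(12*4/3 + 1) - 0.5 ~ 1.56  ->  r = 1
//   alpha = 3 * (2 - 4) / (6 * (4/3 - 4)) = 0.375
//   c1 = 0.375 / 3.75 = 0.1,  c2 = 0.625 / 3.75 ~ 0.167
// i.e. a box of radius 1 with a fractional weight alpha on the outermost samples.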

View File

@@ -0,0 +1,115 @@
#pragma once
#include <cassert>
#include <limits>
#include <memory>
#include <sstream>
#include <vector>
template <class T>
struct BoundingBox
{
static_assert(std::is_arithmetic<T>::value, "This class only works with floats or integers.");
T MinX, MaxX, MinY, MaxY;
BoundingBox(T MinX = std::numeric_limits<T>::max(),
T MaxX = std::numeric_limits<T>::lowest(),
T MinY = std::numeric_limits<T>::max(),
T MaxY = std::numeric_limits<T>::lowest())
: MinX(MinX), MaxX(MaxX), MinY(MinY), MaxY(MaxY)
{ }
T width () const { return MaxX - MinX; }
T heigth() const { return MaxY - MinY; }
T area () const { return width()*heigth(); }
bool isInside(T x, T y) const { return (x >= MinX && x <= MaxX) && (y >= MinY && y <= MaxY); }
// Expands the BB so that it contains the given point
void expand(T x, T y)
{
if (x < MinX) MinX = x;
if (x > MaxX) MaxX = x;
if (y < MinY) MinY = y;
if (y > MaxY) MaxY = y;
}
// Enlarges the BB in both directions along each axis.
void inflate(T szX, T szY)
{
MinX -= szX;
MinY -= szY;
MaxX += szX;
MaxY += szY;
}
};
template <class T>
struct Point2D
{
static_assert(std::is_arithmetic<T>::value, "This class only works with floats and integers.");
T X, Y;
Point2D(T x = 0, T y = 0)
: X(x), Y(y)
{ }
};
template <class T>
struct Size2D
{
static_assert(std::is_arithmetic<T>::value, "This class only works with floats and integers.");
T sX, sY;
Size2D(T all)
: sX(all), sY(all)
{ }
Size2D(T x = 0, T y = 0)
: sX(x), sY(y)
{ }
};
#ifdef NDEBUG
#define assertCond(_EXPR_) (false)
#define assertThrow(_arg_) ((void)0)
#define assert_throw(...) ((void)0)
#define assertMsg(...) ((void)0)
#else
// Evaluates to true if the expression fails; with NDEBUG defined it always evaluates to false
#define assertCond(_EXPR_) (!(_EXPR_))
// Throws an exception with the argument as message, prepending the current file name and line number
#define assertThrow(_std_string_) assert_throw( (_std_string_), __FILE__, __LINE__)
inline void assert_throw(const std::string& message, const char* file, int line)
{
std::stringstream ss;
ss << file << ":" << line << ": " << message;
throw std::invalid_argument(ss.str());
}
#define assertMsg(_EXPR_, _MSG_) if (!(_EXPR_)) assertThrow(_MSG_)
#endif
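// Usage sketch of the macros above (the same pattern appears in Grid2D::add elsewhere in this commit):
//   assertMsg(filterSize % 2 == 1, "filterSize must be odd");  // throws with file/line prefix
//   if (assertCond(bb.isInside(x, y))) {                       // true iff the check failed
//       assertThrow("Point is out of bounds");
//   }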
// ostream overloads
template<typename T>
std::ostream& operator<<(std::ostream& os, const Point2D<T>& pt)
{
return os << "(" << pt.X << "; " << pt.Y << ")";
}
template<typename T>
std::ostream& operator<<(std::ostream& os, const BoundingBox<T>& bb)
{
return os << "(X: " << bb.MinX << " - " << bb.MaxX << ";"
<< " Y: " << bb.MinY << " - " << bb.MaxY << ")";
}

34
math/boxkde/GausLib.h Normal file
View File

@@ -0,0 +1,34 @@
// The following ifdef block is the standard way of creating macros which make exporting
// from a DLL simpler. All files within this DLL are compiled with the GAUSLIB_EXPORTS
// symbol defined on the command line. This symbol should not be defined on any project
// that uses this DLL. This way any other project whose source files include this file see
// GAUSLIB_API functions as being imported from a DLL, whereas this DLL sees symbols
// defined with this macro as being exported.
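// Sketch of the standard pattern this comment describes (GAUSLIB_API itself is not shown in this excerpt):
//   #ifdef GAUSLIB_EXPORTS
//   #define GAUSLIB_API __declspec(dllexport)
//   #else
//   #define GAUSLIB_API __declspec(dllimport)
//   #endif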
typedef struct c_bbox {
double minX;
double maxX;
double minY;
double maxY;
double minZ;
double maxZ;
} c_bbox;
#define ALGO_BOX 0
#define ALGO_EXBOX 1
#define ALGO_BOX_SIMD 2
#define ALGO_BOX_CL 3
void gausKde2D_simple(float* X, int sizeX,
float* weights,
float hx, float hy,
int nBinsX, int nBinsY,
c_bbox* boundingBox,
int algorithm,
double* out_maxValue,
double* out_maxPosX, double* out_maxPosY,
double* out_runTimeInNS,
float* out_density);

211
math/boxkde/Grid2D.h Normal file
View File

@@ -0,0 +1,211 @@
#pragma once
#include <sstream>
#include "DataStructures.h"
#include "Image2D.h"
template<class T>
struct Grid2D
{
static_assert(std::is_floating_point<T>::value, "Grid2D only supports float values");
BoundingBox<T> bb;
size_t numBinsX, numBinsY;
T binSizeX, binSizeY;
private:
Image2D<T> data;
public:
Grid2D(){ } //TODO: fast hack
Grid2D(BoundingBox<T> bb, size_t numBins)
: Grid2D(bb, numBins, numBins)
{
}
Grid2D(BoundingBox<T> bb, size_t numBinsX, size_t numBinsY)
: bb(bb),
numBinsX(numBinsX),
numBinsY(numBinsY),
binSizeX((bb.width()) / (numBinsX-1)),
binSizeY((bb.heigth()) / (numBinsY-1)),
data(numBinsX, numBinsY)
{
}
Image2D<T>& image() { return data; }
const Image2D<T>& image() const { return data; }
T& operator() (size_t x, size_t y) { return data(x, y); }
const T& operator() (size_t x, size_t y) const { return data(x, y); }
void clear() { data.clear(); }
void fill(const std::vector<Point2D<T>>& samples)
{
assertMsg(!samples.empty(), "Samples must be non-empty");
data.clear();
const T weight = T(1.0) / samples.size();
for (const auto& pt : samples)
{
add(pt.X, pt.Y, weight);
}
}
void fill(const std::vector<Point2D<T>>& samples, const std::vector<T>& weights)
{
assertMsg(!samples.empty(), "Samples must be non-empty");
assertMsg(weights.size() == samples.size(), "Weights must have the same size as samples");
data.clear();
for (size_t i = 0; i < samples.size(); i++)
{
add(samples[i].X, samples[i].Y, weights[i]);
}
}
Point2D<size_t> add(T x, T y, T w)
{
if (assertCond(bb.isInside(x,y)))
{
std::stringstream ss;
ss << "Point " << Point2D<T>(x, y) << " is out of bounds. " << bb;
assertThrow(ss.str());
}
return add_simple_bin(x, y, w);
//return add_linear_bin(x, y, w);
}
void add_linear(T x, T y, T w)
{
if (assertCond(bb.isInside(x, y)))
{
std::stringstream ss;
ss << "Point " << Point2D<T>(x, y) << " is out of bounds. " << bb;
assertThrow(ss.str());
}
add_linear_bin(x, y, w);
}
T fetch(T x, T y) const
{
size_t bin_x = (size_t)((x - bb.MinX) / binSizeX);
size_t bin_y = (size_t)((y - bb.MinY) / binSizeY);
//if (bin_x == data.width) bin_x--;
//if (bin_y == data.height) bin_y--;
return data(bin_x, bin_y);
}
// Returns the sum of all bin values
T sum() const
{
return data.sum();
}
// Takes a point in input space and converts it to grid space coordinates
Point2D<size_t> asGridSpace(T x, T y) const
{
size_t bin_x = (size_t)((x - bb.MinX) / binSizeX);
size_t bin_y = (size_t)((y - bb.MinY) / binSizeY);
if (bin_x == data.width) bin_x--;
if (bin_y == data.height) bin_y--;
return Point2D<size_t>(bin_x, bin_y);
}
// Takes a Size2D in input space and converts it to grid space coordinates
Size2D<size_t> asGridSpace(Size2D<T> sz) const
{
return Size2D<size_t>(sz.sX / binSizeX, sz.sY / binSizeY);
}
// Takes a point in grid space and converts it to input space coordinates
Point2D<T> asInputSpace(Point2D<size_t> pt) const
{
return asInputSpace(pt.X, pt.Y);
}
// Takes a point in grid space and converts it to input space coordinates
Point2D<T> asInputSpace(size_t x, size_t y) const
{
return Point2D<T>(x * binSizeX + bb.MinX + T(0.5)*binSizeX,
y * binSizeY + bb.MinY + T(0.5)*binSizeY);
}
T maximum(Point2D<T>& pt) const
{
Point2D<size_t> gridPt;
T maxValue = image().maximum(gridPt);
pt = asInputSpace(gridPt);
return maxValue;
}
private:
Point2D<size_t> add_simple_bin(T x, T y, T w)
{
size_t bin_x = (size_t)((x - bb.MinX) / binSizeX);
size_t bin_y = (size_t)((y - bb.MinY) / binSizeY);
if (bin_x == data.width) bin_x--;
if (bin_y == data.height) bin_y--;
data(bin_x, bin_y) += w;
return Point2D<size_t>(bin_x, bin_y);
}
void add_linear_bin(T x, T y, T w)
{
T xpos1 = (x - bb.MinX) / binSizeX;
T xpos2 = (y - bb.MinY) / binSizeY;
size_t ix1 = (size_t)floor(xpos1);
size_t ix2 = (size_t)floor(xpos2);
T fx1 = xpos1 - ix1;
T fx2 = xpos2 - ix2;
const size_t ixmin1 = 0;
const size_t ixmax1 = numBinsX - 2;
const size_t ixmin2 = 0;
const size_t ixmax2 = numBinsY - 2;
if (ixmin1 <= ix1 && ix1 <= ixmax1 && ixmin2 <= ix2 && ix2 <= ixmax2)
{
data(ix1, ix2) += w*(1 - fx1)*(1 - fx2);
data(ix1+1, ix2) += w*fx1*(1 - fx2);
data(ix1, ix2+1) += w*(1 - fx1)*fx2;
data(ix1 + 1, ix2 + 1) += w*fx1*fx2;
}
else if (ix1 == ixmax1 + 1 && ixmin2 <= ix2 && ix2 <= ixmax2)
{
// right edge
data(ix1, ix2) += w*(1 - fx1)*(1 - fx2);
data(ix1, ix2+1) += w*(1 - fx1)*fx2;
}
else if (ixmin1 <= ix1 && ix1 <= ixmax1 && ix2 == ixmax2 + 1)
{
// bottom edge
data(ix1, ix2) += w*(1 - fx1)*(1 - fx2);
data(ix1+1, ix2) += w*fx1*(1 - fx2);
}
else if (ix1 == ixmax1 + 1 && ix2 == ixmax2 + 1)
{
// bottom-right corner
data(ix1, ix2) += w*(1 - fx1)*(1 - fx2);
}
}
};
template struct Grid2D<float>;
template struct Grid2D<double>;
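A minimal usage sketch for Grid2D (bounding box, bin counts and sample values are arbitrary):
BoundingBox<float> bb(0.0f, 100.0f, 0.0f, 50.0f); // MinX, MaxX, MinY, MaxY
Grid2D<float> grid(bb, 200, 100); // 200 x 100 bins
grid.add(10.0f, 5.0f, 0.5f); // accumulate weighted samples
grid.add(12.0f, 6.0f, 0.5f);
Point2D<float> mode;
float maxVal = grid.maximum(mode); // mode is returned in input-space coordinates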

188
math/boxkde/Image2D.h Normal file
View File

@@ -0,0 +1,188 @@
#pragma once
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>
#include "DataStructures.h"
enum struct LineDirection { X, Y };
template <typename TValue>
struct ImageView2D
{
static_assert(std::is_arithmetic<TValue>::value, "Image only supports integers or floats");
// forward declaration
//enum struct LineDirection;
template<LineDirection D> struct LineView;
template<LineDirection D> struct ConstLineView;
size_t width, height, size;
protected:
TValue* values; // contains image data row-wise
public:
ImageView2D()
: width(0), height(0), size(0), values(nullptr)
{ }
ImageView2D(size_t width, size_t height)
: width(width), height(height), size(width*height), values(nullptr)
{ }
ImageView2D(size_t width, size_t height, TValue* data)
: width(width), height(height), size(width*height), values(data)
{ }
inline TValue& operator() (size_t x, size_t y) { return values[indexFromCoord(x, y)]; }
inline const TValue& operator() (size_t x, size_t y) const { return values[indexFromCoord(x, y)]; }
TValue* val_begin() { return values; }
TValue* val_end () { return values + size; }
const TValue* val_begin() const { return values; }
const TValue* val_end () const { return values + size; }
inline size_t indexFromCoord(size_t x, size_t y) const
{
assertMsg(x < width && y < height, "(x,y) out of bounds");
return y * width + x;
}
Point2D<size_t> coordFromIndex(size_t index) const
{
assertMsg(index < size, "Index out of bounds");
return Point2D<size_t>(index % width, index / width);
}
Point2D<size_t> maximum() const
{
size_t maxValueIndex = std::distance(val_begin(), std::max_element(val_begin(), val_end()));
return coordFromIndex(maxValueIndex);
}
TValue maximum(Point2D<size_t>& pt) const
{
size_t maxValueIndex = std::distance(val_begin(), std::max_element(val_begin(), val_end()));
pt = coordFromIndex(maxValueIndex);
return values[maxValueIndex];
}
void clear()
{
fill(TValue(0));
}
void fill(TValue value)
{
std::fill(val_begin(), val_end(), value);
}
void assign(const ImageView2D<TValue>& other)
{
assertMsg(size == other.size, "Other must be of the same size as this");
std::copy(other.val_begin(), other.val_end(), val_begin());
}
TValue sum() const
{
return std::accumulate(val_begin(), val_end(), TValue(0));
}
// Returns a transposed view of this image without copying any data.
ImageView2D<TValue> transpose() const
{
return ImageView2D<TValue>(height, width, values);
}
// Returns the value at (x,y). Throws if out of bounds.
const TValue& at(size_t x, size_t y) const
{
return values[indexFromCoord(x, y)];
}
// Returns the value at (x,y) but falls back to default if out of bounds.
TValue get(size_t x, size_t y, TValue dflt = 0) const
{
if (x >= width || y >= height)
return dflt;
else
return values[indexFromCoord(x, y)];
}
std::vector<Point2D<size_t>> findLocalMaxima(int radiusX = 1, int radiusY = 1) const
{
std::vector<Point2D<size_t>> result;
findLocalMaxima(result, radiusX, radiusY);
return result;
}
void findLocalMaxima(std::vector<Point2D<size_t>>& result, int radiusX = 1, int radiusY = 1) const
{
assertMsg(radiusX > 0 && radiusY > 0, "Search radii must be greater than 0.");
for (size_t y = 0; y < height; y++)
{
for (size_t x = 0; x < width; x++)
{
TValue val = at(x, y);
bool lclMax = true;
for (int ry = -radiusY; ry <= radiusY; ry++)
{
for (int rx = -radiusX; rx <= radiusX; rx++)
{
TValue other = get(x + rx, y + ry);
if (!(rx == 0 && ry == 0) && val <= other)
{
lclMax = false;
}
}
}
if (lclMax)
{
result.emplace_back(x, y);
}
}
}
}
};
template <class TValue>
struct Image2D : public ImageView2D<TValue>
{
static_assert(std::is_arithmetic<TValue>::value, "Image only supports integers or floats");
std::vector<TValue> values_vec;
Image2D()
{
}
Image2D(size_t width, size_t height)
: ImageView2D<TValue>(width, height), values_vec(width * height)
{
this->values = values_vec.data();
}
Image2D(size_t width, size_t height, std::vector<TValue>& data)
: ImageView2D<TValue>(width, height), values_vec(data)
{
assertMsg(data.size() == width*height, "Sizes must be the same");
this->values = values_vec.data();
}
std::vector<TValue>& data() { return values_vec; }
const std::vector<TValue>& data() const { return values_vec; }
};
template struct ImageView2D<float>;
template struct ImageView2D<double>;
template struct Image2D<float>;
template struct Image2D<double>;
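A short sketch of the local-maxima search above, assuming a small float image:
Image2D<float> img(64, 64);
img(10, 10) = 1.0f;
img(40, 20) = 0.7f;
std::vector<Point2D<size_t>> peaks = img.findLocalMaxima(2, 2); // 5x5 neighbourhood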

105
math/boxkde/benchmark.h Normal file
View File

@@ -0,0 +1,105 @@
#pragma once
#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
template <typename F>
static void benchmark(std::string name, size_t count, F&& lambda)
{
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < count; i++)
{
lambda();
}
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - start);
long long durationAVG = duration.count() / count;
std::cout << "Function " << name << " took avg. " << durationAVG << "us (" << durationAVG / 1000.0f << "ms) over " << count << " runs." << std::endl;
}
struct BenchResult
{
const long long min, max;
const double mean, median;
BenchResult()
: min(0), max(0), mean(0), median(0)
{}
BenchResult(long long min, long long max, double mean, double median)
: min(min), max(max), mean(mean), median(median)
{}
};
template <typename F>
static BenchResult benchmarkEx(std::string name, size_t count, F&& lambda)
{
std::vector<long long> durations(count);
for (size_t i = 0; i < count; i++)
{
auto start = std::chrono::high_resolution_clock::now();
lambda();
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start);
durations[i] = duration.count();
}
long long min = *std::min_element(durations.begin(), durations.end());
long long max = *std::max_element(durations.begin(), durations.end());
double average = std::accumulate(durations.begin(), durations.end(), 0.0) / durations.size();
double median;
std::sort(durations.begin(), durations.end());
if (durations.size() % 2 == 0)
median = (double) (durations[durations.size() / 2 - 1] + durations[durations.size() / 2]) / 2;
else
median = (double) durations[durations.size() / 2];
std::cout << "Function " << name << " took avg. " << average << "ns (" << average / 1000.0f << "us) over " << count << " runs." << std::endl;
return BenchResult(min, max, average, median);
}
struct StopWatch
{
typedef std::chrono::high_resolution_clock clock;
typedef clock::time_point time_point;
time_point startTime; // Point in time when start() was called
time_point lapTime; // Point in time when operator() was called
public:
StopWatch()
{
reset();
}
void reset()
{
startTime = clock::now();
lapTime = startTime;
}
std::chrono::microseconds stop()
{
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(clock::now() - startTime);
std::cout << "Total time: " << duration.count() << "us (" << duration.count() / 1000.0f << "ms)" << std::endl;
return duration;
}
void operator()(const std::string& str = "")
{
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(clock::now() - lapTime);
std::cout << str << (str.empty() ? "" : " ") << "took " << duration.count() << "us (" << duration.count() / 1000.0f << "ms)" << std::endl;
lapTime = clock::now();
}
};
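A usage sketch for the timing helpers above (the lambda body reuses the BoxGaus sketch from earlier and is only a placeholder):
benchmark("boxfilter", 100, [&]() { boxGaus.approxGaus(img, 4.0f, 4.0f, 3); });
StopWatch sw;
// ... work ...
sw("approxGaus"); // prints the lap time
sw.stop(); // prints the total time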

View File

@@ -80,10 +80,10 @@ namespace SMC {
return particles;
}
/** access to all non resample particles */
const std::vector<Particle<State>>& getNonResamplingParticles() {
return particlesNoResampling;
}
/** access to all non resample particles */
const std::vector<Particle<State>>& getNonResamplingParticles() {
return particlesNoResampling;
}
/** initialize/re-start the particle filter */
void init() {
@@ -91,11 +91,11 @@ namespace SMC {
initializer->initialize(particles);
}
void setAngle(const double angle){
for(SMC::Particle<State>& p : particles){
p.state.walkState.heading = angle;
}
}
void setAngle(const double angle){
for(SMC::Particle<State>& p : particles){
p.state.walkState.heading = angle;
}
}
/** set the resampling method to use */
@@ -148,7 +148,7 @@ namespace SMC {
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
// perform the transition step
transition->transition(particles, control);
transition->transition(particles, control);
// perform the evaluation step and calculate the sum of all particle weights
const double weightSum = evaluation->evaluation(particles, observation);

View File

@@ -6,6 +6,7 @@
#include <memory>
#include "BackwardFilterTransition.h"
#include "ForwardFilterHistory.h"
#include "../sampling/ParticleTrajectorieSampler.h"
@@ -16,6 +17,7 @@
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../../Assertions.h"
@@ -25,7 +27,19 @@ namespace SMC {
class BackwardFilter {
public:
virtual State update(std::vector<Particle<State>> const& forwardParticles) = 0;
/**
* @brief Update the backward filter (smoothing) using the information gathered in the forward step (filtering).
* @param forwardFilter a ForwardFilterHistory containing all filtering steps performed so far.
* @return the latest smoothed estimation; the estimations are ordered backwards in time.
*/
virtual State update(ForwardFilterHistory<State, Control, Observation>& forwardFilter, int lag = 0) = 0;
/**
* @brief getEstimations
* @return vector with all estimations running backward in time. T -> 0
*/
virtual std::vector<State> getEstimations() = 0;
/** access to all backward / smoothed particles */
virtual const std::vector<std::vector<Particle<State>>>& getbackwardParticles() = 0;
@@ -48,9 +62,6 @@ namespace SMC {
/** get the used transition method */
virtual BackwardFilterTransition<State, Control>* getTransition() = 0;
/** reset */
virtual void reset() {};
};
}

View File

@@ -46,8 +46,8 @@ namespace SMC {
/** all smoothed particles T -> 1*/
std::vector<std::vector<Particle<State>>> backwardParticles;
/** container for particles */
std::vector<Particle<State>> smoothedParticles;
/** all estimations calculated */
std::vector<State> estimatedStates;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
@@ -77,26 +77,13 @@ namespace SMC {
BackwardSimulation(int numRealizations) {
this->numRealizations = numRealizations;
backwardParticles.reserve(numRealizations);
smoothedParticles.reserve(numRealizations);
firstFunctionCall = true;
}
/** dtor */
~BackwardSimulation() {
;
}
/** reset **/
void reset(){
this->numRealizations = numRealizations;
backwardParticles.clear();
backwardParticles.reserve(numRealizations);
smoothedParticles.clear();
smoothedParticles.reserve(numRealizations);
firstFunctionCall = true;
estimatedStates.clear();
}
/** access to all backward / smoothed particles */
@@ -140,118 +127,154 @@ namespace SMC {
}
/**
* perform update: transition -> correction -> approximation
* gets the weighted sample set of a standard condensation
* particle filter in REVERSED order!
* @brief update
* @param forwardHistory
* @return
*/
State update(std::vector<Particle<State>> const& forwardParticles) {
State update(ForwardFilterHistory<State, Control, Observation>& forwardHistory, int lag = 666) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
//storage for single trajectories / smoothed particles
smoothedParticles.clear();
//init for backward filtering
std::vector<Particle<State>> smoothedParticles;
smoothedParticles.reserve(numRealizations);
firstFunctionCall = true;
// Choose \tilde x_T = x^(i)_T with probability w^(i)_T
// Therefore sample independently from the categorical distribution of weights.
if(firstFunctionCall){
smoothedParticles = sampler->drawTrajectorie(forwardParticles, numRealizations);
firstFunctionCall = false;
backwardParticles.push_back(smoothedParticles);
const State es = estimation->estimate(smoothedParticles);
return es;
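// if no lag is given (sentinel value 666), fall back to fixed-interval smoothing over the full history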
if(lag == 666){
lag = forwardHistory.size() - 1;
}
// compute weights using the transition model
// transitionWeights[numRealizations][numParticles]
std::vector<std::vector<double>> transitionWeights = transition->transition(forwardParticles, backwardParticles.back());
//check if we have enough data for lag
if(forwardHistory.size() <= lag){
lag = forwardHistory.size() - 1;
}
//get the next trajectory for a realization
for(int j = 0; j < numRealizations; ++j){
//iterate through all forward filtering steps
for(int i = 0; i <= lag; ++i){
std::vector<Particle<State>> forwardParticles = forwardHistory.getParticleSet(i);
//vector for the current smoothedWeights at time t
std::vector<Particle<State>> smoothedWeights;
smoothedWeights.resize(forwardParticles.size());
smoothedWeights = forwardParticles;
//storage for single trajectories / smoothed particles
smoothedParticles.clear();
//check if all transitionWeights are zero
double weightSumTransition = std::accumulate(transitionWeights[j].begin(), transitionWeights[j].end(), 0.0);
Assert::isNot0(weightSumTransition, "all transition weights for smoothing are zero");
// Choose \tilde x_T = x^(i)_T with probability w^(i)_T
// Therefore sample independently from the categorical distribution of weights.
if(firstFunctionCall){
int i = 0;
for (auto& w : transitionWeights.at(j)) {
smoothedParticles = sampler->drawTrajectorie(forwardParticles, numRealizations);
// multiply the weight of the particles at time t and normalize
smoothedWeights.at(i).weight = (smoothedWeights.at(i).weight * w);
if(smoothedWeights.at(i).weight != smoothedWeights.at(i).weight) {throw "detected NaN";}
firstFunctionCall = false;
backwardParticles.push_back(smoothedParticles);
// iter
++i;
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
}
//get the sum of all weights
auto lambda = [](double current, const Particle<State>& a){return current + a.weight; };
double weightSumSmoothed = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
// compute weights using the transition model
// transitionWeights[numRealizations][numParticles]
std::vector<std::vector<double>> transitionWeights = transition->transition(forwardParticles, backwardParticles.back());
//normalize the weights
if(weightSumSmoothed != 0.0){
for (int i = 0; i < smoothedWeights.size(); ++i){
smoothedWeights.at(i).weight /= weightSumSmoothed;
//get the next trajectory for a realization
for(int j = 0; j < numRealizations; ++j){
//vector for the current smoothedWeights at time t
std::vector<Particle<State>> smoothedWeights;
smoothedWeights.resize(forwardParticles.size());
smoothedWeights = forwardParticles;
//check if all transitionWeights are zero
double weightSumTransition = std::accumulate(transitionWeights[j].begin(), transitionWeights[j].end(), 0.0);
Assert::isNot0(weightSumTransition, "all transition weights for smoothing are zero");
int k = 0;
for (auto& w : transitionWeights.at(j)) {
// multiply the weight of the particles at time t and normalize
smoothedWeights.at(k).weight = (smoothedWeights.at(k).weight * w);
if(smoothedWeights.at(k).weight != smoothedWeights.at(k).weight) {throw "detected NaN";}
// iter
++k;
}
//check if normalization worked
double normWeightSum = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
Assert::isNear(normWeightSum, 1.0, 0.001, "Smoothed weights do not sum to 1");
//get the sum of all weights
auto lambda = [](double current, const Particle<State>& a){return current + a.weight; };
double weightSumSmoothed = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
//normalize the weights
if(weightSumSmoothed != 0.0){
for (int l = 0; l < smoothedWeights.size(); ++l){
smoothedWeights.at(l).weight /= weightSumSmoothed;
}
//check if normalization worked
double normWeightSum = std::accumulate(smoothedWeights.begin(), smoothedWeights.end(), 0.0, lambda);
Assert::isNear(normWeightSum, 1.0, 0.001, "Smoothed weights do not sum to 1");
}
//draw the next trajectory at time t for a realization and save it
smoothedParticles.push_back(sampler->drawSingleParticle(smoothedWeights));
//throw if weight of smoothedParticle is zero
//in practice this can happen if a particle is completely separated from the rest and is therefore
//weighted zero or extremely low.
Assert::isNot0(smoothedParticles.back().weight, "smoothed particle has zero weight");
}
//draw the next trajectory at time t for a realization and save it
smoothedParticles.push_back(sampler->drawSingleParticle(smoothedWeights));
if(resampler)
{
//TODO - does this even make sense?
Assert::doThrow("Warning - Resampling is not yet implemented!");
}
// push_back the smoothedParticles
backwardParticles.push_back(smoothedParticles);
// estimate the current state
if(lag == (forwardHistory.size() - 1) ){ //fixed interval
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
}
else if (i == lag) { //fixed lag
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
}
//throw if weight of smoothedParticle is zero
//in practice this can happen if a particle is completely separated from the rest and is therefore
//weighted zero or extremely low.
Assert::isNot0(smoothedParticles.back().weight, "smoothed particle has zero weight");
}
//return the calculated estimations
// TODO: With fixed-lag smoothing we only care about the latest estimation and the latest set of smoothed particles,
// since we keep moving forward in time and a new particle set arrives per time step. So with lag = 3 we smooth t - 3
// and should only store the estimation and the particle set of t - 3.
//
// With fixed-interval smoothing, on the other hand, we care about all of them, since no new future information
// arrives and we simply walk backwards through time sequentially.
//
// Proposed solutions:
// - Observer pattern? Emit whenever a new estimation and a new particle set are available. (Probably pointless, since there are no separate threads?)
// - Do it as before, i.e. one estimation per update, but simply hand every update all observations and all controls?
// - Additionally pass the lag as a parameter. The ForwardFilterHistory can then keep holding everything; there are no
//   continuous updates, one simply has to wait for the single computation. Possibly add some kind of progress indicator
//   (30 of 120 timestamps done). (I think this is the best, most black-box-like version.) The lag could of course also be
//   passed when the BackwardSimulation object is constructed, BUT then dynamic-lag smoothing would no longer be possible,
//   so better keep it a parameter. Knowing the lag tells us exactly what has to be stored in estimatedStates and
//   backwardParticles without running into the problem above; we just do not get continuous updates. An observer pattern
//   only pays off with multiple threads, which would be overkill here, so a progress indicator it is.
// - Or simply provide two update functions for the two variants, but I consider that a poor compromise.
if(resampler)
{
// would it make sense to store the estimations as well?
//TODO - does this even make sense?
std::cout << "Warning - Resampling is not yet implemented!" << std::endl;
// //resampling if necessery
// double sum = 0.0;
// double weightSum = std::accumulate(smoothedParticles.begin().weight, smoothedParticles.end().weight, 0.0);
// for (auto& p : smoothedParticles) {
// p.weight /= weightSum;
// sum += (p.weight * p.weight);
// }
// const double neff = 1.0/sum;
// if (neff != neff) {throw "detected NaN";}
// // if the number of efficient particles is too low, perform resampling
// if (neff < smoothedParticles.size() * nEffThresholdPercent) { resampler->resample(smoothedParticles); }
}
// push_back the smoothedParticles
backwardParticles.push_back(smoothedParticles);
// estimate the current state
const State est = estimation->estimate(smoothedParticles);
// done
return est;
return estimatedStates.back();
}
std::vector<State> getEstimations(){
return estimatedStates;
}
};
}

View File

@@ -0,0 +1,271 @@
#ifndef FASTKDESMOOTHING_H
#define FASTKDESMOOTHING_H
#include <vector>
#include <memory>
#include <algorithm>
#include "BackwardFilterTransition.h"
#include "BackwardFilter.h"
#include "../Particle.h"
#include "../../floorplan/v2/FloorplanHelper.h";
#include "../../grid/Grid.h";
#include "../filtering/resampling/ParticleFilterResampling.h"
#include "../filtering/estimation/ParticleFilterEstimation.h"
#include "../filtering/ParticleFilterEvaluation.h"
#include "../filtering/ParticleFilterInitializer.h"
#include "../filtering/ParticleFilterTransition.h"
#include "../sampling/ParticleTrajectorieSampler.h"
#include "../../math/boxkde/benchmark.h"
#include "../../math/boxkde/DataStructures.h"
#include "../../math/boxkde/Image2D.h"
#include "../../math/boxkde/BoxGaus.h"
#include "../../math/boxkde/Grid2D.h"
#include "../../Assertions.h"
namespace SMC {
template <typename State, typename Control, typename Observation>
class FastKDESmoothing : public BackwardFilter<State, Control, Observation>{
private:
/** all smoothed particles T -> 1*/
std::vector<std::vector<Particle<State>>> backwardParticles;
/** all estimations calculated */
std::vector<State> estimatedStates;
/** the estimation function to use */
std::unique_ptr<ParticleFilterEstimation<State>> estimation;
/** the transition function to use */
std::unique_ptr<ParticleFilterTransition<State, Control>> transition;
/** the resampler to use */
std::unique_ptr<ParticleFilterResampling<State>> resampler;
/** the sampler for drawing trajectories */
std::unique_ptr<ParticleTrajectorieSampler<State>> sampler;
/** the percentage-of-efficient-particles-threshold for resampling */
double nEffThresholdPercent = 0.25;
/** number of realizations to be calculated */
int numParticles;
/** is update called the first time? */
bool firstFunctionCall;
/** boundingBox for the boxKDE */
BoundingBox<float> bb;
/** histogram/grid holding the particles*/
Grid2D<float> grid;
/** bandwidth for the KDE */
Point2 bandwith;
public:
/** ctor */
FastKDESmoothing(int numParticles, const Floorplan::IndoorMap* map, const int gridsize_cm, const Point2 bandwith) {
this->numParticles = numParticles;
backwardParticles.reserve(numParticles);
firstFunctionCall = true;
const Point3 maxBB = FloorplanHelper::getBBox(map).getMax();
const Point3 minBB = FloorplanHelper::getBBox(map).getMin();
bb = BoundingBox<float>(minBB.x, maxBB.x, minBB.y, maxBB.y);
// Create histogram
size_t nBinsX = static_cast<size_t>((maxBB.x - minBB.x) / gridsize_cm);
size_t nBinsY = static_cast<size_t>((maxBB.y - minBB.y) / gridsize_cm);
grid = Grid2D<float>(bb, nBinsX, nBinsY);
this->bandwith = bandwith;
}
/** dtor */
~FastKDESmoothing() {
backwardParticles.clear();
estimatedStates.clear();
}
/** access to all backward / smoothed particles */
const std::vector<std::vector<Particle<State>>>& getbackwardParticles() {
return backwardParticles;
}
/** set the estimation method to use */
void setEstimation(std::unique_ptr<ParticleFilterEstimation<State>> estimation) {
Assert::isNotNull(estimation, "setEstimation() MUST not be called with a nullptr!");
this->estimation = std::move(estimation);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<ParticleFilterTransition<State, Control>> transition) {
Assert::isNotNull(transition, "setTransition() MUST not be called with a nullptr!");
this->transition = std::move(transition);
}
/** set the transition method to use */
void setTransition(std::unique_ptr<BackwardFilterTransition<State, Control>> transition) {
Assert::doThrow("Do not use a backward transition for fast smoothing! Forward Transition");
//TODO: two times setTransition is not the best solution
}
/** set the resampling method to use */
void setResampling(std::unique_ptr<ParticleFilterResampling<State>> resampler) {
Assert::isNotNull(resampler, "setResampling() MUST not be called with a nullptr!");
this->resampler = std::move(resampler);
}
/** set the sampler method to use */
void setSampler(std::unique_ptr<ParticleTrajectorieSampler<State>> sampler){
Assert::isNotNull(sampler, "setSampler() MUST not be called with a nullptr!");
this->sampler = std::move(sampler);
}
/** set the resampling threshold as the percentage of efficient particles */
void setNEffThreshold(const double thresholdPercent) {
this->nEffThresholdPercent = thresholdPercent;
}
/** get the used transition method */
BackwardFilterTransition<State, Control>* getTransition() {
return nullptr;
}
/**
* @brief update
* @param forwardHistory
* @return
*/
State update(ForwardFilterHistory<State, Control, Observation>& forwardHistory, int lag = 666) {
// sanity checks (if enabled)
Assert::isNotNull(transition, "transition MUST not be null! call setTransition() first!");
Assert::isNotNull(estimation, "estimation MUST not be null! call setEstimation() first!");
//init for backward filtering
std::vector<Particle<State>> smoothedParticles;
smoothedParticles.reserve(numParticles);
firstFunctionCall = true;
// if no lag is given, we have a fixed interval smoothing
if(lag == 666){
lag = forwardHistory.size() - 1;
}
//check if we have enough data for lag
if(forwardHistory.size() <= lag){
lag = forwardHistory.size() - 1;
}
//iterate through all forward filtering steps
for(int i = 0; i <= lag; ++i){
std::vector<Particle<State>> forwardParticlesForTransition_t1 = forwardHistory.getParticleSet(i);
//storage for single trajectories / smoothed particles
smoothedParticles.clear();
// Choose \tilde x_T = x^(i)_T with probability w^(i)_T
// Therefore sample independently from the categorical distribution of weights.
if(firstFunctionCall){
smoothedParticles = sampler->drawTrajectorie(forwardParticlesForTransition_t1, numParticles);
firstFunctionCall = false;
backwardParticles.push_back(smoothedParticles);
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
return est;
}
// transition p(q_t+1* | q_t): so we are performing again a forward transition step.
// we are doing this to track single particles between two timesteps! normally, resampling would destroy
// any identifier given to particles.
// Note: at this point we can integrate future observation and control information for better smoothing
Control controls = forwardHistory.getControl(i-1);
Observation obs = forwardHistory.getObservation(i-1);
transition->transition(forwardParticlesForTransition_t1, &controls);
// KDE over the q_t+1 samples = p(q_t+1 | o_1:T) - smoothed samples from the future
grid.clear();
for (const Particle<State>& p : backwardParticles.back())
{
grid.add(p.state.position.x_cm, p.state.position.y_cm, p.weight);
}
int nFilt = 3;
float sigmaX = bandwith.x / grid.binSizeX;
float sigmaY = bandwith.y / grid.binSizeY;
BoxGaus<float> boxGaus;
boxGaus.approxGaus(grid.image(), sigmaX, sigmaY, nFilt);
// Evaluate the KDE of p(q_t+1 | o_1:T) at the positions of the q_t+1* samples to get p(q_t+1* | o_1:T)
// Calculate the new weight w(q_(t|T)) = w(q_t) * p(q_t+1* | o_1:T) * p(q_t+1* | q_t) * normalization
smoothedParticles = forwardHistory.getParticleSet(i);
for(Particle<State>& p : smoothedParticles){
p.weight = p.weight * grid.fetch(p.state.position.x_cm, p.state.position.y_cm);
Assert::isNot0(p.weight, "smoothed particle has zero weight");
}
//normalization
auto lambda = [](double current, const Particle<State>& a){return current + a.weight; };
double weightSumSmoothed = std::accumulate(smoothedParticles.begin(), smoothedParticles.end(), 0.0, lambda);
if(weightSumSmoothed != 0.0){
for (Particle<State>& p : smoothedParticles){
p.weight /= weightSumSmoothed;
}
//check if normalization worked
double normWeightSum = std::accumulate(smoothedParticles.begin(), smoothedParticles.end(), 0.0, lambda);
Assert::isNear(normWeightSum, 1.0, 0.001, "Smoothed weights do not sum to 1");
} else {
Assert::doThrow("Weight Sum of smoothed particles is zero!");
}
if(resampler)
{
//TODO - does this even make sense?
Assert::doThrow("Warning - Resampling is not yet implemented!");
}
// push_back the smoothedParticles
backwardParticles.push_back(smoothedParticles);
// estimate the current state
if(lag == (forwardHistory.size() - 1) ){ //fixed interval
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
}
else if (i == lag) { //fixed lag
State est = estimation->estimate(smoothedParticles);
estimatedStates.push_back(est);
}
}
return estimatedStates.back();
}
std::vector<State> getEstimations(){
return estimatedStates;
}
};
}
#endif // FASTKDESMOOTHING_H

View File

@@ -32,13 +32,11 @@ namespace SMC {
//empty ctor
}
void add(Timestamp time, std::vector<std::vector<Particle<State>>> set, Control control, Observation obs){
void add(Timestamp time, std::vector<Particle<State>> set, Control control, Observation obs){
// Is empty? Null? etc.
Assert::isNotNull(time, "Timestamp is Null");
Assert::isNotNull(set, "Particle Set is Null");
Assert::isNotNull(control, "Control is Null");
Assert::isNotNull(obs, "Observation is Null");
// Is empty? Null? 0? etc.
Assert::isNot0(time.ms(), "Timestamp is 0");
if(set.empty()){Assert::doThrow("Particle Set is Empty");}
timestamps.push_back(time);
particleSets.push_back(set);
@@ -64,39 +62,39 @@ namespace SMC {
* @brief Return the particles from [latestFilterUpdate - @param idx]
* @return returns vector of particles. note: C++11 moves the returned vector here
*/
std::vector<Particle<State>> getParticleSet(idx = 0){
return particleSets.at(particleSets.end() - idx);
std::vector<Particle<State>> getParticleSet(int idx = 0){
return particleSets.at(particleSets.size() - 1 - idx);
}
/**
* @brief getControl from [latestFilterUpdate - @param idx]
* @return const controls object
*/
const Control getControl(idx = 0){
return controls.at(controls.end() - idx);
const Control getControl(int idx = 0){
return controls.at(controls.size() - 1 - idx);
}
/**
* @brief getObservation from [latestFilterUpdate - @param idx]
* @return const observation object
*/
const Observation getObservation (idx = 0){
return observations.at(observations.end() - idx);
const Observation getObservation(int idx = 0){
return observations.at(observations.size() - 1 - idx);
}
/**
* @brief Return the timestamp from [latestFilterUpdate - @param idx]
* @return returns a Timestamp object
*/
std::vector<Particle<State>> getTimestamp(idx = 0){
return timestamps.at(particleSets.end() - idx);
const Timestamp getTimestamp(int idx = 0){
return timestamps.at(timestamps.size() - 1 - idx);
}
/**
* @brief getLatestFilterUpdateNum
* @brief size
* @return number of stored particle sets
*/
const int getLatestFilterUpdateNum(){
int size(){
return particleSets.size();
}
@@ -133,6 +131,14 @@ namespace SMC {
return timestamps.back();
}
/**
* @brief getFirstTimestamp
* @return const Timestamp object
*/
const Timestamp getFirstTimestamp(){
return timestamps.front();
}
};