fixed comments
@@ -394,7 +394,7 @@ public:
 //std::cout << "SmoothingDist: " << tsHistory[(tsHistory.size() - 1) - MiscSettings::lag] << " " << statsDistSmoothing.asString() << std::endl;

 //save to file
-statsoutSmoothing << "\n\t"; statsSmoothing.appendTo(statsoutFiltering); statsoutFiltering << "\n\n";
+statsoutSmoothing << "\n\t"; statsSmoothing.appendTo(statsoutSmoothing); statsoutSmoothing << "\n\n";

 //plot
 vis.clearStates();
@@ -444,8 +444,8 @@ public:
 }

 }
-statsout.close();
+statsoutFiltering.close();
+statsoutSmoothing.close();

 { // detailled error-description (normally filtered)
 std::ofstream oError("/tmp/err_norm_" + runName + ".dat");
@@ -46,15 +46,27 @@ public:
 //pf->setResampling( std::unique_ptr<K::ParticleFilterResamplingPercent<MyState>>(new K::ParticleFilterResamplingPercent<MyState>(0.10)) );

 // state estimation step
-pf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>>(new K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>()));
+//pf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>>(new K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>()));
 //pf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationRegionalWeightedAverage<MyState>>(new K::ParticleFilterEstimationRegionalWeightedAverage<MyState>()));
-//pf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationOrderedWeightedAverage<MyState>>(new K::ParticleFilterEstimationOrderedWeightedAverage<MyState>(0.50f)));
+pf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationOrderedWeightedAverage<MyState>>(new K::ParticleFilterEstimationOrderedWeightedAverage<MyState>(0.50f)));


 //create the backward smoothing filter
-bf = new K::BackwardSimulation<MyState>(MiscSettings::numBSParticles);
+//bf = new K::BackwardSimulation<MyState>(MiscSettings::numBSParticles);
-//bf = new K::CondensationBackwardFilter<MyState>;
+bf = new K::CondensationBackwardFilter<MyState>;
-bf->setSampler( std::unique_ptr<K::CumulativeSampler<MyState>>(new K::CumulativeSampler<MyState>()));
+//bf->setSampler( std::unique_ptr<K::CumulativeSampler<MyState>>(new K::CumulativeSampler<MyState>()));

+bool smoothing_resample = false;
+
+//Smoothing using Simple Trans
+//bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
+//bf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationRegionalWeightedAverage<MyState>>(new K::ParticleFilterEstimationRegionalWeightedAverage<MyState>()));
+bf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationOrderedWeightedAverage<MyState>>(new K::ParticleFilterEstimationOrderedWeightedAverage<MyState>(0.50f)));
+
+if(smoothing_resample)
+bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
+bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );

 }
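The hunk above switches the forward estimation to K::ParticleFilterEstimationOrderedWeightedAverage(0.50f); the library's internals are not shown in this diff. The standalone C++ sketch below only illustrates the presumed idea behind an ordered weighted average with a 0.5 ratio, namely averaging the best-weighted half of the particles. The Particle layout, its fields and the ratio parameter are illustrative assumptions, not the library's API.

// Sketch (editorial illustration, not part of the commit or of the K:: library).
#include <algorithm>
#include <vector>

struct Particle {          // assumed minimal particle layout
	double x, y, z;        // position part of the state
	double weight;         // importance weight
};

// Average only the top `ratio` fraction of particles, ordered by weight,
// which is presumably what OrderedWeightedAverage(0.50f) refers to.
Particle estimateOrderedWeightedAverage(std::vector<Particle> particles, double ratio) {
	std::sort(particles.begin(), particles.end(),
	          [](const Particle& a, const Particle& b) { return a.weight > b.weight; });
	const std::size_t n = std::max<std::size_t>(1, static_cast<std::size_t>(particles.size() * ratio));
	Particle est{0, 0, 0, 0};
	double wSum = 0;
	for (std::size_t i = 0; i < n; ++i) {
		est.x += particles[i].weight * particles[i].x;
		est.y += particles[i].weight * particles[i].y;
		est.z += particles[i].weight * particles[i].z;
		wSum  += particles[i].weight;
	}
	est.x /= wSum; est.y /= wSum; est.z /= wSum;
	est.weight = wSum;
	return est;
}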
@@ -81,14 +93,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>>(new K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void fixedIntervallSimpleTransPath4(){
@@ -112,15 +116,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = false;
-
-
-//Smoothing using Simple Trans
-//bf->setEstimation( std::unique_ptr<K::ParticleFilterEstimationOrderedWeightedAverage<MyState>>(new K::ParticleFilterEstimationOrderedWeightedAverage<MyState>(0.50f)));
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>>(new K::ParticleFilterEstimationWeightedAverageWithAngle<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 // ============================================================ Dijkstra ============================================== //
@@ -146,15 +141,6 @@ public:
 smoothing_walk_sigma = 0.5;
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Dijkstra
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransition>( new MySmoothingTransition(&grid)) );
 }

@@ -175,15 +161,6 @@ public:
 smoothing_walk_sigma = 0.5;
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path1_nexus_simple() {
@@ -244,14 +221,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path2_nexus_simple() {
@@ -313,14 +282,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path3_nexus_simple() {
@@ -381,14 +342,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.05;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path4_nexus_simple() {
@@ -450,14 +403,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.15;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path1_galaxy_simple() {
@@ -501,14 +446,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.15;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path2_galaxy_simple() {
@@ -553,14 +490,6 @@ public:
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.15;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path3_galaxy_simple() {
@@ -605,15 +534,6 @@ public:
 smoothing_walk_sigma = 0.5;
 smoothing_heading_sigma = 5.0;
 smoothing_baro_sigma = 0.15;

-bool smoothing_resample = true;
-
-
-//Smoothing using Simple Trans
-bf->setEstimation(std::unique_ptr<K::ParticleFilterEstimationWeightedAverage<MyState>>(new K::ParticleFilterEstimationWeightedAverage<MyState>()));
-if(smoothing_resample)
-bf->setResampling( std::unique_ptr<K::ParticleFilterResamplingSimple<MyState>>(new K::ParticleFilterResamplingSimple<MyState>()) );
-bf->setTransition(std::unique_ptr<MySmoothingTransitionExperimental>( new MySmoothingTransitionExperimental) );
 }

 void bergwerk_path4_galaxy_simple() {
Binary file not shown.
@@ -2,6 +2,8 @@

 map information into smoothing. better way and faster then just dijkstra. compensate big jumps caused by wifi. better method for estimation and drawing of particles in backward simulation. more advanced smoothing transition. not used evaluating using the observations, but using the given information for more advanced approaches.

+fixed-lag gap dynamic interval dependend upon estimation error variance
+
 \begin{figure}
 \input{gfx/activity/activity_over_time}
 \caption{activity recognition}
@@ -22,9 +22,9 @@ The measurements were recorded using a Motorola Nexus 6 and a Samsung Galaxy S5.
 As the Galaxy's \docWIFI{} can not be limited to the \SI{2.4}{\giga\hertz} band only, its scans take much longer than those of the Nexus: \SI{3500}{\milli\second} vs. \SI{600}{\milli\second}.
 Additionally, the Galaxy's barometer sensor provides fare more inaccurate and less frequent readings than the Nexus does.
 This results in a better localisation using the Nexus smartphone.
-The computation for both filtering and smoothing was done offline using the aforementioned \mbox{CONDENSATION} algorithm.
+The computation for both filtering and smoothing was done offline using the aforementioned \mbox{CONDENSATION} algorithm and multinomal (cumulative) resampling.
-However, the filter itself would be fast enough to run on the smartphone itself ($ \approx \SI{100}{\milli\second} $ per transition, single-core Intel\textsuperscript{\textregistered} Atom{\texttrademark} C2750).
+%However, the filter itself would be fast enough to run on the smartphone itself ($ \approx \SI{100}{\milli\second} $ per transition, single-core Intel\textsuperscript{\textregistered} Atom{\texttrademark} C2750).
-The computational times of the different smoothing algorithm will be discussed later.
+%The computational times of the different smoothing algorithm will be discussed later.
 Unless explicitly stated, the state was estimated using the weighted arithmetic mean of the particles.

 As mentioned earlier, the position of all \docAP{}s (about 5 per floor) is known beforehand.
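The hunk above now states that filtering and smoothing use the CONDENSATION algorithm together with multinomial (cumulative) resampling. The following standalone C++ sketch illustrates that resampling scheme under stated assumptions: a cumulative weight table is built and N new particles are drawn by inverting it. The Particle type and the uniform weight reset are assumptions for illustration, not code from the repository.

// Sketch (editorial illustration): multinomial resampling via the cumulative weight distribution.
#include <algorithm>
#include <random>
#include <vector>

struct Particle {
	double x, y, z;   // assumed state layout
	double weight;    // importance weight
};

std::vector<Particle> resampleMultinomial(const std::vector<Particle>& in, std::mt19937& rng) {
	// build the cumulative distribution of the (unnormalized) weights
	std::vector<double> cdf(in.size());
	double sum = 0;
	for (std::size_t i = 0; i < in.size(); ++i) { sum += in[i].weight; cdf[i] = sum; }

	// draw N times from the CDF; each draw copies the selected particle
	std::uniform_real_distribution<double> u(0.0, sum);
	std::vector<Particle> out;
	out.reserve(in.size());
	for (std::size_t i = 0; i < in.size(); ++i) {
		const double r = u(rng);
		const auto it = std::lower_bound(cdf.begin(), cdf.end(), r);
		Particle p = in[it - cdf.begin()];
		p.weight = 1.0 / in.size();   // equal weights after resampling
		out.push_back(p);
	}
	return out;
}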
@@ -85,10 +85,15 @@ At next, we discuss the advantages and disadvantages of utilizing FBS and BS as
 Compared to fixed-interval smoothing, timely errors are now of higher importance due to an interest on real-time localization.
 Especially interesting in this context are small lags $\tau < 10$ considering filter updates near \SI{500}{\milli\second}.

-%as seen, FBS was already not that good in the fixed-interval case; for the lag its influence is negligible. only an improvement of... therefore we focus the discussion on the BS.
+%as seen, FBS was already not that good in the fixed-interval case; for the lag its influence is negligible. visual and error-wise improvements are barely present. only an improvement of... therefore we focus the discussion on the BS. still, the result does not get worse either; the improvement is just not as significant as with BS.

 Fig. \ref{} illustrates the estimation results for path 4 using \SI{2500}{particles}, \SI{50}{sample realisations} for BS and a fixed-lag $\tau = 5$.

+fixed-lag reduces the error about... however, as seen in fig. \ref{}, the raw error is not necessarily decisive for the improvement. smoothing almost always yields paths that are more realistic but increase the error.
+
+%conclusion of the experiments
+with fewer particles, fixed-lag and fixed-interval smoothing relatively gain even more, because there is more to "clean up". nevertheless, the performance naturally depends strongly on the forward step; one should not choose too few particles and rather play it safe. BS is a good deal better than FBS in our case, which is also strongly related to the already very good filtering step. one could still conclude that BS is better suited for indoor use.

 %FBS is poor here and lies directly on top of the filter? or an FBS with a different estimation and without showing the filter?! that is nonsense... if I do not show the filter. if need be, simply drop the FBS and state that it is not suitable.

 %Smoothing with a large lag can hardly retain the temporal information. this is mainly because smoothing only uses the relative position information. the Wi-Fi is not taken into account, and therefore absolute corrections of the position (jumps) can only be absorbed very poorly.
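The fixed-lag setting discussed above (and MiscSettings::lag in the first hunk of this commit) boils down to simple bookkeeping: the results of the last $\tau$ filter updates are buffered, and the smoothed estimate is emitted for time $t - \tau$. The standalone C++ sketch below illustrates only that bookkeeping under stated assumptions; the types and the class name are illustrative, not code from the repository.

// Sketch (editorial illustration): fixed-lag smoothing bookkeeping with lag tau.
#include <deque>
#include <vector>

struct Particle { double x, y, z; double weight; };
using ParticleSet = std::vector<Particle>;

class FixedLagBuffer {
public:
	explicit FixedLagBuffer(std::size_t lag) : lag(lag) {}

	// Called after every filter update with the current particle set.
	// Returns true when `smoothedTarget` refers to the set of time t - lag,
	// i.e. when enough future observations have been seen to smooth it.
	bool push(const ParticleSet& current, ParticleSet& smoothedTarget) {
		history.push_back(current);
		if (history.size() <= lag) return false;   // not enough future data yet
		smoothedTarget = history.front();          // state at t - lag; a backward pass
		                                           // over `history` would reweight it here
		history.pop_front();
		return true;
	}

private:
	std::size_t lag;
	std::deque<ParticleSet> history;               // the last lag+1 filter results
};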
@@ -1,7 +1,4 @@
 %\section{Filtering}

-\commentByFrank{swap the order of eval and transition?}
-
 \subsection{Evaluation}
 \label{sec:eval}

@@ -89,7 +86,7 @@
 directly within the transition step provides a more robust posterior distribution. Adding them to the evaluation
 instead, would lead to sample impoverishment due to the used Monte Carlo methods \cite{Isard98:CCD}.

-\commentByFrank{is this understandable or already too short?}
+%\commentByFrank{is this understandable or already too short?}

 %\subsubsection{Pedestrian's Destination}
 We assume the pedestrian's desired destination to be known beforehand. This prior knowledge is incorporated
@@ -138,21 +135,30 @@
 %\subsubsection{Activity-Detection}
 Additionally we perform a simple activity detection for the pedestrian, able to distinguish between several actions
 $\mObsActivity \in \{ \text{unknown}, \text{standing}, \text{walking}, \text{stairs\_up}, \text{stairs\_down} \}$.
-Likewise, this knowledge is evaluated when walking the grid: Edges $\mEdgeAB$ matching the currently detected
-activity are favoured using $p(\mEdgeAB)_\text{act} = 0.8$ and $0.2$ otherwise:
+%
+%\commentByFrank{nothing overlaps for me at the moment; we would have to test what works better. with overlapping the delay is simply shorter. I think it is fine.}
+%
+For this, the sensor signals are split in sliding windows. Each window has a length of one second and overlaps 500 ms with its prior window.
+We use a naive Bayes classifier with two features. The first one is the variance of the accelerometer's magnitude within a window.
+The second feature is the difference between the last and first barometer measurement of the particular window.
+Based on these features the classifier assigns an activity to each of the sliding windows.
+%
+Similarly to the above, this knowledge is then evaluated when walking the grid: Edges $\mEdgeAB$ matching the currently detected
+activity are favoured using $p(\mEdgeAB)_\text{act} = 0.8$ and $0.2$ otherwise.
+If no information of the current activitiy could be obtained, no influence is exerted on the edges.

-\begin{equation}
-p(\mEdgeAB)_\text{act} =
-\footnotesize{
-\begin{cases}
-1.0 & \mObsActivity = \text{unknown} \\
-0.8 & \mObsActivity = \text{stairs\_up} \land \fPos{\mVertexB}_z > \fPos{\mVertexA}_z \\
-0.2 & \mObsActivity = \text{stairs\_up} \land \fPos{\mVertexB}_z \le \fPos{\mVertexA}_z \\
-\cdots
-\end{cases}
-}\enskip .
-\end{equation}
-\commentByFrank{the switch is probably unnecessary and the text is sufficient}
+% \begin{equation}
+% p(\mEdgeAB)_\text{act} =
+% \footnotesize{
+% \begin{cases}
+% 1.0 & \mObsActivity = \text{unknown} \\
+% 0.8 & \mObsActivity = \text{stairs\_up} \land \fPos{\mVertexB}_z > \fPos{\mVertexA}_z \\
+% 0.2 & \mObsActivity = \text{stairs\_up} \land \fPos{\mVertexB}_z \le \fPos{\mVertexA}_z \\
+% \cdots
+% \end{cases}
+% }\enskip .
+% \end{equation}
+% \commentByFrank{the switch is probably unnecessary and the text is sufficient}


 % Activity Recognition
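The added paragraph above describes the activity detection: one-second windows with 500 ms overlap, two features (variance of the accelerometer magnitude, barometer difference over the window), and a naive Bayes classifier. The standalone C++ sketch below illustrates that pipeline; the Gaussian class-conditional model and every numeric parameter are assumptions chosen for illustration, not values from the paper or the repository.

// Sketch (editorial illustration of the described two-feature activity classifier;
// the Gaussian class models and all numbers are placeholders that would be learned
// from labelled training windows).
#include <array>
#include <cmath>
#include <vector>

enum class Activity { Unknown, Standing, Walking, StairsUp, StairsDown };

// feature 1: variance of the accelerometer magnitude within one window
double accelMagnitudeVariance(const std::vector<std::array<double, 3>>& acc) {
	std::vector<double> mag;
	for (const auto& a : acc) mag.push_back(std::sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]));
	double mean = 0; for (double m : mag) mean += m; mean /= mag.size();
	double var = 0;  for (double m : mag) var += (m - mean) * (m - mean);
	return var / mag.size();
}

// feature 2: barometer difference between the last and first sample of the window
double baroDelta(const std::vector<double>& baro) { return baro.back() - baro.front(); }

// naive Bayes with (assumed) Gaussian class-conditional densities per feature
struct Gauss {
	double mu, sigma;
	double logp(double v) const { const double d = (v - mu) / sigma; return -0.5 * d * d - std::log(sigma); }
};

Activity classifyWindow(double accVar, double dBaro) {
	struct Model { Activity a; Gauss fVar, fBaro; };
	const Model models[] = {
		{Activity::Standing,   {0.05, 0.05}, { 0.00, 0.02}},
		{Activity::Walking,    {1.00, 0.50}, { 0.00, 0.02}},
		{Activity::StairsUp,   {1.20, 0.60}, {-0.05, 0.03}},   // pressure drops while ascending
		{Activity::StairsDown, {1.20, 0.60}, { 0.05, 0.03}},
	};
	Activity best = Activity::Unknown;
	double bestLogP = -1e300;
	for (const Model& m : models) {
		const double lp = m.fVar.logp(accVar) + m.fBaro.logp(dBaro);  // equal priors assumed
		if (lp > bestLogP) { bestLogP = lp; best = m.a; }
	}
	return best;
}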
@@ -161,17 +167,11 @@
 % time interval for which the features are computed


-\commentByFrank{drop this paragraph? I already described that}
+% \commentByFrank{drop this paragraph? I already described that}
-The transition model includes a simple recognizer of different locomotion modes like normal walking or ascending/descending stairs. The reasoning behind this is to favour paths that correspond with the detected locomotion mode.
+% The transition model includes a simple recognizer of different locomotion modes like normal walking or ascending/descending stairs. The reasoning behind this is to favour paths that correspond with the detected locomotion mode.

-\commentByFrank{nothing overlaps for me at the moment; we would have to test what works better. with overlapping the delay is simply shorter. I think it is fine.}
-\commentByFrank{sentence order was odd -> adjusted}
-For this, the sensor signals are split in sliding windows. Each window has a length of one second and overlaps 500 ms with its prior window.
-\commentByFrank{navies: naive?}
-We use a Naives Bayes classifier with two features. The first one is the variance of the accelerometer's magnitude within a window.
-The second feature is the difference between the last and first barometer measurement of the particular window.
-Based on these features the classifier assigns an activity to each of the sliding windows.
 %\todo{What happens if an overlapping window gets assigned two different activities? Possibly drop the sliding windows?}


@@ -6,7 +6,7 @@
 Sequential MC filter, like aforementioned particle filter, use all observations $\mObsVec_{1:t}$ until the current time $t$ for computing an estimation of the state $\mStateVec_t$.
 In a Bayesian setting, this can be formalized as the computation of the posterior distribution $p(\mStateVec_t \mid \mObsVec_{1:t})$ using a sample of $N$ independent random variables, $\vec{X}^i_{t} \sim (\mStateVec_t \mid \mObsVec_{1:t})$ for $i = 1,...,N$ for approximation.
 Due to importance sampling, a weight $W^i_t$ is assigned to each sample $\vec{X}^i_{t}$.
-In context of particle filtering $\{\vec{X}^i_{1:t}, W^i_{1:t} \}_{i=1}^N$ is a weighted set of samples, also called particles.
+In context of particle filtering $\{W^i_{1:t}, \vec{X}^i_{1:t} \}_{i=1}^N$ is a weighted set of samples, also called particles.
 Therefore a particle is a representation of one possible system state $\mStateVec$.
 By considering a situation given all observations $\vec{o}_{1:T}$ until a time step $T$, where $t \ll T$, standard filtering methods are not able to make use of this additional data for computing $p(\mStateVec_t \mid \mObsVec_{1:T})$.
 This problem can be solved with a smoothing algorithm.
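The hunk above introduces the weighted sample set notation. As the experiments section states, the state estimate is normally the weighted arithmetic mean of the particles; a minimal standalone C++ sketch of that estimator follows. The Particle layout is an assumption for illustration, not the repository's definition.

// Sketch (editorial illustration): weighted arithmetic mean over a weighted set of samples.
#include <vector>

struct Particle { double x, y, z; double weight; };   // assumed state layout

Particle weightedMean(const std::vector<Particle>& particles) {
	Particle mean{0, 0, 0, 0};
	double wSum = 0;
	for (const Particle& p : particles) {
		mean.x += p.weight * p.x;
		mean.y += p.weight * p.y;
		mean.z += p.weight * p.z;
		wSum   += p.weight;
	}
	mean.x /= wSum; mean.y /= wSum; mean.z /= wSum;
	mean.weight = 1.0;
	return mean;
}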
@@ -22,7 +22,7 @@ The origin of MC smoothing can be traced back to Genshiro Kitagawa.
 In his work \cite{kitagawa1996monte} he presented the simplest form of smoothing as an extension to the particle filter.
 This algorithm is often called the filter-smoother since it runs online and a smoothing is provided while filtering.
 %\commentByFrank{the bit about the weighted paths irritates me a little. was the original work also about something where paths were involved? it just fits almost too well; I do not want the terminology to get mixed up. on a first read it is unclear to me what is meant by it}
-This approach uses the particle filter steps to update weighted paths $\{(\vec{X}_{1:t}^i , W^i_t)\}^N_{i=1}$, producing an accurate approximation of the filtering posterior $p(\vec{q}_{t} \mid \vec{o}_{1:t})$ with a computational complexity of only $\mathcal{O}(N)$.
+This approach uses the particle filter steps to update weighted paths $\{(W^i_t, \vec{X}_{1:t}^i)\}^N_{i=1}$, producing an accurate approximation of the filtering posterior $p(\vec{q}_{t} \mid \vec{o}_{1:t})$ with a computational complexity of only $\mathcal{O}(N)$.
 However, it gives a poor representation of previous states due a monotonic decrease of distinct particles caused by resampling of each weighted path \cite{Doucet11:ATO}.
 Based on this, more advanced methods like the forward-backward smoother \cite{doucet2000} and backward simulation \cite{Godsill04:MCS} were developed.
 Both methods are running backwards in time to reweight a set of particles recursively by using future observations.
@@ -2,38 +2,35 @@
 \label{sec:smoothing}

 The main purpose of this work is to provide MC smoothing methods in context of indoor localisation.
-\commentByFrank{algorithms?}
-As mentioned before, those algorithm are able to compute probability distributions in the form of $p(\mStateVec_t \mid \mObsVec_{1:T})$ and are therefore able to make use of future observations between $t$ and $T$.
-\commentByFrank{maybe add the $t << T$ again? it has been a while and might be confusing with capital and lowercase t}
+As mentioned before, those algorithms are able to compute probability distributions in the form of $p(\mStateVec_t \mid \mObsVec_{1:T})$ and are therefore able to make use of future observations between $t$ and $T$, where $t << T$.

 %Especially fixed-lag smoothing is very promising in context of pedestrian localisation.
 In the following we discuss the algorithmic details of the forward-backward smoother and the backward simulation.
 Further, a novel approach for incorporating them into the localisation system is shown.

 \subsection{Forward-backward Smoother}

-\commentByFrank{Smoother (capital S) as in the caption?}
 The forward-backward smoother (FBS) of \cite{Doucet00:OSM} is a well established alternative to the simple filter-smoother. The foundation of this algorithm was again laid by Kitagawa in \cite{kitagawa1987non}.
 An approximation is given by
 \begin{equation}
 p(\vec{q}_t \mid \vec{o}_{1:T}) \approx \sum^N_{i=1} W^i_{t \mid T} \delta_{\vec{X}^i_{t}}(\vec{q}_{t}) \enspace,
 \label{eq:approxFBS}
 \end{equation}
-\commentByFrank{support?}
+%\commentByFrank{support?}
-\commentByFrank{is $\delta$ explained anywhere?}
-\commentByFrank{is lowercase $\vec{x}$ explained anywhere?}
-\commentByFrank{is the notation $A_{b \mid c}$ known? it does not tell me anything yet}
 where $p(\vec{q}_t \mid \vec{o}_{1:T})$ has the same support as the filtering distribution $p(\vec{q}_t \mid \vec{o}_{1:t})$, but the weights are different.
 This means, that the FBS maintains the original particle locations and just reweights the particles to obtain a smoothed density.
+$\delta_{\vec{X}^i_{t}}$ denotes the Dirac delta function.
 The complete FBS can be seen in algorithm \ref{alg:forward-backwardSmoother} in pseudo-algorithmic form.
-\commentByFrank{maybe explain the forward step in a bit more detail since it is used here for the first time? or is that sufficiently well known? :P}
+%\commentByFrank{maybe explain the forward step in a bit more detail since it is used here for the first time? or is that sufficiently well known? :P}
+%\commentByToni{That is certainly worth considering... let's see what Frank D. says about it.}
 At first, the algorithm obtains the filtered distribution (particles) by deploying a forward step at each time $t$.
-\commentByFrank{nitpicking: does one say smoothing distribution or smoothed distribution? I am not up to speed there}
+%\commentByFrank{nitpicking: does one say smoothing distribution or smoothed distribution? I am not up to speed there}
+%\commentByToni{smoothing has a different distribution than filtering, hence smoothing distribution}
 Then the backward step for determining the smoothing distribution is carried out.
 The weights are obtained through the backward recursion in line 9.
-\commentByFrank{to me (as a layman) it is not clear: do I first perform all forward steps (i.e. run through to the end of the path) and then go backwards from there (that is how the text sounds), or do I go backwards after every forward step (that is how the pseudocode sounds)}
+%\commentByFrank{to me (as a layman) it is not clear: do I first perform all forward steps (i.e. run through to the end of the path) and then go backwards from there (that is how the text sounds), or do I go backwards after every forward step (that is how the pseudocode sounds)}
+%\commentByToni{it sounds like that everywhere to me? how do you arrive at the second assumption? in the end it also depends on whether you do fixed-lag or fixed-interval.}

-\commentByFrank{the order of $\{ W^i_t, \vec{X}^i_t\}^N_{i=1}$ was the other way round above. not a big deal. just for consistency :P}
+%\commentByFrank{the order of $\{ W^i_t, \vec{X}^i_t\}^N_{i=1}$ was the other way round above. not a big deal. just for consistency :P}
 \label{alg:forward-backwardSmoother}

 \begin{algorithm}[t]
@@ -41,9 +38,9 @@ The weights are obtained through the backward recursion in line 9.
 \begin{algorithmic}[1] % The number tells where the line numbering should start
 \For{$t = 1$ \textbf{to} $T$} \Comment{Filtering}
 \State{Obtain the weighted trajectories $ \{ W^i_t, \vec{X}^i_t\}^N_{i=1}$}
+\todo{Describe the filtering in more detail here?}
 \EndFor
 \For{ $i = 1$ \textbf{to} $N$} \Comment{Initialization}
-\commentByFrank{$t \mid T$ or $T \mid T$?}
 \State{Set $W^i_{T \mid T} = W^i_T$}
 \EndFor
 \For{$t = T-1$ \textbf{to} $1$} \Comment{Smoothing}
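The backward recursion referred to above (line 9 of the FBS pseudocode) reweights each filtering particle using the already smoothed weights of time t+1. The standalone C++ sketch below illustrates the usual O(N^2) form of this reweighting under stated assumptions; transDensity() stands for the model's f(X^j_{t+1} | X^i_t) and is supplied by the caller, and the variable names are ours, not the repository's.

// Sketch (editorial illustration): one O(N^2) backward reweighting step of the FBS.
// `filterWt` holds the filtering weights W_t^i; `smoothW` holds W_{t+1|T}^j on entry
// and is replaced by W_{t|T}^i on return. transDensity(i, j) returns f(X_{t+1}^j | X_t^i).
#include <functional>
#include <vector>

void fbsBackwardStep(const std::vector<double>& filterWt,
                     std::vector<double>& smoothW,
                     const std::function<double(int, int)>& transDensity) {
	const int N = static_cast<int>(filterWt.size());
	std::vector<double> out(N, 0.0);
	for (int j = 0; j < N; ++j) {
		// normalisation over all filtering particles for target particle j of time t+1
		double denom = 0.0;
		for (int k = 0; k < N; ++k) denom += filterWt[k] * transDensity(k, j);
		if (denom <= 0.0) continue;
		for (int i = 0; i < N; ++i)
			out[i] += smoothW[j] * filterWt[i] * transDensity(i, j) / denom;
	}
	smoothW = out;
}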
@@ -61,13 +58,13 @@ $}


 %Problems? Drawbacks? Complexity etc.
-\commentByFrank{do I need to have read the source to understand this? because it is not clear to me as it stands}
+%\commentByFrank{do I need to have read the source to understand this? because it is not clear to me as it stands}
+%\commentByToni{One should have read it.}
 By reweighting the filter particles, the FBS improves the simple filter-smoother by removing its dependence on the inheritance (smoothed) paths \cite{fearnhead2010sequential}. However, by looking at algorithm \ref{alg:forward-backwardSmoother} it can easily be seen that this approach computes in $\mathcal{O}(N^2)$, where the calculation of each particle's weight is an $\mathcal{O}(N)$ operation. To reduce this computational bottleneck, \cite{klaas2006fast} introduced a solution using algorithms from N-body simulation. By integrating dual tree recursions and fast multipole techniques with the FBS, a run-time cost of $\mathcal{O}(N \log N)$ can be achieved.

 \subsection{Backward Simulation}
 For smoothing applications with a high number of particles, it is often not necessary to use all particles for smoothing.
-\commentByFrank{certain = accurate?}
-This decision can for example be made due to a high sample impoverishment and/or highly certain sensors.
+This decision can for example be made due to a high sample impoverishment and/or highly accurate sensors.
 By choosing a good sub-set for representing the posterior distribution, it is theoretically possible to further improve the estimation.

 Therefore, \cite{Godsill04:MCS} presented the backward simulation (BS). Where a number of independent sample realisations
@@ -96,9 +93,11 @@ from the entire smoothing density are used to approximate the smoothing distribu
 %
 This method can be seen in algorithm \ref{alg:backwardSimulation} in pseudo-algorithmic form.
 Again, a particle filter is performed at first and then the smoothing procedure gets applied.
-\commentByFrank{this sounds as if particle filter and smoothing were two completely different things.}
+%\commentByFrank{this sounds as if particle filter and smoothing were two completely different things.}
-\commentByFrank{what does 'drawn approximately' mean? according to which criteria?}
+%\commentByToni{Well, in a way they are.}
+%\commentByFrank{what does 'drawn approximately' mean? according to which criteria?}
 Here, $\tilde{\vec{q}}_t$ is a random sample drawn approximately from $p(\vec{q}_{t} \mid \tilde{\vec{q}}_{t+1}, \vec{o}_{1:T})$.
+For example $\tilde{\vec{q}}_t$ could be chosen by selecting particles within a cumulative frequency.
 Therefore $\tilde{\vec{q}}_{1:T} = (\tilde{\vec{q}}_{1}, \tilde{\vec{q}}_{2}, ...,\tilde{\vec{q}}_{T})$ is one particular sample
 realisation from $p(\vec{q}_{1:T} \mid \vec{o}_{1:T})$.
 Further independent realisations are obtained by repeating the algorithm until the desired number $N_{\text{sample}}$ is reached.
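The added sentence above suggests choosing $\tilde{\vec{q}}_t$ "within a cumulative frequency". The standalone C++ sketch below illustrates one such backward draw under stated assumptions: the filtering weights of time t are reweighted with the transition density towards the already drawn $\tilde{\vec{q}}_{t+1}$, and one particle index is sampled from the resulting cumulative distribution. transDensity and the Particle type are assumptions, not the repository's API.

// Sketch (editorial illustration): one backward-simulation draw of q~_t given q~_{t+1}.
#include <algorithm>
#include <functional>
#include <random>
#include <vector>

struct Particle { double x, y, z; double weight; };   // assumed state layout

// Returns the index of the particle selected as q~_t.
// transDensity(i) evaluates f(q~_{t+1} | X_t^i) for filtering particle i of time t.
int drawBackwardSample(const std::vector<Particle>& filterParticles,
                       const std::function<double(int)>& transDensity,
                       std::mt19937& rng) {
	// backward weights: W_{t|t+1}^i proportional to W_t^i * f(q~_{t+1} | X_t^i)
	std::vector<double> cdf(filterParticles.size());
	double sum = 0.0;
	for (std::size_t i = 0; i < filterParticles.size(); ++i) {
		sum += filterParticles[i].weight * transDensity(static_cast<int>(i));
		cdf[i] = sum;
	}
	// sample one index from the cumulative distribution
	std::uniform_real_distribution<double> u(0.0, sum);
	const auto it = std::lower_bound(cdf.begin(), cdf.end(), u(rng));
	return static_cast<int>(it - cdf.begin());
}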
@@ -121,14 +120,12 @@ p(\vec{q}_{t+1} \mid \vec{q}_t, \mObsVec_t)_{\text{step}} = \mathcal{N}(\Delta d
 \end{equation}
 we receive a statement about how likely it is to cover a distance $\Delta d_t$ between two states $\vec{q}_{t+1}$ and $\vec{q}_{t}$.
 In the easiest case, $\Delta d_t$ is the linear distance between two states.
-\commentByFrank{summarize: sum up?}
-Of course, based on the graph structure, one could calculate the shortest path between both and summarize the respective edge lengths.
+Of course, based on the graph structure, one could calculate the shortest path between both and sum up the respective edge lengths.
 However, this requires tremendous calculation time for negligible improvements.
 Therefore this is not further discussed within this work.
 The average step length $\mu_{\text{step}}$ is based on the pedestrian's walking speed and $\sigma_{\gDist}^2$ denotes the step length's variance.
 Both values are chosen depending on the activity $x$ recognized at time $t$.
-\commentByFrank{then or than?}
-For example $\mu_{\text{step}}$ gets smaller while a pedestrian is walking upstairs, then just walking straight.
+For example $\mu_{\text{step}}$ gets smaller while a pedestrian is walking upstairs, than just walking straight.
 This requires to extend the smoothing transition by the current observation $\mObsVec_t$.
 Since $\mStateVec$ is hidden and the Markov property is satisfied, we are able to do so.
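The hunk above describes the step-distance term of the smoothing transition, a Gaussian over the covered distance with an activity-dependent mean and variance. The standalone C++ sketch below illustrates such a density; the concrete means and deviations are placeholders, not the values used in the experiments.

// Sketch (editorial illustration): activity-dependent step-distance density of the smoothing transition.
#include <cmath>

enum class Activity { Unknown, Standing, Walking, StairsUp, StairsDown };

// Gaussian density N(d; mu, sigma^2)
double gaussianDensity(double d, double mu, double sigma) {
	const double z = (d - mu) / sigma;
	return std::exp(-0.5 * z * z) / (sigma * std::sqrt(2.0 * 3.14159265358979323846));
}

// p(q_{t+1} | q_t, o_t)_step evaluated for the linear distance deltaD between the two states;
// mean and deviation are placeholder numbers chosen per recognized activity.
double stepDistanceDensity(double deltaD, Activity activity) {
	double mu = 0.7, sigma = 0.5;           // walking straight (assumed defaults)
	switch (activity) {
		case Activity::Standing:   mu = 0.0; sigma = 0.2; break;
		case Activity::StairsUp:                           // smaller steps on stairs
		case Activity::StairsDown: mu = 0.4; sigma = 0.3; break;
		default: break;                                    // Unknown/Walking keep defaults
	}
	return gaussianDensity(deltaD, mu, sigma);
}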
@@ -168,7 +165,6 @@ Looking at \refeq{eq:smoothingTransDistance} to \refeq{eq:smoothingTransPressure
 \end{equation}
 %
 It is important to notice, that all particles at each time step $t$ of the forward filtering need to be saved.
-\commentByFrank{increases?}
-Therefore, the memory requirement increasing proportional to the processing time.
+Therefore, the memory requirement increases proportional to the processing time.


@@ -18,7 +18,8 @@ Therefore, a Bayes filter that satisfies the Markov property is used to calculat
 \end{equation}
 %
 Here, the previous observation $\mObsVec_{t-1}$ is included into the state transition \cite{Koeping14-PSA}.
 For approximating eq. \eqref{equ:bayesInt} by means of MC methods, the transition is used as proposal distribution, also known as CONDENSATION algorithm \cite{isard1998smoothing}.
+The handle the phenomenon of weight degeneracy we additionally apply a resampling step.

 In context of indoor localisation, the hidden state $\mStateVec$ is defined as follows:
 \begin{equation}
@@ -36,9 +37,7 @@ covering all relevant sensor measurements.
 Here, $\mRssiVec_\text{wifi}$ and $\mRssiVec_\text{ib}$ contain the measurements of all nearby \docAP{}s (\docAPshort{}) and \docIBeacon{}s, respectively.
 $\mObsHeading$ and $\mObsSteps$ describe the relative angular change and the number of steps detected for the pedestrian.
 $\mObsPressure$ is the relative barometric pressure with respect to a fixed reference.
-Finally, $\mObsActivity$
-\commentByLukas{Probably just a placeholder at the moment. But x overlaps with the x of the position. How about $\Omega$}
-\commentByFrank{yes, it was a placeholder, I had also intended Omega} contains the activity, currently estimated for the pedestrian, which is one of:
+Finally, $\mObsActivity$ contains the activity, currently estimated for the pedestrian, which is one of:
 unknown, standing, walking, walking stairs up or walking stairs down.

 The probability density of the state evaluation is given by
@@ -60,7 +59,3 @@ The barometer information is evaluated using $p(\vec{o}_t \mid \vec{q}_t)_\text{
 is given by $p(\vec{o}_t \mid \vec{q}_t)_\text{ib}$ for \docIBeacon{}s and by $p(\vec{o}_t \mid \vec{q}_t)_\text{wifi}$ for \docWIFI{}.

-
-
-
-