From 8370920d7da43d66a2d91c6dae8778f888ba40a6 Mon Sep 17 00:00:00 2001 From: toni Date: Tue, 20 Feb 2018 15:58:18 +0100 Subject: [PATCH] replace algo pos --- tex/chapters/experiments.tex | 4 ++-- tex/chapters/mvg.tex | 2 +- tex/chapters/usage.tex | 18 +++++++++--------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tex/chapters/experiments.tex b/tex/chapters/experiments.tex index 57a0b16..58ef304 100644 --- a/tex/chapters/experiments.tex +++ b/tex/chapters/experiments.tex @@ -5,8 +5,8 @@ We now empirically evaluate the accuracy of our method, using the mean integrate The ground truth is given as $N=1000$ synthetic samples drawn from a bivariate mixture normal density $f$ \begin{equation} \begin{split} - \bm{X} \sim &\G{\VecTwo{0}{0}}{0.5\bm{I}} + \G{\VecTwo{3}{0}}{\bm{I}} \\ - &+ \G{\VecTwo{0}{3}}{\bm{I}} + \G{\VecTwo{-3}{0} }{\bm{I}} + \G{\VecTwo{0}{-3}}{\bm{I}} + \bm{X} \sim & ~\G{\VecTwo{0}{0}}{0.5\bm{I}} + \G{\VecTwo{3}{0}}{\bm{I}} + \G{\VecTwo{0}{3}}{\bm{I}} \\ + &+ \G{\VecTwo{-3}{0} }{\bm{I}} + \G{\VecTwo{0}{-3}}{\bm{I}} \end{split} \end{equation} where the majority of the probability mass lies in the range $[-6; 6]^2$. diff --git a/tex/chapters/mvg.tex b/tex/chapters/mvg.tex index bc72ff6..6432904 100644 --- a/tex/chapters/mvg.tex +++ b/tex/chapters/mvg.tex @@ -61,7 +61,7 @@ This recursive calculation scheme further reduces the time complexity of the box Furthermore, only one addition and subtraction is required to calculate a single output value. The overall algorithm to efficiently compute \eqref{eq:boxFilt} is listed in Algorithm~\ref{alg:naiveboxalgo}. 
-\begin{algorithm}[ht] +\begin{algorithm}[t] \caption{Recursive 1D box filter} \label{alg:naiveboxalgo} \begin{algorithmic}[1] diff --git a/tex/chapters/usage.tex b/tex/chapters/usage.tex index d7f028c..54e0888 100644 --- a/tex/chapters/usage.tex +++ b/tex/chapters/usage.tex @@ -5,7 +5,15 @@ %As the density estimation poses only a single step in the whole process, its computation needs to be as fast as possible. % not taking to much time from the frame -\begin{algorithm}[ht] +Consider a set of two-dimensional samples with associated weights, e.g.\ generated from a particle filter system. +The overall process for bivariate data is described in Algorithm~\ref{alg:boxKDE}. + +Assuming that the given $N$ samples are stored in a sequential list, the first step is to create a grid representation. +In order to efficiently construct the grid and to allocate the required memory, the extrema of the samples need to be known in advance. +These limits might be given by the application; for example, the position of a pedestrian within a building is limited by the physical dimensions of the building. +Such knowledge should be integrated into the system to avoid a linear search over the sample set, naturally reducing the computation time. + +\begin{algorithm}[t] \caption{Bivariate \textsc{boxKDE}} \label{alg:boxKDE} \begin{algorithmic}[1] @@ -42,14 +50,6 @@ \end{algorithmic} \end{algorithm} -Consider a set of two-dimensional samples with associated weights, e.g. presumably generated from a particle filter system. -The overall process for bivariate data is described in Algorithm~\ref{alg:boxKDE}. - -Assuming that the given $N$ samples are stored in a sequential list, the first step is to create a grid representation. -In order to efficiently construct the grid and to allocate the required memory the extrema of the samples need to be known in advance. 
-These limits might be given by the application, for example, the position of a pedestrian within a building is limited by the physical dimensions of the building. -Such knowledge should be integrated into the system to avoid a linear search over the sample set, naturally reducing the computation time. - Given the extreme values of the samples and grid sizes $G_1$ and $G_2$ defined by the user, a $G_1\times G_2$ grid can be constructed, using a binning rule from \eqref{eq:simpleBinning} or \eqref{eq:linearBinning}. As the number of grid points directly affects both computation time and accuracy, a suitable grid should be as coarse as possible, but at the same time narrow enough to produce an estimate sufficiently fast with an acceptable approximation error.