Tracking a Markov Target in a Discrete Environment With Multiple Sensors

@article{leahy_tracking_2019,
  title = {Tracking a {Markov} {Target} in a {Discrete} {Environment} {With} {Multiple} {Sensors}},
  volume = {64},
  issn = {0018-9286, 1558-2523, 2334-3303},
  url = {https://ieeexplore.ieee.org/document/8416729/},
  abstract = {In this work, we consider using multiple noisy binary sensors to track a target that moves as a Markov chain in a finite discrete environment, with symmetric probability of false alarm and missed detection. We study two policies. Firstly, we show that the greedy policy, whereby m sensors are placed at the m most likely target locations, is one-step optimal in that it maximizes the expected maximum a posteriori (MAP) estimate. Secondly, we show that a policy in which the m sensors are placed in the second through (m+1)st most likely target locations achieves equal or slightly worse expected MAP performance, but leads to significantly decreased variance on the MAP estimate. The result is proven for m = 1, and Monte Carlo simulations give evidence for m {\textgreater} 1. Both policies are closed-loop, index-based active sensing strategies that are computationally trivial to implement. Our approach focuses on one-step optimality because of the apparent intractability of computing an optimal policy via dynamic programming in belief space. However, Monte Carlo simulations suggest that both policies perform well over arbitrary horizons.},
  language = {en},
  number = {6},
  urldate = {2021-02-21},
  journal = {IEEE Transactions on Automatic Control},
  author = {Leahy, Kevin and Schwager, Mac},
  month = jun,
  year = {2019},
  keywords = {Belief Space Planning, Dynamic Programming, Information Gathering, Multi-Agent Control, Optimal Search},
  pages = {2396--2411}
}
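
Below is a minimal sketch (my own, not code from the paper) of the two index-based policies described in the abstract, assuming a row-stochastic transition matrix P over N cells and a single symmetric error probability eps for both false alarms and missed detections. The function names, the 10-cell random-walk environment, and the demo parameters are illustrative assumptions.

import numpy as np

def predict(belief, P):
    """Propagate the belief one step through the row-stochastic transition matrix P."""
    return belief @ P

def update(belief, sensed_cells, readings, eps):
    """Bayes update for binary sensors with symmetric false-alarm/missed-detection probability eps."""
    likelihood = np.ones_like(belief)
    for cell, z in zip(sensed_cells, readings):
        per_sensor = np.full_like(belief, eps if z else 1.0 - eps)  # likelihood if target is elsewhere
        per_sensor[cell] = (1.0 - eps) if z else eps                # likelihood if target is in the sensed cell
        likelihood *= per_sensor
    posterior = belief * likelihood
    return posterior / posterior.sum()

def greedy_policy(belief, m):
    """Place m sensors at the m most likely cells (the one-step MAP-optimal policy)."""
    return np.argsort(belief)[::-1][:m]

def second_best_policy(belief, m):
    """Place m sensors at the 2nd through (m+1)st most likely cells (the variance-reducing policy)."""
    return np.argsort(belief)[::-1][1:m + 1]

# Tiny demo: 10-cell ring, lazy random-walk target, m = 2 sensors, error probability 0.1.
rng = np.random.default_rng(0)
N, m, eps, T = 10, 2, 0.1, 50
P = 0.5 * np.eye(N) + 0.25 * np.roll(np.eye(N), 1, axis=1) + 0.25 * np.roll(np.eye(N), -1, axis=1)
belief = np.full(N, 1.0 / N)
target = int(rng.integers(N))
for _ in range(T):
    target = int(rng.choice(N, p=P[target]))             # target moves as a Markov chain
    belief = predict(belief, P)
    cells = greedy_policy(belief, m)                      # or second_best_policy(belief, m)
    readings = [rng.random() < ((1 - eps) if c == target else eps) for c in cells]
    belief = update(belief, cells, readings, eps)
print("MAP estimate:", int(np.argmax(belief)), "true cell:", target)

Swapping greedy_policy for second_best_policy in the loop gives the comparison the paper studies: similar expected MAP performance but, per the abstract, noticeably lower variance on the MAP estimate.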