Další formáty:
BibTeX
LaTeX
RIS
@inproceedings{1562480,
  author    = {Chatterjee, Krishnendu and Novotný, Petr and Pérez, Guillermo A. and Raskin, Jean-Francois and Žikelić, Djordje},
  title     = {Optimizing Expectation with Guarantees in {POMDPs}},
  booktitle = {Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence ({AAAI})},
  editor    = {Singh, Satinder P. and Markovitch, Shaul},
  publisher = {AAAI Press},
  year      = {2017},
  pages     = {3725--3732},
  keywords  = {Partially-observable Markov decision processes; Discounted payoff; Probabilistic planning; Verification},
  note      = {Online},
  url       = {http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14354},
}
TY  - CPAPER
ID  - 1562480
AU  - Chatterjee, Krishnendu
AU  - Novotný, Petr
AU  - Pérez, Guillermo A.
AU  - Raskin, Jean-Francois
AU  - Žikelić, Djordje
PY  - 2017
TI  - Optimizing Expectation with Guarantees in POMDPs
PB  - AAAI Press
SP  - 3725
EP  - 3732
KW  - Partially-observable Markov decision processes
KW  - Discounted payoff
KW  - Probabilistic planning
KW  - Verification
UR  - http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14354
N2  - A standard objective in partially-observable Markov decision processes (POMDPs) is to find a policy that maximizes the expected discounted-sum payoff. However, such policies may still permit unlikely but highly undesirable outcomes, which is problematic especially in safety-critical applications. Recently, there has been a surge of interest in POMDPs where the goal is to maximize the probability to ensure that the payoff is at least a given threshold, but these approaches do not consider any optimization beyond satisfying this threshold constraint. In this work we go beyond both the “expectation” and “threshold” approaches and consider a “guaranteed payoff optimization (GPO)” problem for POMDPs, where we are given a threshold t and the objective is to find a policy σ such that a) each possible outcome of σ yields a discounted-sum payoff of at least t, and b) the expected discounted-sum payoff of σ is optimal (or near-optimal) among all policies satisfying a). We present a practical approach to tackle the GPO problem and evaluate it on standard POMDP benchmarks.
ER  - 
CHATTERJEE, Krishnendu, Petr NOVOTNÝ, Guillermo A. PÉREZ, Jean-Francois RASKIN a Djordje ŽIKELIĆ. Optimizing Expectation with Guarantees in POMDPs. Online. In Satinder P. Singh and Shaul Markovitch. \textit{Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI)}. AAAI Press, 2017, s.~3725--3732.
|