@inbook{f405a98f7eed4a8096df98ab7d1d6d75,
  title     = {{J-P}: {MDP}. {FP}. {PP}: Characterizing Total Expected Rewards in {Markov} Decision Processes as Least Fixed Points with an Application to Operational Semantics of Probabilistic Programs},
  abstract  = {Markov decision processes (MDPs) with rewards are a widespread and well-studied model for systems that make both probabilistic and nondeterministic choices. A fundamental result about MDPs is that their minimal and maximal expected rewards satisfy Bellman's optimality equations. For various classes of MDPs -- notably finite-state MDPs, positive bounded models, and negative models -- expected rewards are known to be the least solution of those equations. However, these classes of MDPs are too restrictive for probabilistic program verification. In particular, they assume that all rewards are finite. This is already not the case for the expected runtime of a simple probabilistic program modeling a 1-dimensional random walk. In this paper, we develop a generalized least fixed point characterization of expected rewards in MDPs without those restrictions. Furthermore, we demonstrate how said characterization can be leveraged to prove weakest-preexpectation-style calculi sound with respect to an operational MDP model.},
  author    = {Batz, Kevin and Kaminski, Benjamin Lucien and Matheja, Christoph and Winkler, Tobias},
  year      = {2025},
  doi       = {10.1007/978-3-031-75783-9_11},
  language  = {English},
  isbn      = {978-3-031-75782-2},
  volume    = {15260},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {255--302},
  booktitle = {Principles of Verification: Cycling the Probabilistic Landscape},
}