@inbook{42a8729ba3c64a968aab1b2cba9c0a75,
title = "Explainable {AI} – Preface",
abstract = "The development of “intelligent” systems that can make decisions and act autonomously might lead to faster and more consistent decisions. A limiting factor for a broader adoption of AI technology is the inherent risk that comes with giving up human control and oversight to “intelligent” machines. For sensitive tasks involving critical infrastructures and affecting human well-being or health, it is crucial to limit the possibility of improper, non-robust and unsafe decisions and actions. Before deploying an AI system, we see a strong need to validate its behavior, and thus establish guarantees that it will continue to perform as expected when deployed in a real-world environment. In pursuit of that objective, ways for humans to verify the agreement between the AI decision structure and their own ground-truth knowledge have been explored. Explainable AI (XAI) has developed as a subfield of AI focused on exposing complex AI models to humans in a systematic and interpretable manner.",
author = "Samek, Wojciech and Montavon, Gr{\'e}goire and Vedaldi, Andrea and Hansen, Lars Kai and M{\"u}ller, Klaus-Robert",
year = "2019",
month = jan,
day = "1",
doi = "10.1007/978-3-030-28954-6",
language = "English",
isbn = "978-3-030-28953-9",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "v--vii",
booktitle = "Explainable AI: Interpreting, Explaining and Visualizing Deep Learning",
}