@inbook{77af00e30e834a5ea42da3943eae8eb2,
title = "Explanations for Attributing Deep Neural Network Predictions",
abstract = "Given the recent success of deep neural networks and their applications to more high impact and high risk applications, like autonomous driving and healthcare decision-making, there is a great need for faithful and interpretable explanations of “why” an algorithm is making a certain prediction. In this chapter, we introduce 1. Meta-Predictors as Explanations, a principled framework for learning explanations for any black box algorithm, and 2. Meaningful Perturbations, an instantiation of our paradigm applied to the problem of attribution, which is concerned with attributing what features of an input (i.e., regions of an input image) are responsible for a model{\textquoteright}s output (i.e., a CNN classifier{\textquoteright}s object class prediction). We first introduced these contributions in [8]. We also briefly survey existing visual attribution methods and highlight how they faith to be both faithful and interpretable.",
keywords = "Computer vision, Explainable artificial intelligence, Machine learning",
author = "Ruth Fong and Andrea Vedaldi",
note = "Publisher Copyright: {\textcopyright} Springer Nature Switzerland AG 2019.",
year = "2019",
doi = "10.1007/978-3-030-28954-6_8",
language = "English (US)",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Verlag",
pages = "149--167",
booktitle = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
address = "Germany",
}