Citation

BibTeX format

@article{Albini:2023:10.3389/frai.2023.1099407,
author = {Albini, E and Rago, A and Baroni, P and Toni, F},
doi = {10.3389/frai.2023.1099407},
journal = {Frontiers in Artificial Intelligence},
pages = {1--18},
title = {Achieving descriptive accuracy in explanations via argumentation: the case of probabilistic classifiers},
url = {http://dx.doi.org/10.3389/frai.2023.1099407},
volume = {6},
year = {2023}
}
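
To reuse this entry programmatically, any standard BibTeX library will do. A minimal sketch in Python, assuming the third-party bibtexparser package (v1 loads API); the entry string is copied verbatim from the block above:

import bibtexparser  # pip install bibtexparser (v1 API assumed)

# The entry below is copied verbatim from the BibTeX block above.
BIBTEX = """@article{Albini:2023:10.3389/frai.2023.1099407,
author = {Albini, E and Rago, A and Baroni, P and Toni, F},
doi = {10.3389/frai.2023.1099407},
journal = {Frontiers in Artificial Intelligence},
pages = {1--18},
title = {Achieving descriptive accuracy in explanations via argumentation: the case of probabilistic classifiers},
url = {http://dx.doi.org/10.3389/frai.2023.1099407},
volume = {6},
year = {2023}
}"""

db = bibtexparser.loads(BIBTEX)   # parse the string into a BibDatabase
entry = db.entries[0]             # each entry is a plain dict of fields
print(entry["title"])
print(entry["doi"], entry["year"])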

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - The pursuit of trust in and fairness of AI systems in order to enable human-centric goals has been gathering pace of late, often supported by the use of explanations for the outputs of these systems. Several properties of explanations have been highlighted as critical for achieving trustworthy and fair AI systems, but one that has thus far been overlooked is that of descriptive accuracy (DA), i.e., that the explanation contents are in correspondence with the internal working of the explained system. Indeed, the violation of this core property would lead to the paradoxical situation of systems producing explanations which are not suitably related to how the system actually works: clearly this may hinder user trust. Further, if explanations violate DA then they can be deceitful, resulting in an unfair behavior toward the users. Crucial as the DA property appears to be, it has been somehow overlooked in the XAI literature to date. To address this problem, we consider the questions of formalizing DA and of analyzing its satisfaction by explanation methods. We provide formal definitions of naive, structural and dialectical DA, using the family of probabilistic classifiers as the context for our analysis. We evaluate the satisfaction of our given notions of DA by several explanation methods, amounting to two popular feature-attribution methods from the literature, variants thereof and a novel form of explanation that we propose. We conduct experiments with a varied selection of concrete probabilistic classifiers and highlight the importance, with a user study, of our most demanding notion of dialectical DA, which our novel method satisfies by design and others may violate. We thus demonstrate how DA could be a critical component in achieving trustworthy and fair systems, in line with the principles of human-centric AI.
AU  - Albini, E
AU  - Rago, A
AU  - Baroni, P
AU  - Toni, F
DO  - 10.3389/frai.2023.1099407
EP  - 18
PY  - 2023///
SN  - 2624-8212
SP  - 1
TI  - Achieving descriptive accuracy in explanations via argumentation: the case of probabilistic classifiers
T2  - Frontiers in Artificial Intelligence
UR  - http://dx.doi.org/10.3389/frai.2023.1099407
UR  - https://www.ncbi.nlm.nih.gov/pubmed/37091304
UR  - https://www.frontiersin.org/articles/10.3389/frai.2023.1099407/full
UR  - http://hdl.handle.net/10044/1/104094
VL  - 6
ER  - 
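
The RIS record can be consumed in much the same way; a minimal sketch, assuming the third-party rispy package (field names follow rispy's default tag mapping, e.g. TI becomes "title", AU becomes "authors"). A shortened copy of the record keeps the example self-contained; in practice you would load the full record from a .ris file:

import rispy  # pip install rispy

# Shortened copy of the record above, just enough for a self-contained demo.
RIS = """TY  - JOUR
AU  - Albini, E
AU  - Rago, A
AU  - Baroni, P
AU  - Toni, F
TI  - Achieving descriptive accuracy in explanations via argumentation: the case of probabilistic classifiers
T2  - Frontiers in Artificial Intelligence
DO  - 10.3389/frai.2023.1099407
PY  - 2023///
VL  - 6
SP  - 1
EP  - 18
ER  - 
"""

record = rispy.loads(RIS)[0]   # rispy returns a list of dicts, one per record
print(record["title"])         # TI field
print(record["doi"])           # DO field
print(record["authors"])       # AU lines are collected into a list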