Laugel, Thibault; Renard, Xavier; Lesot, Marie-Jeanne; Marsala, Christophe; Detyniecki, Marcin
Defining Locality for Surrogates in Post-hoc Interpretablity Proceedings Article
In: Workshop on Human Interpretability for Machine Learning (WHI)-International Conference on Machine Learning (ICML), 2018.
Abstract | Links | BibTeX | Tags: Explainability & Interpretability
@inproceedings{laugel2018defining,
  title         = {Defining Locality for Surrogates in Post-hoc Interpretablity},
  author        = {Laugel, Thibault and Renard, Xavier and Lesot, Marie-Jeanne and Marsala, Christophe and Detyniecki, Marcin},
  url           = {https://arxiv.org/abs/1806.07498},
  eprint        = {1806.07498},
  archiveprefix = {arXiv},
  year          = {2018},
  date          = {2018-01-01},
  booktitle     = {Workshop on Human Interpretability for Machine Learning ({WHI})-International Conference on Machine Learning ({ICML})},
  abstract      = {Local surrogate models, to approximate the local decision boundary of a black-box classifier, constitute one approach to generate explanations for the rationale behind an individual prediction made by the black-box. This paper highlights the importance of defining the right locality, the neighborhood on which a local surrogate is trained, in order to approximate accurately the local black-box decision boundary. Unfortunately, as shown in this paper, this issue is not only a parameter or sampling distribution challenge and has a major impact on the relevance and quality of the approximation of the local black-box decision boundary and thus on the meaning and accuracy of the generated explanation. To overcome the identified problems, quantified with an adapted measure and procedure, we propose to generate surrogate-based explanations for individual predictions based on a sampling centered on particular place of the decision boundary, relevant for the prediction to be explained, rather than on the prediction itself as it is classically done. We evaluate the novel approach compared to state-of-the-art methods and a straightforward improvement thereof on four UCI datasets.},
  keywords      = {Explainability \& Interpretability},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {NOTE(review): "Interpretablity" spelling matches the published arXiv title -- confirm before "fixing"; "back-box" in the abstract corrected to "black-box"},
}
Laugel, Thibault; Lesot, Marie-Jeanne; Marsala, Christophe; Renard, Xavier; Detyniecki, Marcin
Comparison-based Inverse Classification for Interpretability in Machine Learning Proceedings Article
In: 17th International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems (IPMU 2018), pp. 100–111, Springer Verlag 2018.
BibTeX | Tags: Explainability & Interpretability
@inproceedings{laugel2018comparison,
  title         = {Comparison-based Inverse Classification for Interpretability in Machine Learning},
  author        = {Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin},
  year          = {2018},
  date          = {2018-01-01},
  booktitle     = {17th International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems ({IPMU} 2018)},
  pages         = {100--111},
  publisher     = {Springer Verlag},
  keywords      = {Explainability \& Interpretability},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {NOTE(review): Springer Verlag moved from organization (sponsor field) to publisher -- verify rendering in the site's bib style},
}
Laugel, Thibault; Lesot, Marie-Jeanne; Marsala, Christophe; Renard, Xavier; Detyniecki, Marcin
Inverse classification for comparison-based interpretability in machine learning Journal Article
In: arXiv preprint arXiv:1712.08443, 2017.
Abstract | Links | BibTeX | Tags: Explainability & Interpretability
@article{laugel2017inverse,
  title         = {Inverse classification for comparison-based interpretability in machine learning},
  author        = {Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin},
  url           = {https://arxiv.org/abs/1712.08443},
  eprint        = {1712.08443},
  archiveprefix = {arXiv},
  year          = {2017},
  date          = {2017-01-01},
  journal       = {arXiv preprint arXiv:1712.08443},
  abstract      = {In the context of post-hoc interpretability, this paper addresses the task of explaining the prediction of a classifier, considering the case where no information is available, neither on the classifier itself, nor on the processed data (neither the training nor the test data). It proposes an instance-based approach whose principle consists in determining the minimal changes needed to alter a prediction: given a data point whose classification must be explained, the proposed method consists in identifying a close neighbour classified differently, where the closeness definition integrates a sparsity constraint. This principle is implemented using observation generation in the Growing Spheres algorithm. Experimental results on two datasets illustrate the relevance of the proposed approach that can be used to gain knowledge about the classifier.},
  keywords      = {Explainability \& Interpretability},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): canonical eprint/archiveprefix fields added; journal kept as "arXiv preprint ..." for renderer compatibility},
}