Bove, Clara; Laugel, Thibault; Lesot, Marie-Jeanne; Tijus, Charles; Detyniecki, Marcin
Why do explanations fail? A typology and discussion on failures in XAI Conference
arXiv preprint, 2024.
@conference{bove2024why,
title = {Why do explanations fail? A typology and discussion on failures in XAI},
author = {Clara Bove and Thibault Laugel and Marie-Jeanne Lesot and Charles Tijus and Marcin Detyniecki},
url = {https://arxiv.org/abs/2405.13474},
year = {2024},
date = {2024-05-22},
urldate = {2024-05-22},
booktitle = {arXiv preprint},
keywords = {Explainability & Interpretability, Human-Computer Interface & HCXAI},
pubstate = {published},
tppubtype = {conference}
}
Grari, Vincent; Laugel, Thibault; Hashimoto, Tatsunori; Lamprier, Sylvain; Detyniecki, Marcin
On the Fairness ROAD: Robust Optimization for Adversarial Debiasing Conference
The Twelfth International Conference on Learning Representations (ICLR 2024), 2024.
@conference{grari2024road,
title = {On the Fairness ROAD: Robust Optimization for Adversarial Debiasing},
author = {Vincent Grari and Thibault Laugel and Tatsunori Hashimoto and Sylvain Lamprier and Marcin Detyniecki},
url = {https://openreview.net/forum?id=xnhvVtZtLD},
year = {2024},
date = {2024-04-11},
urldate = {2024-04-11},
booktitle = {The Twelfth International Conference on Learning Representations (ICLR 2024)},
keywords = {Fairness, Robustness},
pubstate = {published},
tppubtype = {conference}
}
Rida, Adam; Lesot, Marie-Jeanne; Renard, Xavier; Marsala, Christophe
Dynamic Interpretability for Model Comparison via Decision Rules Conference
2023 ECML PKDD Workshop on Explainable Artificial Intelligence From Static to Dynamic (DynXAI), 2023.
@conference{rida2023dynamic,
title = {Dynamic Interpretability for Model Comparison via Decision Rules},
author = {Adam Rida and Marie-Jeanne Lesot and Xavier Renard and Christophe Marsala},
year = {2023},
date = {2023-09-18},
urldate = {2023-09-18},
booktitle = {2023 ECML PKDD Workshop on Explainable Artificial Intelligence From Static to Dynamic (DynXAI)},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {conference}
}
Jeyasothy, Adulam; Laugel, Thibault; Lesot, Marie-Jeanne; Marsala, Christophe; Detyniecki, Marcin
A general framework for personalising post hoc explanations through user knowledge integration Journal Article
In: International Journal of Approximate Reasoning, vol. 160, 2023.
@article{jeyasothy2023,
title = {A general framework for personalising post hoc explanations through user knowledge integration},
author = {Adulam Jeyasothy and Thibault Laugel and Marie-Jeanne Lesot and Christophe Marsala and Marcin Detyniecki},
doi = {10.1016/j.ijar.2023.108944},
year = {2023},
date = {2023-09-01},
urldate = {2023-09-01},
journal = {International Journal of Approximate Reasoning},
volume = {160},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Laugel, Thibault; Jeyasothy, Adulam; Lesot, Marie-Jeanne; Marsala, Christophe; Detyniecki, Marcin
Achieving Diversity in Counterfactual Explanations: a Review and Discussion Conference
Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency (FAccT), 2023.
@conference{laugel2023,
title = {Achieving Diversity in Counterfactual Explanations: a Review and Discussion},
author = {Thibault Laugel and Adulam Jeyasothy and Marie-Jeanne Lesot and Christophe Marsala and Marcin Detyniecki},
url = {https://arxiv.org/abs/2305.05840},
doi = {10.1145/3593013.3594122},
year = {2023},
date = {2023-06-15},
urldate = {2023-06-15},
booktitle = {Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency (FAccT)},
pages = {1859--1869},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {conference}
}
Bove, Clara; Lesot, Marie-Jeanne; Tijus, Charles; Detyniecki, Marcin
Investigating the Intelligibility of Plural Counterfactual Examples for Non-Expert Users, an Explanation User Interface Proposition and User Study Conference
Proceedings of the 28th International Conference on Intelligent User Interfaces, IUI'23, 2023.
@conference{bove2023intelligibility,
title = {Investigating the Intelligibility of Plural Counterfactual Examples for Non-Expert Users, an Explanation User Interface Proposition and User Study},
author = {Clara Bove and Marie-Jeanne Lesot and Charles Tijus and Marcin Detyniecki},
year = {2023},
date = {2023-03-27},
booktitle = {Proceedings of the 28th International Conference on Intelligent User Interfaces, IUI'23},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {conference}
}
Krco, Natasa; Laugel, Thibault; Loubes, Jean-Michel; Detyniecki, Marcin
When Mitigating Bias is Unfair: A Comprehensive Study on the Impact of Bias Mitigation Algorithms Unpublished
2023.
@unpublished{krco2023mitigating,
title = {When Mitigating Bias is Unfair: A Comprehensive Study on the Impact of Bias Mitigation Algorithms},
author = {Natasa Krco and Thibault Laugel and Jean-Michel Loubes and Marcin Detyniecki},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
note = {arXiv preprint arXiv:2302.07185},
keywords = {Explainability & Interpretability, Fairness},
pubstate = {published},
tppubtype = {unpublished}
}
Rychener, Yves; Renard, Xavier; Seddah, Djamé; Frossard, Pascal; Detyniecki, Marcin
On the Granularity of Explanations in Model Agnostic NLP Interpretability Journal Article
In: 2022 ECML-PKDD International Workshop on eXplainable Knowledge Discovery in Data Mining (XKDD), 2022.
@article{rychener2020sentence,
title = {On the Granularity of Explanations in Model Agnostic NLP Interpretability},
author = {Yves Rychener and Xavier Renard and Djamé Seddah and Pascal Frossard and Marcin Detyniecki},
url = {https://arxiv.org/abs/2012.13189},
year = {2022},
date = {2022-09-19},
urldate = {2020-01-01},
journal = {2022 ECML-PKDD International Workshop on eXplainable Knowledge Discovery in Data Mining (XKDD)},
abstract = {Current methods for Black-Box NLP interpretability, like LIME or SHAP, are based on altering the text to interpret by removing words and modeling the Black-Box response. In this paper, we outline limitations of this approach when using complex BERT-based classifiers: The word-based sampling produces texts that are out-of-distribution for the classifier and further gives rise to a high-dimensional search space, which can't be sufficiently explored when time or computation power is limited. Both of these challenges can be addressed by using segments as elementary building blocks for NLP interpretability. As illustration, we show that the simple choice of sentences greatly improves on both of these challenges. As a consequence, the resulting explainer attains much better fidelity on a benchmark classification task.},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Grari, Vincent; Lamprier, Sylvain; Detyniecki, Marcin
Fairness without the sensitive attribute via Causal Variational Autoencoder Proceedings Article
In: 2022 International Joint Conference on Artificial Intelligence (IJCAI), 2022.
@inproceedings{grari2022causal,
title = {Fairness without the sensitive attribute via Causal Variational Autoencoder},
author = {Vincent Grari and Sylvain Lamprier and Marcin Detyniecki},
year = {2022},
date = {2022-07-18},
urldate = {2022-07-18},
booktitle = {2022 International Joint Conference on Artificial Intelligence (IJCAI)},
abstract = {In recent years, most fairness strategies in machine learning models focus on mitigating unwanted biases by assuming that the sensitive information is observed. However this is not always possible in practice. Due to privacy purposes and various regulations such as the GDPR in the EU, many personal sensitive attributes are frequently not collected. We notice a lack of approaches for mitigating bias in such difficult settings, in particular for achieving classical fairness objectives such as Demographic Parity and Equalized Odds. By leveraging recent developments for approximate inference, we propose an approach to fill this gap. Based on a causal graph, we rely on a new variational auto-encoding based framework named SRCVAE to infer a sensitive information proxy, which serves for bias mitigation in an adversarial fairness approach. We empirically demonstrate significant improvements over existing works in the field. We observe that the generated proxy's latent space recovers sensitive information and that our approach achieves a higher accuracy while obtaining the same level of fairness on two real datasets, as measured using common fairness definitions.},
keywords = {Fairness},
pubstate = {published},
tppubtype = {inproceedings}
}
Grari, Vincent; Lamprier, Sylvain; Detyniecki, Marcin
Adversarial learning for counterfactual fairness Journal Article
In: Machine Learning, 2022.
@article{grari:hal-03923289,
title = {Adversarial learning for counterfactual fairness},
author = {Vincent Grari and Sylvain Lamprier and Marcin Detyniecki},
url = {https://hal.sorbonne-universite.fr/hal-03923289},
doi = {10.1007/s10994-022-06206-8},
year = {2022},
date = {2022-05-01},
urldate = {2022-05-01},
journal = {Machine Learning},
publisher = {Springer Verlag},
keywords = {Fairness},
pubstate = {published},
tppubtype = {article}
}
Grari, Vincent; Charpentier, Arthur; Lamprier, Sylvain; Detyniecki, Marcin
A fair pricing model via adversarial learning Journal Article
In: arXiv preprint arXiv:2202.12008, 2022.
@article{grari2022fair,
title = {A fair pricing model via adversarial learning},
author = {Vincent Grari and Arthur Charpentier and Sylvain Lamprier and Marcin Detyniecki},
year = {2022},
date = {2022-01-01},
journal = {arXiv preprint arXiv:2202.12008},
keywords = {Fairness},
pubstate = {published},
tppubtype = {article}
}
Bove, Clara; Aigrain, Jonathan; Lesot, Marie-Jeanne; Tijus, Charles; Detyniecki, Marcin
Contextualising local explanations for non-expert users: an XAI pricing interface for insurance Journal Article
In: Proceedings of the International Conference on Intelligent User Interfaces, IUI 2022, 2022.
@article{bove2022,
title = {Contextualising local explanations for non-expert users: an XAI pricing interface for insurance},
author = {Clara Bove and Jonathan Aigrain and Marie-Jeanne Lesot and Charles Tijus and Marcin Detyniecki},
year = {2022},
date = {2022-01-01},
journal = {Proceedings of the International Conference on Intelligent User Interfaces, IUI 2022},
keywords = {Human-Computer Interface & HCXAI},
pubstate = {published},
tppubtype = {article}
}
Jeyasothy, Adulam; Laugel, Thibault; Lesot, Marie-Jeanne; Marsala, Christophe; Detyniecki, Marcin
Integrating Prior Knowledge in Post-hoc Explanations Proceedings Article
In: Information Processing and Management of Uncertainty in Knowledge-Based Systems: 19th International Conference, IPMU 2022, Milan, Italy, July 11–15, 2022, Proceedings, Part II, pp. 707–719, Springer 2022.
@inproceedings{jeyasothy2022integrating,
title = {Integrating Prior Knowledge in Post-hoc Explanations},
author = {Adulam Jeyasothy and Thibault Laugel and Marie-Jeanne Lesot and Christophe Marsala and Marcin Detyniecki},
year = {2022},
date = {2022-01-01},
booktitle = {Information Processing and Management of Uncertainty in Knowledge-Based Systems: 19th International Conference, IPMU 2022, Milan, Italy, July 11--15, 2022, Proceedings, Part II},
pages = {707--719},
organization = {Springer},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {inproceedings}
}
Laugel, Thibault; Renard, Xavier; Detyniecki, Marcin
Explaining Local Discrepancies between Image Classification Models Journal Article
In: CVPR Explainable AI for Computer Vision Workshop (XAI4CV 2022), 2022.
@article{laugel2022,
title = {Explaining Local Discrepancies between Image Classification Models},
author = {Thibault Laugel and Xavier Renard and Marcin Detyniecki},
year = {2022},
date = {2022-01-01},
journal = {CVPR Explainable AI for Computer Vision Workshop (XAI4CV 2022)},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Poyiadzi, Rafael; Renard, Xavier; Laugel, Thibault; Santos-Rodriguez, Raul; Detyniecki, Marcin
Understanding surrogate explanations: the interplay between complexity, fidelity and coverage Journal Article
In: arXiv preprint arXiv:2107.04309, 2021.
@article{poyiadzi2021understanding,
title = {Understanding surrogate explanations: the interplay between complexity, fidelity and coverage},
author = {Rafael Poyiadzi and Xavier Renard and Thibault Laugel and Raul Santos-Rodriguez and Marcin Detyniecki},
url = {https://arxiv.org/abs/2107.04309},
year = {2021},
date = {2021-01-01},
journal = {arXiv preprint arXiv:2107.04309},
abstract = {This paper analyses the fundamental ingredients behind surrogate explanations to provide a better understanding of their inner workings. We start our exposition by considering global surrogates, describing the trade-off between complexity of the surrogate and fidelity to the black-box being modelled. We show that transitioning from global to local - reducing coverage - allows for more favourable conditions on the Pareto frontier of fidelity-complexity of a surrogate. We discuss the interplay between complexity, fidelity and coverage, and consider how different user needs can lead to problem formulations where these are either constraints or penalties. We also present experiments that demonstrate how the local surrogate interpretability procedure can be made interactive and lead to better explanations.},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Vermeire, Tom; Laugel, Thibault; Renard, Xavier; Martens, David; Detyniecki, Marcin
How to choose an Explainability Method? Towards a Methodical Implementation of XAI in Practice Journal Article
In: ECML PKDD International Workshop on eXplainable Knowledge Discovery in Data Mining (ECML XKDD 2021), 2021.
@article{vermeire2021choose,
title = {How to choose an Explainability Method? Towards a Methodical Implementation of XAI in Practice},
author = {Tom Vermeire and Thibault Laugel and Xavier Renard and David Martens and Marcin Detyniecki},
url = {https://arxiv.org/abs/2107.04427},
year = {2021},
date = {2021-01-01},
journal = {ECML PKDD International Workshop on eXplainable Knowledge Discovery in Data Mining (ECML XKDD 2021)},
abstract = {Explainability is becoming an important requirement for organizations that make use of automated decision-making due to regulatory initiatives and a shift in public awareness. Various and significantly different algorithmic methods to provide this explainability have been introduced in the field, but the existing literature in the machine learning community has paid little attention to the stakeholder whose needs are rather studied in the human-computer interface community. Therefore, organizations that want or need to provide this explainability are confronted with the selection of an appropriate method for their use case. In this paper, we argue there is a need for a methodology to bridge the gap between stakeholder needs and explanation methods. We present our ongoing work on creating this methodology to help data scientists in the process of providing explainability to stakeholders. In particular, our contributions include documents used to characterize XAI methods and user requirements (shown in Appendix), which our methodology builds upon.},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Poyiadzi, Rafael; Renard, Xavier; Laugel, Thibault; Santos-Rodriguez, Raul; Detyniecki, Marcin
On the overlooked issue of defining explanation objectives for local-surrogate explainers Journal Article
In: International Conference on Machine Learning (ICML) Workshop on Theoretic Foundation, Criticism, and Application Trend of Explainable AI, 2021.
@article{poyiadzi2021overlooked,
title = {On the overlooked issue of defining explanation objectives for local-surrogate explainers},
author = {Rafael Poyiadzi and Xavier Renard and Thibault Laugel and Raul Santos-Rodriguez and Marcin Detyniecki},
url = {https://arxiv.org/abs/2106.05810},
year = {2021},
date = {2021-01-01},
journal = {International Conference on Machine Learning (ICML) Workshop on Theoretic Foundation, Criticism, and Application Trend of Explainable AI},
abstract = {Local surrogate approaches for explaining machine learning model predictions have appealing properties, such as being model-agnostic and flexible in their modelling. Several methods exist that fit this description and share this goal. However, despite their shared overall procedure, they set out different objectives, extract different information from the black-box, and consequently produce diverse explanations, that are -- in general -- incomparable. In this work we review the similarities and differences amongst multiple methods, with a particular focus on what information they extract from the model, as this has large impact on the output: the explanation. We discuss the implications of the lack of agreement, and clarity, amongst the methods' objectives on the research and practice of explainability.},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Renard, Xavier; Laugel, Thibault; Detyniecki, Marcin
Understanding Prediction Discrepancies in Machine Learning Classifiers Journal Article
In: arXiv preprint arXiv:2104.05467, 2021.
@article{renard2021understanding,
title = {Understanding Prediction Discrepancies in Machine Learning Classifiers},
author = {Xavier Renard and Thibault Laugel and Marcin Detyniecki},
url = {https://arxiv.org/abs/2104.05467},
year = {2021},
date = {2021-01-01},
journal = {arXiv preprint arXiv:2104.05467},
abstract = {A multitude of classifiers can be trained on the same data to achieve similar performances during test time, while having learned significantly different classification patterns. This phenomenon, which we call prediction discrepancies, is often associated with the blind selection of one model instead of another with similar performances. When making a choice, the machine learning practitioner has no understanding on the differences between models, their limits, where they agree and where they don't. But his/her choice will result in concrete consequences for instances to be classified in the discrepancy zone, since the final decision will be based on the selected classification pattern. Besides the arbitrary nature of the result, a bad choice could have further negative consequences such as loss of opportunity or lack of fairness. This paper proposes to address this question by analyzing the prediction discrepancies in a pool of best-performing models trained on the same data. A model-agnostic algorithm, DIG, is proposed to capture and explain discrepancies locally, to enable the practitioner to make the best educated decision when selecting a model by anticipating its potential undesired consequences. All the code to reproduce the experiments is available.},
keywords = {Explainability & Interpretability},
pubstate = {published},
tppubtype = {article}
}
Grari, Vincent; Lamprier, Sylvain; Detyniecki, Marcin
Fairness-aware neural Rényi minimization for continuous features Proceedings Article
In: Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, pp. 2262–2268, 2021.
@inproceedings{grari2021fairness,
title = {Fairness-aware neural Rényi minimization for continuous features},
author = {Vincent Grari and Sylvain Lamprier and Marcin Detyniecki},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence},
pages = {2262--2268},
keywords = {Fairness},
pubstate = {published},
tppubtype = {inproceedings}
}
Grari, Vincent; Hajouji, Oualid El; Lamprier, Sylvain; Detyniecki, Marcin
Enforcing Individual Fairness via Rényi Variational Inference Proceedings Article
In: International Conference on Neural Information Processing, pp. 608–616, Springer 2021.
@inproceedings{grari2021enforcing,
title = {Enforcing Individual Fairness via Rényi Variational Inference},
author = {Vincent Grari and Oualid El Hajouji and Sylvain Lamprier and Marcin Detyniecki},
year = {2021},
date = {2021-01-01},
booktitle = {International Conference on Neural Information Processing},
pages = {608--616},
organization = {Springer},
keywords = {Fairness},
pubstate = {published},
tppubtype = {inproceedings}
}