Ballet, Vincent; Renard, Xavier; Aigrain, Jonathan; Laugel, Thibault; Frossard, Pascal; Detyniecki, Marcin
Imperceptible adversarial attacks on tabular data Proceedings Article
In: NeurIPS 2019 Workshop on Robust AI in Financial Services: Data, Fairness, Explainability, Trustworthiness, and Privacy (Robust AI in FS 2019), Vancouver, Canada, 2019.
Abstract | Links | BibTeX | Tags: Robustness
@inproceedings{ballet2019imperceptible,
title = {Imperceptible adversarial attacks on tabular data},
author = {Ballet, Vincent and Renard, Xavier and Aigrain, Jonathan and Laugel, Thibault and Frossard, Pascal and Detyniecki, Marcin},
url = {https://arxiv.org/abs/1911.03274},
eprint = {1911.03274},
archiveprefix = {arXiv},
year = {2019},
date = {2019-01-01},
booktitle = {NeurIPS 2019 Workshop on Robust AI in Financial Services: Data, Fairness, Explainability, Trustworthiness, and Privacy (Robust AI in FS 2019), Vancouver, Canada},
abstract = {Security of machine learning models is a concern as they may face adversarial attacks for unwarranted advantageous decisions. While research on the topic has mainly been focusing on the image domain, numerous industrial applications, in particular in finance, rely on standard tabular data. In this paper, we discuss the notion of adversarial examples in the tabular domain. We propose a formalization based on the imperceptibility of attacks in the tabular domain leading to an approach to generate imperceptible adversarial examples. Experiments show that we can generate imperceptible adversarial examples with a high fooling rate.},
keywords = {Robustness},
pubstate = {published},
tppubtype = {inproceedings}
}
Aigrain, Jonathan; Detyniecki, Marcin
Detecting adversarial examples and other misclassifications in neural networks by introspection Journal Article
In: arXiv preprint arXiv:1905.09186, 2019.
BibTeX | Tags: Robustness
@article{aigrain2019detecting,
title = {Detecting adversarial examples and other misclassifications in neural networks by introspection},
author = {Aigrain, Jonathan and Detyniecki, Marcin},
url = {https://arxiv.org/abs/1905.09186},
eprint = {1905.09186},
archiveprefix = {arXiv},
year = {2019},
date = {2019-01-01},
journal = {arXiv preprint arXiv:1905.09186},
keywords = {Robustness},
pubstate = {published},
tppubtype = {article}
}
Renard, Xavier; Laugel, Thibault; Lesot, Marie-Jeanne; Marsala, Christophe; Detyniecki, Marcin
Detecting potential local adversarial examples for human-interpretable defense Proceedings Article
In: ECML PKDD 2018 Workshops: Nemesis 2018, UrbReas 2018, SoGood 2018, IWAISe 2018, and Green Data Mining 2018, Dublin, Ireland, September 10-14, 2018, Proceedings, pp. 41, Springer 2019.
Abstract | Links | BibTeX | Tags: Robustness
@inproceedings{renard2018detecting,
title = {Detecting potential local adversarial examples for human-interpretable defense},
author = {Renard, Xavier and Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Detyniecki, Marcin},
url = {https://arxiv.org/abs/1809.02397},
eprint = {1809.02397},
archiveprefix = {arXiv},
year = {2019},
date = {2019-01-01},
booktitle = {ECML PKDD 2018 Workshops: Nemesis 2018, UrbReas 2018, SoGood 2018, IWAISe 2018, and Green Data Mining 2018, Dublin, Ireland, September 10-14, 2018, Proceedings},
series = {Lecture Notes in Computer Science},
volume = {11329},
pages = {41},
publisher = {Springer},
abstract = {Machine learning models are increasingly used in the industry to make decisions such as credit insurance approval. Some people may be tempted to manipulate specific variables, such as the age or the salary, in order to get better chances of approval. In this ongoing work, we propose to discuss, with a first proposition, the issue of detecting a potential local adversarial example on classical tabular data by providing to a human expert the locally critical features for the classifier's decision, in order to control the provided information and avoid a fraud.},
keywords = {Robustness},
pubstate = {published},
tppubtype = {inproceedings}
}