@inproceedings{8389b680514c428baa9daa984574c5fb,
  author        = {Xu, Feiyang and Li, Ying and Yang, Chao and Wang, Weida and Xu, Bin},
  title         = {Adversarial Attacks against Traffic Sign Detection for Autonomous Driving},
  booktitle     = {Proceedings of the 2023 7th {CAA} International Conference on Vehicular Control and Intelligence, {CVCI} 2023},
  series        = {Proceedings of the 2023 7th {CAA} International Conference on Vehicular Control and Intelligence, {CVCI} 2023},
  publisher     = {Institute of Electrical and Electronics Engineers Inc.},
  address       = {United States},
  year          = {2023},
  doi           = {10.1109/CVCI59596.2023.10397303},
  language      = {English},
  keywords      = {2D object detection, data poisoning, generative adversarial networks},
  abstract      = {Deep neural networks play a crucial role in 2D object detection based on visual data, but they are also vulnerable to adversarial samples. Attackers manipulate low-resolution images to execute data poisoning attacks. This paper introduces a method to generate realistic high-resolution adversarial samples aimed at compromising traffic sign detection models. Specifically, we propose a high-resolution adversarial sample framework built upon generative adversarial networks. Subsequently, an adversarial traffic sign detection model is developed to investigate the impact of data poisoning. To enhance the model's robustness, we conduct adversarial training. Experimental results demonstrate the efficacy of our data poisoning approach in misleading the detection model. Furthermore, the detection model exhibits improved robustness against such attacks following adversarial training.},
  note          = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 7th CAA International Conference on Vehicular Control and Intelligence, CVCI 2023 ; Conference date: 27-10-2023 Through 29-10-2023},
  internal-note = {NOTE(review): auto-export artifacts kept as-is -- `address` holds a country, not the publisher's city (verify; IEEE's city would belong here), and `series` duplicates `booktitle`. Pages are missing; add if known.},
}