@inproceedings{a9139e83f4f94689a6162ef4ee6182a0,
  title     = {Stealing Secrecy from Outside: A Novel Gradient Inversion Attack in Federated Learning},
  abstract  = {Knowing model parameters has been regarded as a vital factor for recovering sensitive information from the gradients in federated learning. But is it safe to use federated learning when the model parameters are unavailable for adversaries, i.e., external adversaries? In this paper, we answer this question by proposing a novel gradient inversion attack. Specifically, we observe a widely ignored fact in federated learning that the participants' gradient data are usually transmitted via the intermediary node. Based on this fact, we show that an external adversary is able to recover the private input from the gradients, even if it does not have the model parameters. Through extensive experiments based on several real-world datasets, we demonstrate that our proposed new attack can recover the input with pixelwise accuracy and feasible efficiency.},
  keywords  = {federated learning, gradient inversion, grey-box attack},
  author    = {Zhang, Chuan and Liang, Haotian and Li, Youqi and Wu, Tong and Zhu, Liehuang and Zhang, Weiting},
  note      = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 28th IEEE International Conference on Parallel and Distributed Systems, ICPADS 2022 ; Conference date: 10-01-2023 Through 12-01-2023},
  year      = {2023},
  doi       = {10.1109/ICPADS56603.2022.00044},
  language  = {English},
  series    = {Proceedings of the International Conference on Parallel and Distributed Systems - ICPADS},
  publisher = {IEEE Computer Society},
  pages     = {282--288},
  booktitle = {Proceedings - 2022 IEEE 28th International Conference on Parallel and Distributed Systems, ICPADS 2022},
  address   = {United States},
}