@inproceedings{bb7bfe63d1184ffc8e3b28bd612ff697,
title = "ELITE: Defending Federated Learning against Byzantine Attacks based on Information Entropy",
abstract = "Federated learning is a distributed machine learning paradigm in which physically distributed computing nodes collaboratively train a global model. In federated learning, workers do not share their training data with others, which allows malicious workers to manipulate the parameters (e.g., weights/gradients) of their local models and thereby degrade the training accuracy of the global model. This is generally called a Byzantine attack. Existing solutions either offer limited resistance to Byzantine attacks or are not applicable to federated learning. In this paper, we propose ELITE, a robust parameter aggregation algorithm to defend federated learning against Byzantine attacks. Inspired by the observation that the parameters of malicious workers usually deviate from those of benign workers, we introduce information entropy to efficiently detect malicious workers. We evaluate the performance of ELITE on image classification model training under three typical attacks, and experimental results show that ELITE resists various Byzantine attacks and outperforms existing algorithms, improving model accuracy by up to 80%.",
keywords = "Byzantine attacks, Federated learning, information entropy, robust",
author = "Yongkang Wang and Yuanqing Xia and Yufeng Zhan",
note = "Publisher Copyright: {\textcopyright} 2021 IEEE; 2021 China Automation Congress, CAC 2021; Conference date: 22-10-2021 through 24-10-2021",
year = "2021",
doi = "10.1109/CAC53003.2021.9727486",
language = "English",
series = "Proceedings - 2021 China Automation Congress, CAC 2021",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "6049--6054",
booktitle = "Proceedings - 2021 China Automation Congress, CAC 2021",
address = "United States",
}