@inproceedings{269e48fe0579439d8dac3e55abc82ebe,
  title     = {Federated Learning with Quantization Constraints},
  abstract  = {Traditional deep learning models are trained on centralized servers using labeled sample data collected from edge devices. This data often includes private information, which the users may not be willing to share. Federated learning (FL) is an emerging approach to train such learning models without requiring the users to share their possibly private labeled data. In FL, each user trains its copy of the learning model locally. The server then collects the individual updates and aggregates them into a global model. A major challenge that arises in this method is the need of each user to efficiently transmit its learned model over the throughput limited uplink channel. In this work, we tackle this challenge using tools from quantization theory. In particular, we identify the unique characteristics associated with conveying trained models over rate-constrained channels, and characterize a suitable quantization scheme for such setups. We show that combining universal vector quantization methods with FL yields a decentralized training system, which is both efficient and feasible. We also derive theoretical performance guarantees of the system. Our numerical results illustrate the substantial performance gains of our scheme over FL with previously proposed quantization approaches.},
  keywords  = {Federated learning, edge computing, quantization},
  author    = {Shlezinger, Nir and Chen, Mingzhe and Eldar, Yonina C. and Poor, H. Vincent and Cui, Shuguang},
  note      = {Funding Information: This project has received funding from the Benoziyo Endowment Fund for the Advancement of Science, the Estate of Olga Klein -- Astrachan, the European Unions Horizon 2020 research and innovation program under grant No. 646804-ERC-COG-BNYQ, from the Israel Science Foundation under grant No. 0100101, and from the U.S. National Science Foundation under grants CCF-0939370 and CCF-1513915. N. Shlezinger and Y. C. Eldar are with the Faculty of Math and CS, Weizmann Institute of Science, Rehovot, Israel (e-mail: nirshlezinger1@gmail.com; yonina@weizmann.ac.il). M. Chen and H. V. Poor are with the EE Dept., Princeton University, Princeton, NJ (e-mail: {mingzhec, poor}@princeton.edu). M. Chen is also with the Chinese University of Hong Kong, Shenzhen, China. S. Cui is with the Chinese University of Hong Kong, Shenzhen, China (e-mail: shuguangcui@cuhk.edu.cn) Publisher Copyright: {\textcopyright} 2020 IEEE.; 2020 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2020 ; Conference date: 04-05-2020 Through 08-05-2020},
  year      = {2020},
  month     = may,
  doi       = {10.1109/ICASSP40776.2020.9054168},
  language  = {English (US)},
  series    = {{ICASSP}, {IEEE} International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {8851--8855},
  booktitle = {2020 {IEEE} International Conference on Acoustics, Speech, and Signal Processing, {ICASSP} 2020 - Proceedings},
  address   = {United States},
}