@inproceedings{305e8269437c4db8a27c65101bf8f331,
title = "Distributed Gradient Descent: Nonconvergence to Saddle Points and the Stable-Manifold Theorem",
abstract = "The paper studies continuous-time distributed gradient descent (DGD) and considers the problem of showing that in nonconvex optimization problems, DGD typically converges to local minima rather than saddle points. In centralized settings, the problem of demonstrating nonconvergence to saddle points is typically handled by way of the stable-manifold theorem from classical dynamical systems theory. However, the classical stable-manifold theorem is not applicable in the distributed setting. The paper develops an appropriate stable-manifold theorem for DGD. This shows that convergence to saddle points may only occur from a low-dimensional stable manifold. Under appropriate assumptions (e.g., coercivity), the result implies that DGD almost always converges to local minima.",
keywords = "Distributed optimization, gradient descent, multi-agent systems, nonconvex optimization, saddle points, stable-manifold theorem",
author = "Brian Swenson and Ryan Murray and Poor, {H. Vincent} and Soummya Kar",
year = "2019",
month = sep,
doi = "10.1109/ALLERTON.2019.8919658",
language = "English (US)",
series = "2019 57th Annual Allerton Conference on Communication, Control, and Computing, Allerton 2019",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "595--601",
booktitle = "2019 57th Annual Allerton Conference on Communication, Control, and Computing, Allerton 2019",
address = "United States",
note = "57th Annual Allerton Conference on Communication, Control, and Computing, Allerton 2019 ; Conference date: 24-09-2019 Through 27-09-2019",
}