@inproceedings{5dddd240f7ed48dfae3616002791adfd,
title = "Elastic distributed training with fast convergence and efficient resource utilization",
abstract = "Distributed learning is now routinely conducted in the cloud as well as on dedicated clusters. Training with elastic resources brings new challenges and design choices. Prior studies focus on runtime performance and assume static algorithmic behavior. In this work, by analyzing the impact of resource scaling on convergence, we introduce schedules for synchronous stochastic gradient descent that proactively adapt the number of learners to reduce training time and improve convergence. Our approach no longer assumes a constant number of processors throughout training. In our experiments, distributed stochastic gradient descent with dynamic schedules and reduction momentum achieves better convergence and significant speedups over prior static schedules. Numerous distributed training jobs running in the cloud may benefit from our approach.",
keywords = "Cloud, Distributed Training, Elastic training, Momentum, SGD",
author = "Guojing Cong",
note = "Publisher Copyright: {\textcopyright} 2021 IEEE; 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021; Conference date: 13-12-2021 through 16-12-2021",
year = "2021",
doi = "10.1109/ICMLA52953.2021.00160",
language = "English",
series = "Proceedings - 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "972--979",
editor = "Wani, {M. Arif} and Sethi, {Ishwar K.} and Weisong Shi and Guangzhi Qu and Raicu, {Daniela Stan} and Ruoming Jin",
booktitle = "Proceedings - 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021",
}