@inproceedings{7bd52aee304b416293c58db4186c134c,
title = "Asynchronous SGD for DNN training on shared-memory parallel architectures",
abstract = "We present a parallel asynchronous Stochastic Gradient Descent algorithm for shared-memory architectures. Unlike previous asynchronous algorithms, we consider the case where the gradient updates are not particularly sparse. In the context of the MagmaDNN framework, we compare the parallel efficiency of the asynchronous implementation with that of the traditional synchronous implementation. Tests are performed for training deep neural networks on multicore CPUs and GPU devices.",
keywords = "Asynchronous iterative methods, Deep learning, GPU, Multicore CPU, Stochastic Gradient Descent",
author = "Florent Lopez and Edmond Chow and Stanimire Tomov and Jack Dongarra",
note = "34th IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020; Conference date: 18--22 May 2020",
year = "2020",
month = may,
doi = "10.1109/IPDPSW50202.2020.00168",
language = "English",
publisher = "IEEE",
pages = "995--998",
booktitle = "Proceedings of the 2020 IEEE 34th International Parallel and Distributed Processing Symposium Workshops (IPDPSW 2020)",
}