@inproceedings{b9856d97a4ad4cfc9ca88f837c14c348,
title = "Data parallel large sparse deep neural network on {GPU}",
abstract = "Sparse Deep Neural Network (DNN) is an emerging research area since deploying deep neural networks with limited resources is very challenging. In this work, we provide a scalable solution to the Sparse DNN Challenge-a challenge posed by MIT/IEEE/Amazon GraphChallenge.org-by designing data parallelism on GPUs. We provide a solution based on Python TensorFlow as it is a widely used tool in different scientific applications for deep learning. We use the datasets provided by GraphChallenge, derived from the MNIST handwritten letters. We use the Synthetic DNNs from RadiX-Net with varying number of neurons and layers. We implement a data parallel implementation of Sparse DNN using TensorFlow on GPU. Our solution shows up to 4.7× speedup over the baseline serial MATLAB implementation given in GraphChallenge. In addition to that, our TensorFlow GPU implementation demonstrates a 3-fold speedup over our TensorFlow CPU implementation.",
keywords = "Deep neural network, GPU, Parallel computing, Sparse data, TensorFlow",
author = "Sattar, {Naw Safrin} and Arifuzzaman, Shaikh",
note = "Publisher Copyright: {\textcopyright} 2020 IEEE.; 34th IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020 ; Conference date: 18-05-2020 Through 22-05-2020",
year = "2020",
month = may,
doi = "10.1109/IPDPSW50202.2020.00170",
language = "English",
series = "Proceedings - 2020 IEEE 34th International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "1006--1014",
booktitle = "Proceedings - 2020 IEEE 34th International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020",
}