@inproceedings{5cc626cf0da944e0b06560906e8397e8,
title = "Performance, design, and autotuning of batched GEMM for GPUs",
abstract = "The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra, and is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for batches of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes. For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.",
keywords = "Autotuning, Batched GEMM, GEMM, GPU computing, HPC",
author = "Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra",
note = "31st International Conference on High Performance Computing, ISC High Performance 2016; Conference date: 19-06-2016 through 23-06-2016",
year = "2016",
doi = "10.1007/978-3-319-41321-1_2",
language = "English",
isbn = "9783319413204",
series = "Lecture Notes in Computer Science",
publisher = "Springer Verlag",
pages = "21--38",
editor = "Jack Dongarra and {Julian M.} Kunkel and Pavan Balaji",
booktitle = "High Performance Computing - 31st International Conference, ISC High Performance 2016, Proceedings",
}