@inproceedings{9cad86c950b5495480dc0f5a52e4cb8b,
title = "Row-wise Accelerator for Vision Transformer",
abstract = "Following the success of the natural language processing, the transformer for vision applications has attracted significant attention in recent years due to its excellent performance. However, existing deep learning hardware accelerators for vision cannot execute this structure efficiently due to significant model architecture differences. As a result, this paper proposes the hardware accelerator for vision transformers with row-wise scheduling, which decomposes major operations in vision transformers as a single dot product primitive for a unified and efficient execution. Furthermore, by sharing weights in columns, we can reuse the data and reduce the usage of memory. The implementation with TSMC 40nm CMOS technology only requires 262K gate count and 149KB SRAM buffer for 403.2 GOPS throughput at 600MHz clock frequency.",
keywords = "accelerators, hardware design, vision transformer",
author = "Wang, {Hong Yi} and Chang, {Tian Sheuan}",
note = "Publisher Copyright: {\textcopyright} 2022 IEEE.; 4th IEEE International Conference on Artificial Intelligence Circuits and Systems, AICAS 2022 ; Conference date: 13-06-2022 Through 15-06-2022",
year = "2022",
doi = "10.1109/AICAS54282.2022.9869928",
language = "English",
series = "Proceeding - IEEE International Conference on Artificial Intelligence Circuits and Systems, AICAS 2022",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "399--402",
booktitle = "Proceeding - IEEE International Conference on Artificial Intelligence Circuits and Systems, AICAS 2022",
address = "United States",
}