@inproceedings{0764ba9f5910410187350704dc358cd8,
title = "Increasing PE Utilization with a SW/HW Co-Design Technique for Sparse Convolutional Neural Networks",
abstract = "Pruning convolutional neural networks (CNNs) has proven to be an effective technique for decreasing network size without loss of accuracy. By processing the compressed format of the network, an accelerator can considerably reduce energy consumption. However, existing SIMD-like sparse CNN accelerators suffer from low processing engine (PE) utilization due to the irregular distribution of effectual pairs. In this paper, we address this issue by proposing a software and hardware co-design technique, comprising a novel data compression scheme and a dedicated module to handle this compressed format. When compared to a state-of-the-art SIMD-like accelerator, the proposed co-design technique reduces the computation time of conv3, conv4, and conv5 of AlexNet by 15%, 33%, and 31%, respectively.",
keywords = "Machine learning, SIMD architecture, Sparse convolutional neural networks",
author = "Tseng, {Wei Fan} and Lai, {Bo Cheng} and Pan, {Jyun Wei}",
note = "Publisher Copyright: {\textcopyright} 2019 IEEE.; Conference date: 25-10-2019 Through 30-10-2019",
year = "2019",
month = oct,
doi = "10.1109/ICICE49024.2019.9117552",
language = "English",
series = "Proceedings of the 2019 8th International Conference on Innovation, Communication and Engineering, ICICE 2019",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "74--77",
editor = "Shoou-Jinn Chang and Sheng-Joue Young and Lam, {Artde Donald Kin-Tak} and Liang-Wen Ji and Hao-Ying Lu and Prior, {Stephen D.}",
booktitle = "Proceedings of the 2019 8th International Conference on Innovation, Communication and Engineering, ICICE 2019",
address = "United States",
}