@inproceedings{e66e9d7d4546486786c3549b78649ffd,
title = "Hardware-Friendly Progressive Pruning Framework for CNN Model Compression using Universal Pattern Sets",
abstract = "Pattern-based weight pruning on CNNs has proven to be an effective model reduction technique. In this paper, we first present how to select hardware-friendly pruning pattern sets that are universal across various models. We then propose a progressive pruning framework, which produces more globally optimized outcomes. Moreover, to the best of our knowledge, this is the first paper to address pruning of the first, and also the most sensitive, layer of a CNN model through a two-stage pruning strategy. Experimental results show that the proposed framework achieves 2.25x computation reduction and 2x model size reduction while minimizing accuracy loss.",
keywords = "convolutional neural network (CNN), first layer pruning, model compression, pattern pruning, universal pattern sets, weight pruning",
author = "Chou, {Wei Cheng} and Huang, {Cheng Wei} and Huang, {Juinn Dar}",
note = "Publisher Copyright: {\textcopyright} 2022 IEEE; 2022 International Symposium on VLSI Design, Automation and Test, VLSI-DAT 2022; Conference date: 18-04-2022 through 21-04-2022",
year = "2022",
doi = "10.1109/VLSI-DAT54769.2022.9768087",
language = "English",
series = "2022 International Symposium on VLSI Design, Automation and Test, VLSI-DAT 2022 - Proceedings",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2022 International Symposium on VLSI Design, Automation and Test, VLSI-DAT 2022 - Proceedings",
address = "United States",
}