@inproceedings{e1a119a0e6cf4f249cf492516516afdc,
title = "A lightweight convolutional network based on pruning algorithm for YOLO",
abstract = "With the rapid development of deep learning, neural network models have become increasingly complicated, leading to larger storage requirements and slower inference speed. These factors make such models difficult to deploy on resource-limited platforms. To alleviate this problem, network pruning, an effective model compression method, is commonly applied to deep neural networks. However, traditional pruning methods simply set redundant weights to zero and thus fail to achieve any real acceleration. In this paper, a channel-wise model scaling method is proposed to reduce the model size and speed up inference by structurally removing redundant filters from convolutional layers. To make the residual blocks sparser, we also develop a pruning method for residual cells. Experimental results on the YOLOv3 detector show that the proposed approach achieves a 70.6% parameter compression ratio without compromising accuracy.",
keywords = "deep learning, model compression, network pruning",
author = "Guanyu Liu and Yuzhao Li and Yuanchen Song and Yumeng Liu and Xiaofeng Xu and Zhen Zhao and Ruiheng Zhang",
note = "14th International Conference on Graphics and Image Processing, ICGIP 2022; Conference date: 21-10-2022 through 23-10-2022",
year = "2023",
doi = "10.1117/12.2680414",
language = "English",
series = "Proceedings of SPIE - The International Society for Optical Engineering",
publisher = "SPIE",
editor = "Liang Xiao and Jianru Xue",
booktitle = "Fourteenth International Conference on Graphics and Image Processing, ICGIP 2022",
address = "United States",
}