@inproceedings{4f932ba625ce42eda5f965b68ba331ed,
  title     = {Visual Tracking via Sparsity Pattern Learning},
  abstract  = {Recently sparse representation has been applied to visual tracking by modeling the target appearance using a sparse approximation over the template set. However, this approach is limited by the high computational cost of the {$\ell_1$}-norm minimization involved, which also impacts on the amount of particle samples that we can have. This paper introduces a basic constraint on the self-representation of the target set. The sparsity pattern in the self-representation allows us to recover the `sparse coefficients' of the candidate samples by some small-scale {$\ell_2$}-norm minimization; this results in a fast tracking algorithm. It also leads to a principled dictionary update mechanism which is crucial for good performance. Experiments on a recently released benchmark with 50 challenging video sequences show significant runtime efficiency and tracking accuracy achieved by the proposed algorithm.},
  author    = {Wang, Yuxi and Liu, Yue and Li, Zhuwen and Cheong, Loong Fah and Ling, Haibin},
  note      = {Publisher Copyright: {\textcopyright} 2016 IEEE.; 23rd International Conference on Pattern Recognition, ICPR 2016 ; Conference date: 04-12-2016 Through 08-12-2016},
  year      = {2016},
  month     = jan,
  day       = {1},
  doi       = {10.1109/ICPR.2016.7900046},
  language  = {English},
  series    = {Proceedings - International Conference on Pattern Recognition},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {2716--2721},
  booktitle = {2016 23rd International Conference on Pattern Recognition, {ICPR} 2016},
  address   = {United States},
}