@inproceedings{c2202a0678a148ad940bc0584a913b41,
  title     = {Multi-view action synchronization in complex background},
  abstract  = {This paper addresses temporal synchronization of human actions under multiple view situation. Many researchers focused on frame by frame alignment for sync these multi-view videos, and exploited features such as interesting point trajectory or 3d human motion feature for event detecting individual. However, since background are complex and dynamic in real world, traditional image-based features are not fit for video representation. We explore the approach by using robust spatio-temporal features and self-similarity matrices to represent actions across views. Multiple sequences can be aligned their temporal patch(Sliding window) using the Dynamic Time Warping algorithm hierarchically and measured by meta-action classifiers. Two datasets including the Pump and the Olympic dataset are used as test cases. The methods are showed the effectiveness in experiment and suited general video event dataset.},
  keywords  = {Human action Synchronization, MoSIFT, Multi-view, Video alignment},
  author    = {Zhang, Longfei and Tang, Shuo and Singhal, Shikha and Ding, Gangyi},
  year      = {2014},
  doi       = {10.1007/978-3-319-04117-9_14},
  language  = {English},
  isbn      = {978-3-319-04116-2},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  number    = {PART 2},
  pages     = {151--160},
  booktitle = {MultiMedia Modeling - 20th Anniversary International Conference, MMM 2014, Proceedings},
  publisher = {Springer},
  note      = {20th Anniversary International Conference on MultiMedia Modeling, MMM 2014 ; Conference date: 06-01-2014 Through 10-01-2014},
}