@inproceedings{7664b9c2e2224085bcdf22bc640f39ea,
title = "Mitigating the Discrepancy Between Video and Text Temporal Sequences: A Time-Perception Enhanced Video Grounding method for LLM",
abstract = "Existing video-LLMs excel at capturing the overall description of a video but lack the ability to demonstrate an understanding of temporal dynamics and a fine-grained grasp of localized content within the video. In this paper, we propose a Time-Perception Enhanced Video Grounding via Boundary Perception and Temporal Reasoning aimed at mitigating LLMs' difficulties in understanding the discrepancies between video and text temporality. Specifically, to address the inherent biases in current datasets, we design a series of boundary-perception tasks to enable LLMs to capture accurate video temporality. To tackle LLMs' insufficient understanding of temporal information, we develop specialized tasks for boundary perception and temporal relationship reasoning to deepen LLMs' perception of video temporality. Our experimental results show significant improvements across three datasets: ActivityNet, Charades, and DiDeMo (achieving up to 11.2\% improvement on R@0.3), demonstrating the effectiveness of our proposed temporal awareness-enhanced data construction method.",
author = "Xuefen Li and Bo Wang and Ge Shi and Chong Feng and Jiahao Teng",
note = "Publisher Copyright: {\textcopyright} 2025 Association for Computational Linguistics.; 31st International Conference on Computational Linguistics, COLING 2025 ; Conference date: 19-01-2025 Through 24-01-2025",
year = "2025",
language = "English",
series = "Proceedings - International Conference on Computational Linguistics, COLING",
publisher = "Association for Computational Linguistics (ACL)",
pages = "9804--9813",
editor = "Owen Rambow and Leo Wanner and Marianna Apidianaki and Hend Al-Khalifa and \{Di Eugenio\}, Barbara and Steven Schockaert",
booktitle = "Main Conference",
address = "United States",
}