@inproceedings{148a4c07612a4fa99914601109ba7bad,
title = "Enhancing Moving Object Segmentation with Spatio-Temporal Information Fusion",
abstract = "Sensing moving objects accurately can provide information about dynamic changes in the environment, while further segmentation can help autonomous systems make smarter decisions and better SLAM. Effective utilization of spatio-temporal information is paramount for LiDAR Moving Object Segmentation (LiDAR-MOS). We propose an efficient approach for attaining more accurate point cloud segmentation results by leveraging spatio-temporal information from multiple LiDAR scans and their corresponding poses. To be specific, using acquired pose information, we initially transform the point cloud data of the sequence into the coordinate system of the current frame. The aligned point clouds are then discretized to generate a special BEV-occupied representation. Subsequently, we employ a Spatio-Temporal Excitation (STE) module excite the spatio-temporal features of the superimposed representations and put into the spatio-temporal pyramid network (STPN) for dual-head decoding and result fusion. We trained and evaluated our network on the nuScenes dataset. The results of comparative and ablation studies demonstrate the advantage of our designed method.",
keywords = "Deep Learning Methods, LiDAR, MOS",
author = "Siyu Chen and Yilei Huang and Qilin Li and Ruosong Wang and Zhenhai Zhang",
note = "Publisher Copyright: {\textcopyright} 2024 IEEE.; 21st IEEE International Conference on Mechatronics and Automation, ICMA 2024 ; Conference date: 04-08-2024 Through 07-08-2024",
year = "2024",
doi = "10.1109/ICMA61710.2024.10633122",
language = "English",
series = "2024 IEEE International Conference on Mechatronics and Automation, ICMA 2024",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "1783--1788",
booktitle = "2024 IEEE International Conference on Mechatronics and Automation, ICMA 2024",
address = "United States",
}