@inproceedings{912c34bb4463420185f7b8dd6a9c3f80,
title = "A Multimodal Emotion Perception Model based on Context-Aware Decision-Level Fusion",
abstract = "A Multimodal Emotion Perception model with Audio and Visual modalities (MEP-A V) is proposed to detect the individual emotions in public area. The framework of MEP-AV model consists of four parts, i.e., data collection module, audio expression analysis module, visual expression analysis module and multimodal fusion module. In order to ensure that the emotion perception results meet the requirement of short-term continuity, a Context-Aware Decision-Level Fusion (CADLF) model is proposed and applied in multimodal fusion module. The CADLF model estimates the affective status by using context information of multimodal emotion. The short-term continuity is considered to improve the accuracy of the emotion perception results. The experiment results evaluated by various metrics demonstrate that the performance of the multimodal structure is improved compared with that of unimodal emotion classifiers. The MEP-AV model using multimodal fusion algorithm provides the accuracies of 70.89% and 77.07% in valence and arousal respectively. The Fl-scores reaches 70.2% and 75.6% respectively, indicating the boost performance on emotion perception.",
keywords = "Context-aware, Decision-level Fusion, Emotion Perception",
author = "Yishan Chen and Zhiyang Jia and Kaoru Hirota and Yaping Dai",
note = "Publisher Copyright: {\textcopyright} 2022 Technical Committee on Control Theory, Chinese Association of Automation.; 41st Chinese Control Conference, CCC 2022 ; Conference date: 25-07-2022 Through 27-07-2022",
year = "2022",
doi = "10.23919/CCC55666.2022.9902799",
language = "English",
series = "Chinese Control Conference, CCC",
publisher = "IEEE Computer Society",
pages = "7332--7337",
editor = "Zhijun Li and Jian Sun",
booktitle = "Proceedings of the 41st Chinese Control Conference, CCC 2022",
address = "United States",
}
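
The abstract above describes the CADLF scheme only at a high level, and the exact formulation is not given in this entry. As a minimal, purely illustrative sketch (Python; the function names, the 0.5 fusion weight, and the exponential-moving-average smoother are all assumptions, not the authors' method), context-aware decision-level fusion with short-term continuity can be approximated by averaging per-window class probabilities from the audio and visual classifiers and smoothing the fused scores over recent context:

import numpy as np

def fuse_decisions(p_audio, p_visual, w_audio=0.5):
    # Decision-level fusion: weighted average of the per-class
    # probability vectors produced by the two unimodal classifiers.
    return w_audio * p_audio + (1.0 - w_audio) * p_visual

def smooth_with_context(fused_seq, alpha=0.7):
    # Short-term continuity (hypothetical stand-in for CADLF):
    # exponential moving average over successive fused probability
    # vectors, so each estimate stays consistent with recent context.
    smoothed = []
    context = fused_seq[0]
    for p in fused_seq:
        context = alpha * context + (1.0 - alpha) * p
        smoothed.append(context / context.sum())  # renormalize
    return smoothed

# Example: three time windows, binary valence classes (negative, positive).
p_audio = np.array([[0.4, 0.6], [0.3, 0.7], [0.8, 0.2]])
p_visual = np.array([[0.5, 0.5], [0.2, 0.8], [0.6, 0.4]])
fused = [fuse_decisions(a, v) for a, v in zip(p_audio, p_visual)]
for t, p in enumerate(smooth_with_context(fused)):
    print(f"window {t}: P(valence classes) = {p}")

Here the smoothing factor alpha trades responsiveness against continuity; the actual CADLF model may weight the multimodal context quite differently.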