@inproceedings{6e44bc353b894c8cbc204abd3dc55bd8,
title = "Emotion recognition based on human gesture and speech information using RT middleware",
abstract = "A bi-modal emotion recognition approach is proposed that recognizes four emotions by integrating information from gestures and speech. The outputs of two uni-modal emotion recognition systems, one based on affective speech and one on expressive gesture, are combined at the decision level using weight criterion fusion and best probability plus majority vote fusion; the resulting classifier outperforms each uni-modal system and helps identify emotions suited to communication situations. To validate the proposal, fifty Japanese words (or phrases) and eight types of gestures recorded from five participants are used, and the emotion recognition rate increases to 85.39%. The approach can be extended to additional modalities and is useful for automatic emotion recognition in human-robot communication.",
keywords = "Affective Speech, Decision-level Fusion, Emotion Recognition, Expressive Gesture",
author = "Vu, {H. A.} and Y. Yamazaki and F. Dong and K. Hirota",
year = "2011",
doi = "10.1109/FUZZY.2011.6007557",
language = "English",
isbn = "9781424473175",
series = "IEEE International Conference on Fuzzy Systems",
pages = "787--791",
booktitle = "FUZZ 2011 - 2011 IEEE International Conference on Fuzzy Systems - Proceedings",
note = "2011 IEEE International Conference on Fuzzy Systems, FUZZ 2011 ; Conference date: 27-06-2011 Through 30-06-2011",
}