@inproceedings{62feef6b9c8f4874b75cc6206091798a,
  title     = {{MuseGesture}: A Framework for Gesture Synthesis by Virtual Agents in {VR} Museum Guides},
  abstract  = {This paper presents an innovative framework named MuseGesture, designed to generate contextually adaptive gestures for virtual agents in Virtual Reality (VR) museums. The framework leverages the robust language understanding and generation capabilities of Large Language Models (LLMs) to parse tour narration texts and generate corresponding explanatory gestures. Through reinforcement learning and adversarial skill embeddings, the framework also generates guiding gestures tailored to the virtual museum environment, integrating both gesture types using conditional motion interpolation methods. Experimental results and user studies demonstrate that this approach effectively enables voice-command-controlled virtual guide gestures, offering a novel intelligent guiding system solution that enhances the interactive experience in VR museum environments.},
  keywords  = {Gesture Generation, Large Language Models, Virtual Agents, VR Museum Guides},
  author    = {Bao, Yihua and Gao, Nan and Weng, Dongdong and Chen, Junyu and Tian, Zeyu},
  note      = {Publisher Copyright: {\textcopyright} 2024 IEEE.; 2024 IEEE International Symposium on Mixed and Augmented Reality Adjunct, ISMAR-Adjunct 2024 ; Conference date: 21-10-2024 Through 25-10-2024},
  year      = {2024},
  month     = oct,
  doi       = {10.1109/ISMAR-Adjunct64951.2024.00079},
  language  = {English},
  series    = {Proceedings - 2024 IEEE International Symposium on Mixed and Augmented Reality Adjunct, ISMAR-Adjunct 2024},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {337--338},
  editor    = {Eck, Ulrich and Sra, Misha and Stefanucci, Jeanine and Sugimoto, Maki and Tatzgern, Markus and Williams, Ian},
  booktitle = {Proceedings - 2024 IEEE International Symposium on Mixed and Augmented Reality Adjunct, ISMAR-Adjunct 2024},
  address   = {United States},
}