@inproceedings{d7b158b6c6c94db7b0a7e6f34df07b82,
title = "Intent capturing through multimodal inputs",
abstract = "Virtual manufacturing environments need complex and accurate 3D human-computer interaction. One main problem of current virtual environments (VEs) is the heavy overloads of the users on both cognitive and motor operational aspects. This paper investigated multimodal intent delivery and intent inferring in virtual environments. Eye gazing modality is added into virtual assembly system. Typical intents expressed by dual hands and eye gazing modalities are designed. The reliability and accuracy of eye gazing modality is examined through experiments. The experiments showed that eye gazing and hand multimodal cooperation has a great potential to enhance the naturalness and efficiency of human-computer interaction (HCI).",
keywords = "Eye tracking, human-computer interaction, intent, multimodal input, virtual assembly, virtual environment",
author = "Weimin Guo and Cheng Cheng and Mingkai Cheng and Yonghan Jiang and Honglin Tang",
year = "2013",
doi = "10.1007/978-3-642-39330-3_26",
language = "English",
isbn = "9783642393297",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
number = "PART 4",
pages = "243--251",
booktitle = "Human-Computer Interaction",
edition = "PART 4",
note = "15th International Conference on Human-Computer Interaction, HCI International 2013 ; Conference date: 21-07-2013 Through 26-07-2013",
}