@inproceedings{29836f5b0173476f98421be9be5824bd,
title = "CeFET: Contrast-Enhanced Guided Facial Expression Translation",
abstract = "Advances in deep learning have spurred the development of numerous techniques for facial expression translation. However, these methods frequently rely on detailed annotations of action units (AUs) or on 3D modelling. In this paper, we introduce a novel Contrast-enhanced Guided Facial Expression Translation (CeFET) method. The model takes only facial images as input and extracts facial features from them with an encoder built on the StyleGAN prior. We propose a contrast-enhanced guidance technique that minimizes the distance between the generated face and the input face, as well as the distance between the generated expression and the reference expression. This ensures that the generated face maintains identity consistency with the source face and expression consistency with the reference face. Extensive experimental results support the effectiveness of our method.",
keywords = "Contrastive Learning, Facial Expression Translation, Generative Adversarial Network",
author = "Linfeng Han and Zhong Jin and {Yi Chang} Li and Zhiyang Jia",
note = "Publisher Copyright: {\textcopyright} The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025; 14th International Conference on Computer Engineering and Networks, CENet 2024; Conference date: 18 October 2024 through 21 October 2024",
year = "2025",
doi = "10.1007/978-981-96-4016-4_16",
language = "English",
isbn = "9789819640157",
series = "Lecture Notes in Electrical Engineering",
publisher = "Springer Nature Singapore",
pages = "177--188",
editor = "Guangqiang Yin and Xiaodong Liu and Jian Su and Yangzhao Yang",
booktitle = "Proceedings of the 14th International Conference on Computer Engineering and Networks - Volume IV",
address = "Singapore",
}
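
For readers skimming this entry, a minimal sketch of the contrast-enhanced guidance idea the abstract describes might look as follows: pull the generated face toward the source image in an identity embedding space and toward the reference image in an expression embedding space, using other batch items as negatives. This is an illustration only, not the authors' implementation; the encoder names (identity_encoder, expression_encoder), the InfoNCE formulation, and the temperature value are all assumptions, and the paper's StyleGAN-prior encoder and exact losses are given in the publication itself (see the DOI above).

import torch
import torch.nn.functional as F

def contrast_guidance_loss(gen_img, src_img, ref_img,
                           identity_encoder, expression_encoder,
                           temperature=0.07):
    """InfoNCE-style guidance sketch: the generated image should match the
    source in identity space and the reference in expression space, while
    being pushed away from the other images in the batch (negatives)."""
    # Embeddings, L2-normalised so dot products are cosine similarities.
    id_gen = F.normalize(identity_encoder(gen_img), dim=-1)
    id_src = F.normalize(identity_encoder(src_img), dim=-1)
    ex_gen = F.normalize(expression_encoder(gen_img), dim=-1)
    ex_ref = F.normalize(expression_encoder(ref_img), dim=-1)

    def info_nce(anchor, positive):
        # Similarity of each anchor to every candidate in the batch;
        # the diagonal holds the matching (positive) pair.
        logits = anchor @ positive.t() / temperature
        targets = torch.arange(anchor.size(0), device=anchor.device)
        return F.cross_entropy(logits, targets)

    # Identity consistency with the source plus expression consistency
    # with the reference, mirroring the two distances in the abstract.
    return info_nce(id_gen, id_src) + info_nce(ex_gen, ex_ref)

# Toy usage with a stand-in encoder (a real system would use pretrained
# identity and expression networks):
enc = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 64 * 64, 128))
imgs = torch.randn(4, 3, 64, 64)
loss = contrast_guidance_loss(imgs, imgs, imgs, enc, enc)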