@inproceedings{46dd1a04c4f4419e8d4e1ca6ef683010,
title = "A Translucency Image Editing Method Based On StyleGAN",
abstract = "In the field of image-based material editing, while most studies focus on opaque materials, editing translucent materials remains a challenge. In this paper, we propose a method for editing the translucency for single input image, utilizing Style-based Generator Architecture for Generative Adversarial Networks (StyleGAN) with pixel2style2pixel (pSp) encoder. We propose a T-space, which is derived by autoencoders that map the latent-space of the StyleGAN into this more meaningful latent space for translucency editing. With this T-space, we train a group of multi-layer perceptions (MLPs) to obtain the directional change vectors of three chosen parameters of BRDF and BSSRDF models, which enable varying translucency level of the object from the input image in three different fashions. Experimental results demonstrate that our approach achieves effective translucency editing in both rendered and captured images.",
keywords = "Feature extraction, Generative Adversarial Networks, Image-based material editing, Translucency",
author = "Mingyuan Zhang and Hongsong Li and Shengyao Wang",
note = "Publisher Copyright: {\textcopyright} 2025 SPIE.; 8th International Conference on Computer Graphics and Virtuality, ICCGV 2025 ; Conference date: 21-02-2025 Through 23-02-2025",
year = "2025",
doi = "10.1117/12.3062834",
language = "English",
series = "Proceedings of SPIE - The International Society for Optical Engineering",
publisher = "SPIE",
editor = "Haiquan Zhao",
booktitle = "Eighth International Conference on Computer Graphics and Virtuality, ICCGV 2025",
address = "United States",
}