@inproceedings{bdc3d64d37ec44b687606abb531510f9,
  title     = {Talking Head Generation Based on {3D} Morphable Facial Model},
  abstract  = {This paper presents a framework for one-shot talking-head video generation which takes a single person image and audio clips as input and synthesizes photo-realistic videos with natural head-poses and lip motion synced to the driving audio. The main idea behind this framework is to use 3D Morphable Model (3DMM) parameters as intermediate representation in generating the videos. We design an Expression Predictor and a Head Pose Predictor to predict facial expression and head-pose parameters from audio, respectively, and adopt a 3DMM model to extract identity and texture parameters from the reference image. With these parameters, facial images are rendered as an auxiliary to guide video generation. Compared to widely used facial landmarks, 3DMM parameters are more powerful in representing facial details. Experimental results show that our method can generate realistic talking-head videos and outperform many state-of-the-art methods.},
  keywords  = {3DMM, deep learning, image-to-image translation, self-attention, talking-head generation},
  author    = {Shen, {Hsin Yu} and Tsai, {Wen Jiin}},
  note      = {Publisher Copyright: {\textcopyright} 2024 IEEE.; 2024 Picture Coding Symposium, PCS 2024 ; Conference date: 12-06-2024 Through 14-06-2024},
  year      = {2024},
  doi       = {10.1109/PCS60826.2024.10566437},
  language  = {English},
  series    = {2024 Picture Coding Symposium, PCS 2024 - Proceedings},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  booktitle = {2024 Picture Coding Symposium, PCS 2024 - Proceedings},
  address   = {United States},
}