@inproceedings{d62c97bd0a824647a08be10a48b7eb16,
title = "Learning to Fly with a Video Generator",
abstract = "This paper demonstrates a model-based reinforcement learning framework for training a self-flying drone. We implement the Dreamer proposed in prior work as an environment model that responds to the drone's actions by predicting the next video frame as a new state signal. The Dreamer is a conditional video sequence generator. This model-based environment avoids time-consuming interactions between the agent and the environment, greatly speeding up the training process. This demonstration showcases, for the first time, the application of the Dreamer to training an agent that can complete the racing task in the AirSim simulator.",
author = "Chung, {Chia Chun} and Peng, {Wen Hsiao} and Cheng, {Teng Hu} and Yu, {Chia Hau}",
note = "Publisher Copyright: {\textcopyright} 2021 IEEE.; 2021 International Conference on Visual Communications and Image Processing, VCIP 2021 ; Conference date: 05-12-2021 Through 08-12-2021",
year = "2021",
doi = "10.1109/VCIP53242.2021.9675414",
language = "English",
series = "2021 International Conference on Visual Communications and Image Processing, VCIP 2021 - Proceedings",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2021 International Conference on Visual Communications and Image Processing, VCIP 2021 - Proceedings",
address = "United States",
}