@inproceedings{9b9cd05cb234412399849f32b3ac4d06,
title = "RESIDUAL GRAPH ATTENTION NETWORK AND EXPRESSION-RESPECT DATA AUGMENTATION AIDED VISUAL GROUNDING",
abstract = "Visual grounding aims to localize a target object in an image based on a given text description. Due to the innate complexity of language, it is still a challenging problem to perform reasoning of complex expressions and to infer the underlying relationship between the expression and the object in an image. To address these issues, we propose a residual graph attention network for visual grounding. The proposed approach first builds an expression-guided relation graph and then performs multi-step reasoning followed by matching the target object. It allows performing better visual grounding with complex expressions by using deeper layers than other graph network approaches. Moreover, to increase the diversity of training data, we perform an expression-respect data augmentation based on copy-paste operations to pairs of source and target images. The proposed approach achieves better performance with extensive experiments than other state-of-the-art graph network-based approaches and demonstrates its effectiveness.",
keywords = "Expression-respect data augmentation, Residual graph attention network, Visual grounding",
author = "Jia Wang and Wu, {Hung Yi} and Chen, {Jun Cheng} and Shuai, {Hong Han} and Cheng, {Wen Huang}",
note = "Publisher Copyright: {\textcopyright} 2022 IEEE.; 29th IEEE International Conference on Image Processing, ICIP 2022 ; Conference date: 16-10-2022 Through 19-10-2022",
year = "2022",
doi = "10.1109/ICIP46576.2022.9897564",
language = "English",
series = "Proceedings - International Conference on Image Processing, ICIP",
publisher = "IEEE Computer Society",
pages = "326--330",
booktitle = "2022 IEEE International Conference on Image Processing, ICIP 2022 - Proceedings",
address = "United States",
}