@inproceedings{00c4c9e0e38841caaf8ff12dc4f81a1e,
  title     = {Reward-Biased Maximum Likelihood Estimation for Neural Contextual Bandits: A Distributional Learning Perspective},
  abstract  = {Reward-biased maximum likelihood estimation (RBMLE) is a classic principle in the adaptive control literature for tackling explore-exploit trade-offs. This paper studies the neural contextual bandit problem from a distributional perspective and proposes NeuralRBMLE, which leverages the likelihood of surrogate parametric distributions to learn the unknown reward distributions and thereafter adapts the RBMLE principle to achieve efficient exploration by properly adding a reward-bias term. NeuralRBMLE leverages the representation power of neural networks and directly encodes exploratory behavior in the parameter space, without constructing confidence intervals of the estimated rewards. We propose two variants of NeuralRBMLE algorithms: The first variant directly obtains the RBMLE estimator by gradient ascent, and the second variant simplifies RBMLE to a simple index policy through an approximation. We show that both algorithms achieve order-optimality. Through extensive experiments, we demonstrate that the NeuralRBMLE algorithms achieve comparable or better empirical regrets than the state-of-the-art methods on real-world datasets with non-linear reward functions.},
  author    = {Hung, Yu Heng and Hsieh, Ping Chun},
  note      = {Publisher Copyright: Copyright {\textcopyright} 2023, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.; 37th AAAI Conference on Artificial Intelligence, AAAI 2023 ; Conference date: 07-02-2023 Through 14-02-2023},
  year      = {2023},
  month     = jun,
  day       = {27},
  language  = {English},
  series    = {Proceedings of the 37th AAAI Conference on Artificial Intelligence, AAAI 2023},
  publisher = {AAAI Press},
  pages     = {7944--7952},
  editor    = {Williams, Brian and Chen, Yiling and Neville, Jennifer},
  booktitle = {AAAI-23 Technical Tracks 7},
}