@incollection{1e2fe567440f4be19432bdf1da9b3e5b,
  title     = {Imaging Valuation Models in Human Choice},
  abstract  = {To make a decision, a system must assign value to each of its available choices. In the human brain, one approach to studying valuation has used rewarding stimuli to map out brain responses by varying the dimension or importance of the rewards. However, theoretical models have taught us that value computations are complex, and so reward probes alone can give only partial information about neural responses related to valuation. In recent years, computationally principled models of value learning have been used in conjunction with noninvasive neuroimaging to tease out neural valuation responses related to reward-learning and decision-making. We restrict our review to the role of these models in a new generation of experiments that seeks to build on a now-large body of diverse reward-related brain responses. We show that the models and the measurements based on them point the way forward in two important directions: the valuation of time and the valuation of fictive experience.},
  keywords  = {Dopamine, Fictive learning signal, Reinforcement learning, Reward, Ventral striatum},
  author    = {Montague, P. Read and King-Casas, Brooks and Cohen, Jonathan D.},
  editor    = {Hyman, Steven and Jessell, Thomas and Stevens, Charles},
  booktitle = {Annual Review of Neuroscience},
  volume    = {29},
  pages     = {417--448},
  publisher = {Annual Reviews},
  year      = {2006},
  doi       = {10.1146/annurev.neuro.29.051605.112903},
  isbn      = {0824324293},
  language  = {English (US)},
}