@inproceedings{yeung2016endtoend,
title = "End-to-End Learning of Action Detection from Frame Glimpses in Videos",
abstract = "In this work we introduce a fully end-to-end approach for action detection in videos that learns to directly predict the temporal bounds of actions. Our intuition is that the process of detecting actions is naturally one of observation and refinement: observing moments in video, and refining hypotheses about when an action is occurring. Based on this insight, we formulate our model as a recurrent neural network-based agent that interacts with a video over time. The agent observes video frames and decides both where to look next and when to emit a prediction. Since backpropagation is not adequate in this non-differentiable setting, we use REINFORCE to learn the agent's decision policy. Our model achieves state-of-the-art results on the THUMOS'14 and ActivityNet datasets while observing only a fraction (2% or less) of the video frames.",
author = "Serena Yeung and Olga Russakovsky and Greg Mori and Li Fei-Fei",
year = "2016",
month = dec,
day = "9",
doi = "10.1109/CVPR.2016.293",
language = "English (US)",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
publisher = "IEEE Computer Society",
pages = "2678--2687",
booktitle = "Proceedings - 29th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016",
address = "United States",
note = "29th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016 ; Conference date: 26-06-2016 Through 01-07-2016",
}
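
The abstract describes a recurrent agent that observes frame features, decides where to look next and when to emit a detection, and is trained with REINFORCE because the glimpse selection is non-differentiable. Below is a minimal sketch of that training pattern, not the authors' implementation: the module names, dimensions, episode length, and the reward function are all illustrative assumptions.

# Minimal REINFORCE sketch (assumed structure, not the paper's code):
# a GRU-based agent samples a frame index ("where to look") and an
# emission decision at each step; the sampled actions' log-probabilities
# are scaled by an episode reward to form the policy-gradient loss.
import torch
import torch.nn as nn

class GlimpseAgent(nn.Module):
    def __init__(self, feat_dim=512, hidden_dim=256, num_locations=100):
        super().__init__()
        self.rnn = nn.GRUCell(feat_dim, hidden_dim)
        # Policy head: categorical distribution over candidate frame indices.
        self.where = nn.Linear(hidden_dim, num_locations)
        # Emission head: Bernoulli probability of emitting a prediction now.
        self.emit = nn.Linear(hidden_dim, 1)

    def forward(self, feat, h):
        h = self.rnn(feat, h)
        where_dist = torch.distributions.Categorical(logits=self.where(h))
        emit_dist = torch.distributions.Bernoulli(logits=self.emit(h))
        return where_dist, emit_dist, h

def reinforce_step(agent, frames, reward_fn, optimizer, steps=6):
    # One episode over precomputed per-frame features `frames`
    # (shape: num_frames x feat_dim, with num_frames >= num_locations).
    h = torch.zeros(1, 256)
    loc = torch.tensor([0])          # start by observing the first frame
    log_probs = []
    for _ in range(steps):
        where_dist, emit_dist, h = agent(frames[loc], h)
        loc = where_dist.sample()    # non-differentiable glimpse choice
        emit = emit_dist.sample()    # non-differentiable emission choice
        log_probs.append(where_dist.log_prob(loc) + emit_dist.log_prob(emit))
    # `reward_fn` is a hypothetical stand-in for a task reward, e.g. the
    # overlap of emitted predictions with ground-truth action intervals.
    reward = reward_fn(loc, emit)
    loss = -(reward * torch.stack(log_probs).sum())   # REINFORCE objective
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Toy usage with random features and a constant dummy reward:
agent = GlimpseAgent()
opt = torch.optim.Adam(agent.parameters(), lr=1e-4)
frames = torch.randn(100, 512)
reinforce_step(agent, frames, lambda loc, emit: 1.0, opt)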