@inproceedings{375ed281b5744790a2fa8138073a2c2f,
title = "Learning semantic relationships for better action retrieval in images",
abstract = "Human actions capture a wide variety of interactions between people and objects. As a result, the set of possible actions is extremely large and it is difficult to obtain sufficient training examples for all actions. However, we could compensate for this sparsity in supervision by leveraging the rich semantic relationships between different actions. A single action is often composed of other smaller actions and is exclusive of certain others. We need a method which can reason about such relationships and extrapolate unobserved actions from known actions. Hence, we propose a novel neural network framework which jointly extracts the relationships between actions and uses them for training better action retrieval models. Our model incorporates linguistic, visual, and logical-consistency-based cues to effectively identify these relationships. We train and test our model on a large-scale image dataset of human actions. We show a significant improvement in mean AP compared to different baseline methods, including the HEX-graph approach from Deng et al. [8].",
author = "Vignesh Ramanathan and Congcong Li and Jia Deng and Wei Han and Zhen Li and Kunlong Gu and Yang Song and Samy Bengio and Chuck Rosenberg and Fei-Fei Li",
note = "Publisher Copyright: {\textcopyright} 2015 IEEE; IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015; Conference date: 07-06-2015 through 12-06-2015",
year = "2015",
month = oct,
day = "14",
doi = "10.1109/CVPR.2015.7298713",
language = "English (US)",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
publisher = "IEEE Computer Society",
pages = "1100--1109",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015",
address = "United States",
}