@inproceedings{14f1343b10f64852b46b74dd250f6e70,
title = "Mining semantic affordances of visual object categories",
abstract = "Affordances are fundamental attributes of objects. Affordances reveal the functionalities of objects and the possible actions that can be performed on them. Understanding affordances is crucial for recognizing human activities in visual data and for robots to interact with the world. In this paper we introduce the new problem of mining the knowledge of semantic affordance: given an object, determining whether an action can be performed on it. This is equivalent to connecting verb nodes and noun nodes in WordNet, or filling an affordance matrix encoding the plausibility of each action-object pair. We introduce a new benchmark with crowdsourced ground truth affordances on 20 PASCAL VOC object classes and 957 action classes. We explore a number of approaches including text mining, visual mining, and collaborative filtering. Our analyses yield a number of significant insights that reveal the most effective ways of collecting knowledge of semantic affordances.",
author = "Chao, {Yu Wei} and Zhan Wang and Rada Mihalcea and Jia Deng",
year = "2015",
month = oct,
day = "14",
doi = "10.1109/CVPR.2015.7299054",
language = "English (US)",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
publisher = "IEEE Computer Society",
pages = "4259--4267",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015",
address = "United States",
note = "IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2015 ; Conference date: 07-06-2015 Through 12-06-2015",
}