@inproceedings{8f4f28f5ffaf4968a4d89c95e45532cb,
title = "Evaluating vector-space models of analogy",
abstract = "Vector-space representations provide geometric tools for reasoning about the similarity of a set of objects and their relationships. Recent machine learning methods for deriving vector-space embeddings of words (e.g., word2vec) have achieved considerable success in natural language processing. These vector spaces have also been shown to exhibit a surprising capacity to capture verbal analogies, with similar results for natural images, giving new life to a classic model of analogies as parallelograms that was first proposed by cognitive scientists. We evaluate the parallelogram model of analogy as applied to modern word embeddings, providing a detailed analysis of the extent to which this approach captures human relational similarity judgments in a large benchmark dataset. We find that some semantic relationships are better captured than others. We then provide evidence for deeper limitations of the parallelogram model based on the intrinsic geometric constraints of vector spaces, paralleling classic results for first-order similarity.",
keywords = "GloVe, analogy, vector space models, word2vec",
author = "Chen, Dawn and Peterson, {Joshua C.} and Griffiths, {Thomas L.}",
note = "Publisher Copyright: {\textcopyright} CogSci 2017; 39th Annual Meeting of the Cognitive Science Society: Computational Foundations of Cognition, CogSci 2017; Conference date: 26-07-2017 through 29-07-2017",
year = "2017",
language = "English (US)",
series = "CogSci 2017 - Proceedings of the 39th Annual Meeting of the Cognitive Science Society: Computational Foundations of Cognition",
publisher = "The Cognitive Science Society",
pages = "1746--1751",
booktitle = "CogSci 2017 - Proceedings of the 39th Annual Meeting of the Cognitive Science Society",
}
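
The parallelogram model discussed in the abstract answers an analogy a:b::c:? by adding the offset b - a to c and returning the vocabulary word whose embedding lies nearest to the result (e.g., king - man + woman is expected to land near queen). A minimal sketch of this idea in Python follows, using a small hypothetical set of hand-made vectors for illustration rather than the word2vec or GloVe embeddings evaluated in the paper; the function name and toy vocabulary are assumptions, not part of the cited work.

import numpy as np

# Hypothetical toy embeddings; actual experiments would load pretrained
# word2vec or GloVe vectors instead.
vocab = {
    "man":   np.array([0.9, 0.1, 0.0]),
    "woman": np.array([0.9, 0.1, 1.0]),
    "king":  np.array([0.1, 0.9, 0.0]),
    "queen": np.array([0.1, 0.9, 1.0]),
}

def parallelogram(a, b, c, vocab):
    """Solve a:b :: c:? by the nearest cosine neighbor of b - a + c."""
    target = vocab[b] - vocab[a] + vocab[c]

    def cosine(u, v):
        return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

    # Exclude the query words themselves, as is standard in analogy evaluation.
    candidates = {w: v for w, v in vocab.items() if w not in (a, b, c)}
    return max(candidates, key=lambda w: cosine(candidates[w], target))

print(parallelogram("man", "woman", "king", vocab))  # expected output: queen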