@inproceedings{11fa32a90681422ca15a6bed4b32b3ac,
title = "Speaker naming in movies",
abstract = "We propose a new model for speaker naming in movies that leverages visual, textual, and acoustic modalities in an unified optimization framework. To evaluate the performance of our model, we introduce a new dataset consisting of six episodes of the Big Bang Theory TV show and eighteen full movies covering different genres. Our experiments show that our multimodal model significantly outperforms several competitive baselines on the average weighted F-score metric. To demonstrate the effectiveness of our framework, we design an end-To-end memory network model that leverages our speaker naming model and achieves state-of-The-Art results on the subtitles task of the MovieQA 2017 Challenge.",
author = "Mahmoud Azab and Mingzhe Wang and Max Smith and Noriyuki Kojima and Jia Deng and Rada Mihalcea",
year = "2018",
language = "English (US)",
series = "NAACL HLT 2018 - 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies - Proceedings of the Conference",
publisher = "Association for Computational Linguistics (ACL)",
pages = "2206--2216",
booktitle = "Long Papers",
note = "2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2018 ; Conference date: 01-06-2018 Through 06-06-2018",
}