@InProceedings{cai-tu-gimpel:2017:Short,
  author    = {Cai, Zheng and Tu, Lifu and Gimpel, Kevin},
  title     = {Pay Attention to the Ending: Strong Neural Baselines for the {ROC} Story Cloze Task},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {616--622},
  abstract  = {We consider the ROC story cloze task (Mostafazadeh et al., 2016) and present several findings. We develop a model that uses hierarchical recurrent networks with attention to encode the sentences in the story and score candidate endings. By discarding the large training set and only training on the validation set, we achieve an accuracy of 74.7%. Even when we discard the story plots (sentences before the ending) and only train to choose the better of two endings, we can still reach 72.5%. We then analyze this ending-only task setting. We estimate human accuracy to be 78% and find several types of clues that lead to this high accuracy, including those related to sentiment, negation, and general ending likelihood regardless of the story context.},
  url       = {http://aclweb.org/anthology/P17-2097}
}