@inproceedings{tu-17,
  title     = {Learning to Embed Words in Context for Syntactic Tasks},
  author    = {Lifu Tu and Kevin Gimpel and Karen Livescu},
  booktitle = {Proc. of RepL4NLP},
  year      = {2017}
}

@inproceedings{tu-17-long,
  title     = {Learning to Embed Words in Context for Syntactic Tasks},
  author    = {Lifu Tu and Kevin Gimpel and Karen Livescu},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  year      = {2017},
  publisher = {Association for Computational Linguistics}
}

@inproceedings{tu-gimpel-livescu:2017:RepL4NLP,
  author    = {Tu, Lifu and Gimpel, Kevin and Livescu, Karen},
  title     = {Learning to Embed Words in Context for Syntactic Tasks},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  month     = {August},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {265--275},
  abstract  = {We present models for embedding words in the context of surrounding words. Such models, which we refer to as token embeddings, represent the characteristics of a word that are specific to a given context, such as word sense, syntactic category, and semantic role. We explore simple, efficient token embedding models based on standard neural network architectures. We learn token embeddings on a large amount of unannotated text and evaluate them as features for part-of-speech taggers and dependency parsers trained on much smaller amounts of annotated data. We find that predictors endowed with token embeddings consistently outperform baseline predictors across a range of context window and training set sizes.},
  url       = {http://www.aclweb.org/anthology/W17-2632}
}