@inproceedings{indurthi-etal-2018-cut,
title = "Cut to the Chase: A Context Zoom-in Network for Reading Comprehension",
author = "Indurthi, Sathish Reddy and
Yu, Seunghak and
Back, Seohyun and
Cuay{\'a}huitl, Heriberto",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D18-1054",
doi = "10.18653/v1/D18-1054",
pages = "570--575",
abstract = "In recent years many deep neural networks have been proposed to solve Reading Comprehension (RC) tasks. Most of these models suffer from reasoning over long documents and do not trivially generalize to cases where the answer is not present as a span in a given document. We present a novel neural-based architecture that is capable of extracting relevant regions based on a given question-document pair and generating a well-formed answer. To show the effectiveness of our architecture, we conducted several experiments on the recently proposed and challenging RC dataset {`}NarrativeQA{'}. The proposed architecture outperforms state-of-the-art results by 12.62{\%} (ROUGE-L) relative improvement.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="indurthi-etal-2018-cut">
<titleInfo>
<title>Cut to the Chase: A Context Zoom-in Network for Reading Comprehension</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sathish</namePart>
<namePart type="given">Reddy</namePart>
<namePart type="family">Indurthi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seunghak</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seohyun</namePart>
<namePart type="family">Back</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Heriberto</namePart>
<namePart type="family">Cuayáhuitl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10/2018-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ellen</namePart>
<namePart type="family">Riloff</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Chiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Hockenmaier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jun’ichi</namePart>
<namePart type="family">Tsujii</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In recent years many deep neural networks have been proposed to solve Reading Comprehension (RC) tasks. Most of these models suffer from reasoning over long documents and do not trivially generalize to cases where the answer is not present as a span in a given document. We present a novel neural-based architecture that is capable of extracting relevant regions based on a given question-document pair and generating a well-formed answer. To show the effectiveness of our architecture, we conducted several experiments on the recently proposed and challenging RC dataset ‘NarrativeQA’. The proposed architecture outperforms state-of-the-art results by 12.62% (ROUGE-L) relative improvement.</abstract>
<identifier type="citekey">indurthi-etal-2018-cut</identifier>
<identifier type="doi">10.18653/v1/D18-1054</identifier>
<location>
<url>https://aclanthology.org/D18-1054</url>
</location>
<part>
<date>2018-10/2018-11</date>
<extent unit="page">
<start>570</start>
<end>575</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Cut to the Chase: A Context Zoom-in Network for Reading Comprehension
%A Indurthi, Sathish Reddy
%A Yu, Seunghak
%A Back, Seohyun
%A Cuayáhuitl, Heriberto
%Y Riloff, Ellen
%Y Chiang, David
%Y Hockenmaier, Julia
%Y Tsujii, Jun’ichi
%S Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing
%D 2018
%8 oct-nov
%I Association for Computational Linguistics
%C Brussels, Belgium
%F indurthi-etal-2018-cut
%X In recent years many deep neural networks have been proposed to solve Reading Comprehension (RC) tasks. Most of these models suffer from reasoning over long documents and do not trivially generalize to cases where the answer is not present as a span in a given document. We present a novel neural-based architecture that is capable of extracting relevant regions based on a given question-document pair and generating a well-formed answer. To show the effectiveness of our architecture, we conducted several experiments on the recently proposed and challenging RC dataset ‘NarrativeQA’. The proposed architecture outperforms state-of-the-art results by 12.62% (ROUGE-L) relative improvement.
%R 10.18653/v1/D18-1054
%U https://aclanthology.org/D18-1054
%U https://doi.org/10.18653/v1/D18-1054
%P 570-575
Markdown (Informal)
[Cut to the Chase: A Context Zoom-in Network for Reading Comprehension](https://aclanthology.org/D18-1054) (Indurthi et al., EMNLP 2018)
ACL