@inproceedings{htut-etal-2018-training,
title = "Training a Ranking Function for Open-Domain Question Answering",
author = "Htut, Phu Mon and
Bowman, Samuel and
Cho, Kyunghyun",
editor = "Cordeiro, Silvio Ricardo and
Oraby, Shereen and
Pavalanathan, Umashanthi and
Rim, Kyeongmin",
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Student Research Workshop",
month = jun,
year = "2018",
address = "New Orleans, Louisiana, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N18-4017",
doi = "10.18653/v1/N18-4017",
pages = "120--127",
abstract = "In recent years, there have been remarkable advances in deep learning methods for machine reading. In machine reading, the machine reader has to extract the answer from a given ground-truth paragraph. Recently, state-of-the-art machine reading models have achieved human-level performance on SQuAD, a reading comprehension-style question answering (QA) task. The success of machine reading has inspired researchers to combine information retrieval with machine reading to tackle open-domain QA. However, these systems perform poorly compared to reading comprehension-style QA because it is difficult to retrieve the passages that contain the answer to the question. In this study, we propose two neural network rankers that assign scores to passages based on their likelihood of containing the answer to a given question. Additionally, we analyze the relative importance of semantic similarity and word-level relevance matching in open-domain QA.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="htut-etal-2018-training">
<titleInfo>
<title>Training a Ranking Function for Open-Domain Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Phu</namePart>
<namePart type="given">Mon</namePart>
<namePart type="family">Htut</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samuel</namePart>
<namePart type="family">Bowman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kyunghyun</namePart>
<namePart type="family">Cho</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Silvio</namePart>
<namePart type="given">Ricardo</namePart>
<namePart type="family">Cordeiro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shereen</namePart>
<namePart type="family">Oraby</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Umashanthi</namePart>
<namePart type="family">Pavalanathan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kyeongmin</namePart>
<namePart type="family">Rim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">New Orleans, Louisiana, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In recent years, there have been remarkable advances in deep learning methods for machine reading. In machine reading, the machine reader has to extract the answer from a given ground-truth paragraph. Recently, state-of-the-art machine reading models have achieved human-level performance on SQuAD, a reading comprehension-style question answering (QA) task. The success of machine reading has inspired researchers to combine information retrieval with machine reading to tackle open-domain QA. However, these systems perform poorly compared to reading comprehension-style QA because it is difficult to retrieve the passages that contain the answer to the question. In this study, we propose two neural network rankers that assign scores to passages based on their likelihood of containing the answer to a given question. Additionally, we analyze the relative importance of semantic similarity and word-level relevance matching in open-domain QA.</abstract>
<identifier type="citekey">htut-etal-2018-training</identifier>
<identifier type="doi">10.18653/v1/N18-4017</identifier>
<location>
<url>https://aclanthology.org/N18-4017</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>120</start>
<end>127</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Training a Ranking Function for Open-Domain Question Answering
%A Htut, Phu Mon
%A Bowman, Samuel
%A Cho, Kyunghyun
%Y Cordeiro, Silvio Ricardo
%Y Oraby, Shereen
%Y Pavalanathan, Umashanthi
%Y Rim, Kyeongmin
%S Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana, USA
%F htut-etal-2018-training
%X In recent years, there have been remarkable advances in deep learning methods for machine reading. In machine reading, the machine reader has to extract the answer from a given ground-truth paragraph. Recently, state-of-the-art machine reading models have achieved human-level performance on SQuAD, a reading comprehension-style question answering (QA) task. The success of machine reading has inspired researchers to combine information retrieval with machine reading to tackle open-domain QA. However, these systems perform poorly compared to reading comprehension-style QA because it is difficult to retrieve the passages that contain the answer to the question. In this study, we propose two neural network rankers that assign scores to passages based on their likelihood of containing the answer to a given question. Additionally, we analyze the relative importance of semantic similarity and word-level relevance matching in open-domain QA.
%R 10.18653/v1/N18-4017
%U https://aclanthology.org/N18-4017
%U https://doi.org/10.18653/v1/N18-4017
%P 120-127
Markdown (Informal)
[Training a Ranking Function for Open-Domain Question Answering](https://aclanthology.org/N18-4017) (Htut et al., NAACL 2018)

ACL
Phu Mon Htut, Samuel Bowman, and Kyunghyun Cho. 2018. Training a Ranking Function for Open-Domain Question Answering. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 120–127, New Orleans, Louisiana, USA. Association for Computational Linguistics.