@inproceedings{slobodkin-etal-2022-semantics,
title = "Semantics-aware Attention Improves Neural Machine Translation",
author = "Slobodkin, Aviv and
Choshen, Leshem and
Abend, Omri",
editor = "Nastase, Vivi and
Pavlick, Ellie and
Pilehvar, Mohammad Taher and
Camacho-Collados, Jose and
Raganato, Alessandro",
booktitle = "Proceedings of the 11th Joint Conference on Lexical and Computational Semantics",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.starsem-1.3",
doi = "10.18653/v1/2022.starsem-1.3",
pages = "28--43",
abstract = "The integration of syntactic structures into Transformer machine translation has shown positive results, but to our knowledge, no work has attempted to do so with semantic structures. In this work we propose two novel parameter-free methods for injecting semantic information into Transformers, both rely on semantics-aware masking of (some of) the attention heads. One such method operates on the encoder, through a Scene-Aware Self-Attention (SASA) head. Another on the decoder, through a Scene-Aware Cross-Attention (SACrA) head. We show a consistent improvement over the vanilla Transformer and syntax-aware models for four language pairs. We further show an additional gain when using both semantic and syntactic structures in some language pairs.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="slobodkin-etal-2022-semantics">
    <titleInfo>
      <title>Semantics-aware Attention Improves Neural Machine Translation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Aviv</namePart>
      <namePart type="family">Slobodkin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Leshem</namePart>
      <namePart type="family">Choshen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Omri</namePart>
      <namePart type="family">Abend</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 11th Joint Conference on Lexical and Computational Semantics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Vivi</namePart>
        <namePart type="family">Nastase</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ellie</namePart>
        <namePart type="family">Pavlick</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohammad</namePart>
        <namePart type="given">Taher</namePart>
        <namePart type="family">Pilehvar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jose</namePart>
        <namePart type="family">Camacho-Collados</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alessandro</namePart>
        <namePart type="family">Raganato</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Seattle, Washington</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The integration of syntactic structures into Transformer machine translation has shown positive results, but to our knowledge, no work has attempted to do so with semantic structures. In this work we propose two novel parameter-free methods for injecting semantic information into Transformers, both relying on semantics-aware masking of (some of) the attention heads. One such method operates on the encoder, through a Scene-Aware Self-Attention (SASA) head. The other operates on the decoder, through a Scene-Aware Cross-Attention (SACrA) head. We show a consistent improvement over the vanilla Transformer and syntax-aware models for four language pairs. We further show an additional gain when using both semantic and syntactic structures in some language pairs.</abstract>
<identifier type="citekey">slobodkin-etal-2022-semantics</identifier>
<identifier type="doi">10.18653/v1/2022.starsem-1.3</identifier>
<location>
<url>https://aclanthology.org/2022.starsem-1.3</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>28</start>
<end>43</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Semantics-aware Attention Improves Neural Machine Translation
%A Slobodkin, Aviv
%A Choshen, Leshem
%A Abend, Omri
%Y Nastase, Vivi
%Y Pavlick, Ellie
%Y Pilehvar, Mohammad Taher
%Y Camacho-Collados, Jose
%Y Raganato, Alessandro
%S Proceedings of the 11th Joint Conference on Lexical and Computational Semantics
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F slobodkin-etal-2022-semantics
%X The integration of syntactic structures into Transformer machine translation has shown positive results, but to our knowledge, no work has attempted to do so with semantic structures. In this work we propose two novel parameter-free methods for injecting semantic information into Transformers, both relying on semantics-aware masking of (some of) the attention heads. One such method operates on the encoder, through a Scene-Aware Self-Attention (SASA) head. The other operates on the decoder, through a Scene-Aware Cross-Attention (SACrA) head. We show a consistent improvement over the vanilla Transformer and syntax-aware models for four language pairs. We further show an additional gain when using both semantic and syntactic structures in some language pairs.
%R 10.18653/v1/2022.starsem-1.3
%U https://aclanthology.org/2022.starsem-1.3
%U https://doi.org/10.18653/v1/2022.starsem-1.3
%P 28-43
Markdown (Informal)
[Semantics-aware Attention Improves Neural Machine Translation](https://aclanthology.org/2022.starsem-1.3) (Slobodkin et al., *SEM 2022)
ACL
Aviv Slobodkin, Leshem Choshen, and Omri Abend. 2022. Semantics-aware Attention Improves Neural Machine Translation. In Proceedings of the 11th Joint Conference on Lexical and Computational Semantics, pages 28–43, Seattle, Washington. Association for Computational Linguistics.