@inproceedings{simoncini-spanakis-2021-seqattack,
  title     = {{SeqAttack}: {On} Adversarial Attacks for Named Entity Recognition},
  author    = {Simoncini, Walter and Spanakis, Gerasimos},
  editor    = {Adel, Heike and Shi, Shuming},
  booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
  month     = nov,
  year      = {2021},
  address   = {Online and Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.emnlp-demo.35},
  doi       = {10.18653/v1/2021.emnlp-demo.35},
  pages     = {308--318},
  abstract  = {Named Entity Recognition is a fundamental task in information extraction and is an essential element for various Natural Language Processing pipelines. Adversarial attacks have been shown to greatly affect the performance of text classification systems but knowledge about their effectiveness against named entity recognition models is limited. This paper investigates the effectiveness and portability of adversarial attacks from text classification to named entity recognition and the ability of adversarial training to counteract these attacks. We find that character-level and word-level attacks are the most effective, but adversarial training can grant significant protection at little to no expense of standard performance. Alongside our results, we also release SeqAttack, a framework to conduct adversarial attacks against token classification models (used in this work for named entity recognition) and a companion web application to inspect and cherry pick adversarial examples.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="simoncini-spanakis-2021-seqattack">
<titleInfo>
<title>SeqAttack: On Adversarial Attacks for Named Entity Recognition</title>
</titleInfo>
<name type="personal">
<namePart type="given">Walter</namePart>
<namePart type="family">Simoncini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gerasimos</namePart>
<namePart type="family">Spanakis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Heike</namePart>
<namePart type="family">Adel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuming</namePart>
<namePart type="family">Shi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Named Entity Recognition is a fundamental task in information extraction and is an essential element for various Natural Language Processing pipelines. Adversarial attacks have been shown to greatly affect the performance of text classification systems but knowledge about their effectiveness against named entity recognition models is limited. This paper investigates the effectiveness and portability of adversarial attacks from text classification to named entity recognition and the ability of adversarial training to counteract these attacks. We find that character-level and word-level attacks are the most effective, but adversarial training can grant significant protection at little to no expense of standard performance. Alongside our results, we also release SeqAttack, a framework to conduct adversarial attacks against token classification models (used in this work for named entity recognition) and a companion web application to inspect and cherry pick adversarial examples.</abstract>
<identifier type="citekey">simoncini-spanakis-2021-seqattack</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-demo.35</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-demo.35</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>308</start>
<end>318</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T SeqAttack: On Adversarial Attacks for Named Entity Recognition
%A Simoncini, Walter
%A Spanakis, Gerasimos
%Y Adel, Heike
%Y Shi, Shuming
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F simoncini-spanakis-2021-seqattack
%X Named Entity Recognition is a fundamental task in information extraction and is an essential element for various Natural Language Processing pipelines. Adversarial attacks have been shown to greatly affect the performance of text classification systems but knowledge about their effectiveness against named entity recognition models is limited. This paper investigates the effectiveness and portability of adversarial attacks from text classification to named entity recognition and the ability of adversarial training to counteract these attacks. We find that character-level and word-level attacks are the most effective, but adversarial training can grant significant protection at little to no expense of standard performance. Alongside our results, we also release SeqAttack, a framework to conduct adversarial attacks against token classification models (used in this work for named entity recognition) and a companion web application to inspect and cherry pick adversarial examples.
%R 10.18653/v1/2021.emnlp-demo.35
%U https://aclanthology.org/2021.emnlp-demo.35
%U https://doi.org/10.18653/v1/2021.emnlp-demo.35
%P 308-318
Markdown (Informal)
[SeqAttack: On Adversarial Attacks for Named Entity Recognition](https://aclanthology.org/2021.emnlp-demo.35) (Simoncini & Spanakis, EMNLP 2021)
ACL
- Walter Simoncini and Gerasimos Spanakis. 2021. SeqAttack: On Adversarial Attacks for Named Entity Recognition. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 308–318, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.