@inproceedings{su-etal-2024-refine,
title = "Refine, Align, and Aggregate: Multi-view Linguistic Features Enhancement for Aspect Sentiment Triplet Extraction",
author = "Su, Guixin and
Wu, Mingmin and
Huang, Zhongqiang and
Zhang, Yongcheng and
Wang, Tongguan and
Hu, Yuxue and
Sha, Ying",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-acl.191",
doi = "10.18653/v1/2024.findings-acl.191",
pages = "3212--3228",
abstract = "Aspect Sentiment Triplet Extraction (ASTE) aims to extract the triplets of aspect terms, their associated sentiment and opinion terms. Previous works based on different modeling paradigms have achieved promising results. However, these methods struggle to comprehensively explore the various specific relations between sentiment elements in multi-view linguistic features, which is the prior indication effect for facilitating sentiment triplets extraction, requiring to align and aggregate them to capture the complementary higher-order interactions. In this paper, we propose Multi-view Linguistic Features Enhancement (MvLFE) to explore the aforementioned prior indication effect in the {``}Refine, Align, and Aggregate{''} learning process. Specifically, we first introduce the relational graph attention network to encode the word-pair relations represented by each linguistic feature and refine them to pay more attention to the aspect-opinion pairs. Next, we employ the multi-view contrastive learning to align them at a fine-grained level in the contextual semantic space to maintain semantic consistency. Finally, we utilize the multi-semantic cross attention to capture and aggregate the complementary higher-order interactions between diverse linguistic features to enhance the aspect-opinion relations. Experimental results on several benchmark datasets show the effectiveness and robustness of our model, which achieves state-of-the-art performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="su-etal-2024-refine">
<titleInfo>
<title>Refine, Align, and Aggregate: Multi-view Linguistic Features Enhancement for Aspect Sentiment Triplet Extraction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Guixin</namePart>
<namePart type="family">Su</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mingmin</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhongqiang</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yongcheng</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tongguan</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuxue</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ying</namePart>
<namePart type="family">Sha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andre</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Aspect Sentiment Triplet Extraction (ASTE) aims to extract the triplets of aspect terms, their associated sentiment and opinion terms. Previous works based on different modeling paradigms have achieved promising results. However, these methods struggle to comprehensively explore the various specific relations between sentiment elements in multi-view linguistic features, which is the prior indication effect for facilitating sentiment triplets extraction, requiring to align and aggregate them to capture the complementary higher-order interactions. In this paper, we propose Multi-view Linguistic Features Enhancement (MvLFE) to explore the aforementioned prior indication effect in the “Refine, Align, and Aggregate” learning process. Specifically, we first introduce the relational graph attention network to encode the word-pair relations represented by each linguistic feature and refine them to pay more attention to the aspect-opinion pairs. Next, we employ the multi-view contrastive learning to align them at a fine-grained level in the contextual semantic space to maintain semantic consistency. Finally, we utilize the multi-semantic cross attention to capture and aggregate the complementary higher-order interactions between diverse linguistic features to enhance the aspect-opinion relations. Experimental results on several benchmark datasets show the effectiveness and robustness of our model, which achieves state-of-the-art performance.</abstract>
<identifier type="citekey">su-etal-2024-refine</identifier>
<identifier type="doi">10.18653/v1/2024.findings-acl.191</identifier>
<location>
<url>https://aclanthology.org/2024.findings-acl.191</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>3212</start>
<end>3228</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Refine, Align, and Aggregate: Multi-view Linguistic Features Enhancement for Aspect Sentiment Triplet Extraction
%A Su, Guixin
%A Wu, Mingmin
%A Huang, Zhongqiang
%A Zhang, Yongcheng
%A Wang, Tongguan
%A Hu, Yuxue
%A Sha, Ying
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Findings of the Association for Computational Linguistics: ACL 2024
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F su-etal-2024-refine
%X Aspect Sentiment Triplet Extraction (ASTE) aims to extract the triplets of aspect terms, their associated sentiment and opinion terms. Previous works based on different modeling paradigms have achieved promising results. However, these methods struggle to comprehensively explore the various specific relations between sentiment elements in multi-view linguistic features, which is the prior indication effect for facilitating sentiment triplets extraction, requiring to align and aggregate them to capture the complementary higher-order interactions. In this paper, we propose Multi-view Linguistic Features Enhancement (MvLFE) to explore the aforementioned prior indication effect in the “Refine, Align, and Aggregate” learning process. Specifically, we first introduce the relational graph attention network to encode the word-pair relations represented by each linguistic feature and refine them to pay more attention to the aspect-opinion pairs. Next, we employ the multi-view contrastive learning to align them at a fine-grained level in the contextual semantic space to maintain semantic consistency. Finally, we utilize the multi-semantic cross attention to capture and aggregate the complementary higher-order interactions between diverse linguistic features to enhance the aspect-opinion relations. Experimental results on several benchmark datasets show the effectiveness and robustness of our model, which achieves state-of-the-art performance.
%R 10.18653/v1/2024.findings-acl.191
%U https://aclanthology.org/2024.findings-acl.191
%U https://doi.org/10.18653/v1/2024.findings-acl.191
%P 3212-3228
Markdown (Informal)
[Refine, Align, and Aggregate: Multi-view Linguistic Features Enhancement for Aspect Sentiment Triplet Extraction](https://aclanthology.org/2024.findings-acl.191) (Su et al., Findings 2024)
ACL
Guixin Su, Mingmin Wu, Zhongqiang Huang, Yongcheng Zhang, Tongguan Wang, Yuxue Hu, and Ying Sha. 2024. Refine, Align, and Aggregate: Multi-view Linguistic Features Enhancement for Aspect Sentiment Triplet Extraction. In Findings of the Association for Computational Linguistics: ACL 2024, pages 3212–3228, Bangkok, Thailand. Association for Computational Linguistics.
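The abstract names multi-view contrastive learning as the "align" step between linguistic feature views. Purely as a generic illustration of that technique (not the authors' MvLFE implementation; the two-view setup, feature dimensions, and temperature below are assumed placeholders), a symmetric InfoNCE-style alignment loss might be sketched as follows:

```python
# Minimal, hypothetical sketch of a multi-view contrastive (InfoNCE-style)
# alignment loss between two linguistic feature views. This is NOT the paper's
# MvLFE code; encoders, dimensions, and the temperature are placeholders.
import torch
import torch.nn.functional as F


def multiview_contrastive_loss(view_a: torch.Tensor,
                               view_b: torch.Tensor,
                               temperature: float = 0.07) -> torch.Tensor:
    """Pull matching representations of the same items under two views together
    and push mismatched pairs apart.

    view_a, view_b: (batch, dim) representations of the same items produced by
    two different (hypothetical) linguistic-feature encoders.
    """
    # L2-normalize so dot products act as cosine similarities.
    a = F.normalize(view_a, dim=-1)
    b = F.normalize(view_b, dim=-1)

    # Similarity matrix: entry (i, j) compares item i in view A with item j in view B.
    logits = a @ b.t() / temperature

    # Item i in view A should match item i in view B (the diagonal positives).
    targets = torch.arange(a.size(0), device=a.device)

    # Symmetric InfoNCE: A -> B and B -> A directions, averaged.
    return 0.5 * (F.cross_entropy(logits, targets) +
                  F.cross_entropy(logits.t(), targets))


if __name__ == "__main__":
    # Toy usage with random features standing in for two linguistic views.
    x = torch.randn(8, 64)
    y = torch.randn(8, 64)
    print(multiview_contrastive_loss(x, y).item())
```

This sketch only shows the general contrastive-alignment idea referenced in the abstract; the paper's actual refinement (relational graph attention) and aggregation (multi-semantic cross attention) components are not reproduced here.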