BibTeX
@inproceedings{wu-etal-2024-evaluating,
title = "Evaluating Large Language Models on Social Signal Sensitivity: An Appraisal Theory Approach",
author = "Wu, Zhen and
Dutt, Ritam and
Rose, Carolyn",
editor = "Soni, Nikita and
Flek, Lucie and
Sharma, Ashish and
Yang, Diyi and
Hooker, Sara and
Schwartz, H. Andrew",
booktitle = "Proceedings of the 1st Human-Centered Large Language Modeling Workshop",
month = aug,
year = "2024",
address = "TBD",
publisher = "ACL",
url = "https://aclanthology.org/2024.hucllm-1.6",
doi = "10.18653/v1/2024.hucllm-1.6",
pages = "67--80",
abstract = "We present a framework to assess the sensitivity of Large Language Models (LLMs) to textually embedded social signals using an Appraisal Theory perspective. We report on an experiment that uses prompts encoding three dimensions of social signals: Affect, Judgment, and Appreciation. In response to the prompt, an LLM generates both an analysis (Insight) and a conversational Response, which are analyzed in terms of sensitivity to the signals. We quantitatively evaluate the output text through topical analysis of the Insight and predicted social intelligence scores of the Response in terms of empathy and emotional polarity. Key findings show that LLMs are more sensitive to positive signals. The personas impact Responses but not the Insight. We discuss how our framework can be extended to a broader set of social signals, personas, and scenarios to evaluate LLM behaviors under various conditions.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wu-etal-2024-evaluating">
<titleInfo>
<title>Evaluating Large Language Models on Social Signal Sensitivity: An Appraisal Theory Approach</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhen</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ritam</namePart>
<namePart type="family">Dutt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Human-Centered Large Language Modeling Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nikita</namePart>
<namePart type="family">Soni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucie</namePart>
<namePart type="family">Flek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="family">Sharma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diyi</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="family">Hooker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">H</namePart>
<namePart type="given">Andrew</namePart>
<namePart type="family">Schwartz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present a framework to assess the sensitivity of Large Language Models (LLMs) to textually embedded social signals using an Appraisal Theory perspective. We report on an experiment that uses prompts encoding three dimensions of social signals: Affect, Judgment, and Appreciation. In response to the prompt, an LLM generates both an analysis (Insight) and a conversational Response, which are analyzed in terms of sensitivity to the signals. We quantitatively evaluate the output text through topical analysis of the Insight and predicted social intelligence scores of the Response in terms of empathy and emotional polarity. Key findings show that LLMs are more sensitive to positive signals. The personas impact Responses but not the Insight. We discuss how our framework can be extended to a broader set of social signals, personas, and scenarios to evaluate LLM behaviors under various conditions.</abstract>
<identifier type="citekey">wu-etal-2024-evaluating</identifier>
<identifier type="doi">10.18653/v1/2024.hucllm-1.6</identifier>
<location>
<url>https://aclanthology.org/2024.hucllm-1.6</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>67</start>
<end>80</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Evaluating Large Language Models on Social Signal Sensitivity: An Appraisal Theory Approach
%A Wu, Zhen
%A Dutt, Ritam
%A Rose, Carolyn
%Y Soni, Nikita
%Y Flek, Lucie
%Y Sharma, Ashish
%Y Yang, Diyi
%Y Hooker, Sara
%Y Schwartz, H. Andrew
%S Proceedings of the 1st Human-Centered Large Language Modeling Workshop
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F wu-etal-2024-evaluating
%X We present a framework to assess the sensitivity of Large Language Models (LLMs) to textually embedded social signals using an Appraisal Theory perspective. We report on an experiment that uses prompts encoding three dimensions of social signals: Affect, Judgment, and Appreciation. In response to the prompt, an LLM generates both an analysis (Insight) and a conversational Response, which are analyzed in terms of sensitivity to the signals. We quantitatively evaluate the output text through topical analysis of the Insight and predicted social intelligence scores of the Response in terms of empathy and emotional polarity. Key findings show that LLMs are more sensitive to positive signals. The personas impact Responses but not the Insight. We discuss how our framework can be extended to a broader set of social signals, personas, and scenarios to evaluate LLM behaviors under various conditions.
%R 10.18653/v1/2024.hucllm-1.6
%U https://aclanthology.org/2024.hucllm-1.6
%U https://doi.org/10.18653/v1/2024.hucllm-1.6
%P 67-80
Markdown (Informal)
[Evaluating Large Language Models on Social Signal Sensitivity: An Appraisal Theory Approach](https://aclanthology.org/2024.hucllm-1.6) (Wu et al., HuCLLM-WS 2024)
ACL
Zhen Wu, Ritam Dutt, and Carolyn Rose. 2024. Evaluating Large Language Models on Social Signal Sensitivity: An Appraisal Theory Approach. In Proceedings of the 1st Human-Centered Large Language Modeling Workshop, pages 67–80, Bangkok, Thailand. Association for Computational Linguistics.
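
For readers skimming the abstract above, the sketch below illustrates the kind of evaluation loop it describes: prompts are built around Appraisal Theory dimensions (Affect, Judgment, Appreciation) with a polarity, the model is asked for both an analysis ("Insight") and a conversational "Response", and the outputs are scored afterwards. This is a minimal, hypothetical illustration, not the authors' code: the seed utterances, the generate stub, and the keyword empathy proxy are placeholder assumptions standing in for the paper's prompts, persona-conditioned LLM calls, topical analysis, and learned social-intelligence scorers.

"""Illustrative sketch of the prompt-sweep described in the abstract.

Not the authors' implementation: utterances, the LLM stub, and the
empathy proxy below are placeholders.
"""

# Hypothetical seed utterances, one per (dimension, polarity) cell.
SIGNALS = {
    ("Affect", "positive"): "I'm thrilled about how the project turned out!",
    ("Affect", "negative"): "I'm really anxious about tomorrow's review.",
    ("Judgment", "positive"): "She handled that conflict with real integrity.",
    ("Judgment", "negative"): "He took credit for work that wasn't his.",
    ("Appreciation", "positive"): "The new design is elegant and easy to use.",
    ("Appreciation", "negative"): "The report was cluttered and hard to follow.",
}

PROMPT = (
    'A speaker says: "{utterance}"\n'
    "1) Insight: analyze the social signal the speaker is expressing.\n"
    "2) Response: reply to the speaker in one or two sentences."
)


def generate(prompt: str) -> dict:
    # Placeholder for a real LLM call (optionally conditioned on a persona);
    # returns the two output fields the framework analyzes.
    return {"insight": "...model analysis...", "response": "...model reply..."}


def empathy_proxy(text: str) -> int:
    # Toy keyword count standing in for the predicted empathy and
    # emotional-polarity scores used in the paper.
    cues = ("sorry", "understand", "glad", "congratulations", "that sounds")
    return sum(cue in text.lower() for cue in cues)


rows = []
for (dimension, polarity), utterance in SIGNALS.items():
    out = generate(PROMPT.format(utterance=utterance))
    rows.append((dimension, polarity, empathy_proxy(out["response"])))

for dimension, polarity, score in rows:
    print(f"{dimension:12s} {polarity:8s} empathy_proxy={score}")

Comparing scores across the positive and negative rows (and across personas, once the stub is wired to a real model and scorer) mirrors the comparisons behind the abstract's findings that the models studied are more sensitive to positive signals and that personas affect the Response but not the Insight.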