@inproceedings{lykartsis-kotti-2019-prediction,
title = "Prediction of User Emotion and Dialogue Success Using Audio Spectrograms and Convolutional Neural Networks",
author = "Lykartsis, Athanasios and
Kotti, Margarita",
editor = "Nakamura, Satoshi and
Gasic, Milica and
Zukerman, Ingrid and
Skantze, Gabriel and
Nakano, Mikio and
Papangelis, Alexandros and
Ultes, Stefan and
Yoshino, Koichiro",
booktitle = "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue",
month = sep,
year = "2019",
address = "Stockholm, Sweden",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-5939",
doi = "10.18653/v1/W19-5939",
pages = "336--344",
abstract = "In this paper we aim to predict dialogue success and user satisfaction as well as emotion on a turn level. To achieve this, we investigate the use of spectrogram representations, extracted from audio files, in combination with several types of convolutional neural networks. The experiments were performed on the Let{'}s Go V2 database, comprising 5065 audio files and having labels for subjective and objective dialogue turn success, as well as the emotional state of the user. Results show that by using only audio, it is possible to predict turn success with very high accuracy for all three labels (90{\%}). The best performing input representation were 1s long mel-spectrograms in combination with a CNN with a bottleneck architecture. The resulting system has the potential to be used real-time. Our results significantly surpass the state of the art for dialogue success prediction based only on audio.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lykartsis-kotti-2019-prediction">
<titleInfo>
<title>Prediction of User Emotion and Dialogue Success Using Audio Spectrograms and Convolutional Neural Networks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Athanasios</namePart>
<namePart type="family">Lykartsis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margarita</namePart>
<namePart type="family">Kotti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue</title>
</titleInfo>
<name type="personal">
<namePart type="given">Satoshi</namePart>
<namePart type="family">Nakamura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Milica</namePart>
<namePart type="family">Gasic</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ingrid</namePart>
<namePart type="family">Zukerman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gabriel</namePart>
<namePart type="family">Skantze</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mikio</namePart>
<namePart type="family">Nakano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Ultes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Koichiro</namePart>
<namePart type="family">Yoshino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Stockholm, Sweden</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we aim to predict dialogue success and user satisfaction as well as emotion at the turn level. To achieve this, we investigate the use of spectrogram representations, extracted from audio files, in combination with several types of convolutional neural networks. The experiments were performed on the Let’s Go V2 database, comprising 5065 audio files with labels for subjective and objective dialogue turn success, as well as the emotional state of the user. Results show that by using only audio, it is possible to predict turn success with very high accuracy for all three labels (90%). The best-performing input representation was 1s-long mel-spectrograms in combination with a CNN with a bottleneck architecture. The resulting system has the potential to be used in real time. Our results significantly surpass the state of the art for dialogue success prediction based only on audio.</abstract>
<identifier type="citekey">lykartsis-kotti-2019-prediction</identifier>
<identifier type="doi">10.18653/v1/W19-5939</identifier>
<location>
<url>https://aclanthology.org/W19-5939</url>
</location>
<part>
<date>2019-09</date>
<extent unit="page">
<start>336</start>
<end>344</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Prediction of User Emotion and Dialogue Success Using Audio Spectrograms and Convolutional Neural Networks
%A Lykartsis, Athanasios
%A Kotti, Margarita
%Y Nakamura, Satoshi
%Y Gasic, Milica
%Y Zukerman, Ingrid
%Y Skantze, Gabriel
%Y Nakano, Mikio
%Y Papangelis, Alexandros
%Y Ultes, Stefan
%Y Yoshino, Koichiro
%S Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue
%D 2019
%8 September
%I Association for Computational Linguistics
%C Stockholm, Sweden
%F lykartsis-kotti-2019-prediction
%X In this paper, we aim to predict dialogue success and user satisfaction as well as emotion at the turn level. To achieve this, we investigate the use of spectrogram representations, extracted from audio files, in combination with several types of convolutional neural networks. The experiments were performed on the Let’s Go V2 database, comprising 5065 audio files with labels for subjective and objective dialogue turn success, as well as the emotional state of the user. Results show that by using only audio, it is possible to predict turn success with very high accuracy for all three labels (90%). The best-performing input representation was 1s-long mel-spectrograms in combination with a CNN with a bottleneck architecture. The resulting system has the potential to be used in real time. Our results significantly surpass the state of the art for dialogue success prediction based only on audio.
%R 10.18653/v1/W19-5939
%U https://aclanthology.org/W19-5939
%U https://doi.org/10.18653/v1/W19-5939
%P 336-344
Markdown (Informal)
[Prediction of User Emotion and Dialogue Success Using Audio Spectrograms and Convolutional Neural Networks](https://aclanthology.org/W19-5939) (Lykartsis & Kotti, SIGDIAL 2019)
ACL
Athanasios Lykartsis and Margarita Kotti. 2019. Prediction of User Emotion and Dialogue Success Using Audio Spectrograms and Convolutional Neural Networks. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue, pages 336–344, Stockholm, Sweden. Association for Computational Linguistics.