@inproceedings{kennedy-etal-2020-contextualizing,
    title = "Contextualizing Hate Speech Classifiers with Post-hoc Explanation",
    author = "Kennedy, Brendan and
      Jin, Xisen and
      Mostafazadeh Davani, Aida and
      Dehghani, Morteza and
      Ren, Xiang",
    editor = "Jurafsky, Dan and
      Chai, Joyce and
      Schluter, Natalie and
      Tetreault, Joel",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.acl-main.483",
    doi = "10.18653/v1/2020.acl-main.483",
    pages = "5435--5442",
    abstract = "Hate speech classifiers trained on imbalanced datasets struggle to determine if group identifiers like {``}gay{''} or {``}black{''} are used in offensive or prejudiced ways. Such biases manifest in false positives when these identifiers are present, due to models{'} inability to learn the contexts which constitute a hateful usage of identifiers. We extract post-hoc explanations from fine-tuned BERT classifiers to detect bias towards identity terms. Then, we propose a novel regularization technique based on these explanations that encourages models to learn from the context of group identifiers in addition to the identifiers themselves. Our approach improved over baselines in limiting false positives on out-of-domain data while maintaining and in cases improving in-domain performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kennedy-etal-2020-contextualizing">
    <titleInfo>
        <title>Contextualizing Hate Speech Classifiers with Post-hoc Explanation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Brendan</namePart>
        <namePart type="family">Kennedy</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Xisen</namePart>
        <namePart type="family">Jin</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Aida</namePart>
        <namePart type="family">Mostafazadeh Davani</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Morteza</namePart>
        <namePart type="family">Dehghani</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Xiang</namePart>
        <namePart type="family">Ren</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Dan</namePart>
            <namePart type="family">Jurafsky</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Joyce</namePart>
            <namePart type="family">Chai</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Natalie</namePart>
            <namePart type="family">Schluter</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Joel</namePart>
            <namePart type="family">Tetreault</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Hate speech classifiers trained on imbalanced datasets struggle to determine if group identifiers like “gay” or “black” are used in offensive or prejudiced ways. Such biases manifest in false positives when these identifiers are present, due to models’ inability to learn the contexts which constitute a hateful usage of identifiers. We extract post-hoc explanations from fine-tuned BERT classifiers to detect bias towards identity terms. Then, we propose a novel regularization technique based on these explanations that encourages models to learn from the context of group identifiers in addition to the identifiers themselves. Our approach improved over baselines in limiting false positives on out-of-domain data while maintaining and in cases improving in-domain performance.</abstract>
    <identifier type="citekey">kennedy-etal-2020-contextualizing</identifier>
    <identifier type="doi">10.18653/v1/2020.acl-main.483</identifier>
    <location>
        <url>https://aclanthology.org/2020.acl-main.483</url>
    </location>
    <part>
        <date>2020-07</date>
        <extent unit="page">
            <start>5435</start>
            <end>5442</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Contextualizing Hate Speech Classifiers with Post-hoc Explanation
%A Kennedy, Brendan
%A Jin, Xisen
%A Mostafazadeh Davani, Aida
%A Dehghani, Morteza
%A Ren, Xiang
%Y Jurafsky, Dan
%Y Chai, Joyce
%Y Schluter, Natalie
%Y Tetreault, Joel
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F kennedy-etal-2020-contextualizing
%X Hate speech classifiers trained on imbalanced datasets struggle to determine if group identifiers like “gay” or “black” are used in offensive or prejudiced ways. Such biases manifest in false positives when these identifiers are present, due to models’ inability to learn the contexts which constitute a hateful usage of identifiers. We extract post-hoc explanations from fine-tuned BERT classifiers to detect bias towards identity terms. Then, we propose a novel regularization technique based on these explanations that encourages models to learn from the context of group identifiers in addition to the identifiers themselves. Our approach improved over baselines in limiting false positives on out-of-domain data while maintaining and in cases improving in-domain performance.
%R 10.18653/v1/2020.acl-main.483
%U https://aclanthology.org/2020.acl-main.483
%U https://doi.org/10.18653/v1/2020.acl-main.483
%P 5435-5442
Markdown (Informal)
[Contextualizing Hate Speech Classifiers with Post-hoc Explanation](https://aclanthology.org/2020.acl-main.483) (Kennedy et al., ACL 2020)
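As a rough, hypothetical sketch of the regularization idea summarized in the abstract: the paper's actual method uses Sampling and Occlusion (SOC) explanations, whereas the simplified single-token occlusion below, the toy `GROUP_IDENTIFIERS` lexicon, and the `ALPHA` weight are all invented here for illustration. The general idea is to add the importance attributed to group-identifier tokens as a penalty to the fine-tuning loss, so the classifier must rely on surrounding context rather than the identifiers alone.

```python
# Hypothetical sketch: explanation-regularized fine-tuning for hate speech
# classification. Penalizing the importance attributed to group-identifier
# tokens pushes the model to learn from their context instead.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_NAME = "bert-base-uncased"
GROUP_IDENTIFIERS = {"gay", "black", "muslim"}  # toy lexicon, illustrative only
ALPHA = 0.1                                     # assumed regularization weight

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)


def occlusion_attribution(input_ids, attention_mask, positions):
    """Importance of each listed position: drop in the 'hateful' logit when
    that token is replaced by [MASK]. A crude stand-in for SOC explanations."""
    base = model(input_ids=input_ids, attention_mask=attention_mask).logits[:, 1]
    scores = []
    for pos in positions:
        occluded = input_ids.clone()
        occluded[0, pos] = tokenizer.mask_token_id
        occ = model(input_ids=occluded, attention_mask=attention_mask).logits[:, 1]
        scores.append(base - occ)
    return torch.stack(scores, dim=1)  # shape: (1, num_positions)


def training_step(text, label):
    enc = tokenizer(text, return_tensors="pt", truncation=True)
    out = model(**enc, labels=torch.tensor([label]))
    tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0])
    positions = [i for i, tok in enumerate(tokens) if tok in GROUP_IDENTIFIERS]
    penalty = torch.zeros(())
    if positions:
        attr = occlusion_attribution(enc["input_ids"], enc["attention_mask"], positions)
        penalty = (attr ** 2).mean()  # drive identifier importance toward zero
    loss = out.loss + ALPHA * penalty
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()


# Benign mention of an identifier, labeled non-hateful: without the penalty,
# a classifier trained on imbalanced data often flags it as a false positive.
training_step("the black community organized a neighborhood fundraiser", 0)
```

The squared-attribution penalty is one plausible instantiation of the abstract's description, not the paper's exact objective; the released implementation should be consulted for the authors' actual formulation.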