BibTeX
@inproceedings{xu-he-2023-security,
    title = "Security Challenges in Natural Language Processing Models",
    author = "Xu, Qiongkai  and
      He, Xuanli",
    editor = "Zhang, Qi  and
      Sajjad, Hassan",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-tutorial.2",
    doi = "10.18653/v1/2023.emnlp-tutorial.2",
    pages = "7--12",
    abstract = "Large-scale natural language processing models have been developed and integrated into numerous applications, owing to their remarkable performance. Nonetheless, security concerns prevent the widespread adoption of these black-box machine learning models. In this tutorial, we will dive into three emerging security issues in NLP research, i.e., backdoor attacks, private data leakage, and imitation attacks. Each threat will be introduced with respect to its usage scenarios, attack methodologies, and defense technologies.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="xu-he-2023-security">
    <titleInfo>
        <title>Security Challenges in Natural Language Processing Models</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Qiongkai</namePart>
        <namePart type="family">Xu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Xuanli</namePart>
        <namePart type="family">He</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Qi</namePart>
            <namePart type="family">Zhang</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Hassan</namePart>
            <namePart type="family">Sajjad</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Singapore</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Large-scale natural language processing models have been developed and integrated into numerous applications, owing to their remarkable performance. Nonetheless, security concerns prevent the widespread adoption of these black-box machine learning models. In this tutorial, we will dive into three emerging security issues in NLP research, i.e., backdoor attacks, private data leakage, and imitation attacks. Each threat will be introduced with respect to its usage scenarios, attack methodologies, and defense technologies.</abstract>
    <identifier type="citekey">xu-he-2023-security</identifier>
    <identifier type="doi">10.18653/v1/2023.emnlp-tutorial.2</identifier>
    <location>
        <url>https://aclanthology.org/2023.emnlp-tutorial.2</url>
    </location>
    <part>
        <date>2023-12</date>
        <extent unit="page">
            <start>7</start>
            <end>12</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Security Challenges in Natural Language Processing Models
%A Xu, Qiongkai
%A He, Xuanli
%Y Zhang, Qi
%Y Sajjad, Hassan
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F xu-he-2023-security
%X Large-scale natural language processing models have been developed and integrated into numerous applications, owing to their remarkable performance. Nonetheless, security concerns prevent the widespread adoption of these black-box machine learning models. In this tutorial, we will dive into three emerging security issues in NLP research, i.e., backdoor attacks, private data leakage, and imitation attacks. Each threat will be introduced with respect to its usage scenarios, attack methodologies, and defense technologies.
%R 10.18653/v1/2023.emnlp-tutorial.2
%U https://aclanthology.org/2023.emnlp-tutorial.2
%U https://doi.org/10.18653/v1/2023.emnlp-tutorial.2
%P 7-12
Markdown (Informal)
[Security Challenges in Natural Language Processing Models](https://aclanthology.org/2023.emnlp-tutorial.2) (Xu & He, EMNLP 2023)
ACL
Qiongkai Xu and Xuanli He. 2023. Security Challenges in Natural Language Processing Models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts, pages 7–12, Singapore. Association for Computational Linguistics.