BibTeX
@inproceedings{kwon-mihindukulasooriya-2023-finspector,
    title = "Finspector: A Human-Centered Visual Inspection Tool for Exploring and Comparing Biases among Foundation Models",
    author = "Kwon, Bum Chul and
      Mihindukulasooriya, Nandana",
    editor = "Bollegala, Danushka and
      Huang, Ruihong and
      Ritter, Alan",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-demo.4/",
    doi = "10.18653/v1/2023.acl-demo.4",
    pages = "42--50",
    abstract = "Pre-trained transformer-based language models are becoming increasingly popular due to their exceptional performance on various benchmarks. However, concerns persist regarding the presence of hidden biases within these models, which can lead to discriminatory outcomes and reinforce harmful stereotypes. To address this issue, we propose Finspector, a human-centered visual inspection tool designed to detect biases in different categories through log-likelihood scores generated by language models. The goal of the tool is to enable researchers to easily identify potential biases using visual analytics, ultimately contributing to a fairer and more just deployment of these models in both academic and industrial settings. Finspector is available at \url{https://github.com/IBM/finspector}."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kwon-mihindukulasooriya-2023-finspector">
    <titleInfo>
      <title>Finspector: A Human-Centered Visual Inspection Tool for Exploring and Comparing Biases among Foundation Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Bum</namePart>
      <namePart type="given">Chul</namePart>
      <namePart type="family">Kwon</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nandana</namePart>
      <namePart type="family">Mihindukulasooriya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Danushka</namePart>
        <namePart type="family">Bollegala</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ruihong</namePart>
        <namePart type="family">Huang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alan</namePart>
        <namePart type="family">Ritter</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Toronto, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Pre-trained transformer-based language models are becoming increasingly popular due to their exceptional performance on various benchmarks. However, concerns persist regarding the presence of hidden biases within these models, which can lead to discriminatory outcomes and reinforce harmful stereotypes. To address this issue, we propose Finspector, a human-centered visual inspection tool designed to detect biases in different categories through log-likelihood scores generated by language models. The goal of the tool is to enable researchers to easily identify potential biases using visual analytics, ultimately contributing to a fairer and more just deployment of these models in both academic and industrial settings. Finspector is available at https://github.com/IBM/finspector.</abstract>
    <identifier type="citekey">kwon-mihindukulasooriya-2023-finspector</identifier>
    <identifier type="doi">10.18653/v1/2023.acl-demo.4</identifier>
    <location>
      <url>https://aclanthology.org/2023.acl-demo.4/</url>
    </location>
    <part>
      <date>2023-07</date>
      <extent unit="page">
        <start>42</start>
        <end>50</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Finspector: A Human-Centered Visual Inspection Tool for Exploring and Comparing Biases among Foundation Models
%A Kwon, Bum Chul
%A Mihindukulasooriya, Nandana
%Y Bollegala, Danushka
%Y Huang, Ruihong
%Y Ritter, Alan
%S Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F kwon-mihindukulasooriya-2023-finspector
%X Pre-trained transformer-based language models are becoming increasingly popular due to their exceptional performance on various benchmarks. However, concerns persist regarding the presence of hidden biases within these models, which can lead to discriminatory outcomes and reinforce harmful stereotypes. To address this issue, we propose Finspector, a human-centered visual inspection tool designed to detect biases in different categories through log-likelihood scores generated by language models. The goal of the tool is to enable researchers to easily identify potential biases using visual analytics, ultimately contributing to a fairer and more just deployment of these models in both academic and industrial settings. Finspector is available at https://github.com/IBM/finspector.
%R 10.18653/v1/2023.acl-demo.4
%U https://aclanthology.org/2023.acl-demo.4/
%U https://doi.org/10.18653/v1/2023.acl-demo.4
%P 42-50
Markdown (Informal)
[Finspector: A Human-Centered Visual Inspection Tool for Exploring and Comparing Biases among Foundation Models](https://aclanthology.org/2023.acl-demo.4/) (Kwon & Mihindukulasooriya, ACL 2023)
ACL
Bum Chul Kwon and Nandana Mihindukulasooriya. 2023. Finspector: A Human-Centered Visual Inspection Tool for Exploring and Comparing Biases among Foundation Models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 42–50, Toronto, Canada. Association for Computational Linguistics.
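
Note on the scoring the abstract mentions: the paper describes comparing log-likelihood scores that language models assign to sentences across bias categories. One common way to obtain such a score from a masked language model is pseudo-log-likelihood: mask each token in turn and sum the log-probabilities the model assigns to the original tokens. The Python sketch below (using Hugging Face Transformers) is illustrative only; the helper name pseudo_log_likelihood, the choice of bert-base-uncased, and the example sentence pair are assumptions for this sketch, not code taken from the Finspector repository.

# Hypothetical sketch: pseudo-log-likelihood scoring with a masked LM,
# the kind of per-sentence score a bias inspection tool can compare
# across paired sentences and across models.
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

def pseudo_log_likelihood(sentence: str, model, tokenizer) -> float:
    """Sum of log P(token | rest of sentence), masking one token at a time."""
    input_ids = tokenizer(sentence, return_tensors="pt")["input_ids"][0]
    total = 0.0
    # Skip the first and last positions ([CLS] and [SEP] in BERT-style models).
    for i in range(1, input_ids.size(0) - 1):
        masked = input_ids.clone()
        masked[i] = tokenizer.mask_token_id
        with torch.no_grad():
            logits = model(masked.unsqueeze(0)).logits
        log_probs = torch.log_softmax(logits[0, i], dim=-1)
        total += log_probs[input_ids[i]].item()
    return total

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

# Paired sentences differing only in a demographic term (hypothetical example);
# a systematic score gap across many such pairs is a potential bias signal.
for sentence in ["He is a good nurse.", "She is a good nurse."]:
    print(f"{sentence}: {pseudo_log_likelihood(sentence, model, tokenizer):.2f}")

A single pair proves nothing on its own; the point of a visual tool like Finspector is to plot many such scores at once, so that systematic gaps between categories, and differences between models, become visible.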