@inproceedings{anschutz-etal-2024-images,
title = "Images Speak Volumes: User-Centric Assessment of Image Generation for Accessible Communication",
author = {Ansch{\"u}tz, Miriam and
Sylaj, Tringa and
Groh, Georg},
editor = "Shardlow, Matthew and
Saggion, Horacio and
Alva-Manchego, Fernando and
Zampieri, Marcos and
North, Kai and
{\v{S}}tajner, Sanja and
Stodden, Regina",
booktitle = "Proceedings of the Third Workshop on Text Simplification, Accessibility and Readability (TSAR 2024)",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.tsar-1.4",
doi = "10.18653/v1/2024.tsar-1.4",
pages = "27--40",
abstract = "Explanatory images play a pivotal role in accessible and easy-to-read (E2R) texts. However, the images available in online databases are not tailored toward the respective texts, and the creation of customized images is expensive. In this large-scale study, we investigated whether text-to-image generation models can close this gap by providing customizable images quickly and easily. We benchmarked seven, four open- and three closed-source, image generation models and provide an extensive evaluation of the resulting images. In addition, we performed a user study with people from the E2R target group to examine whether the images met their requirements. We find that some of the models show remarkable performance, but none of the models are ready to be used at a larger scale without human supervision. Our research is an important step toward facilitating the creation of accessible information for E2R creators and tailoring accessible images to the target group{'}s needs.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="anschutz-etal-2024-images">
<titleInfo>
<title>Images Speak Volumes: User-Centric Assessment of Image Generation for Accessible Communication</title>
</titleInfo>
<name type="personal">
<namePart type="given">Miriam</namePart>
<namePart type="family">Anschütz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tringa</namePart>
<namePart type="family">Sylaj</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georg</namePart>
<namePart type="family">Groh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Text Simplification, Accessibility and Readability (TSAR 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Shardlow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggion</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fernando</namePart>
<namePart type="family">Alva-Manchego</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marcos</namePart>
<namePart type="family">Zampieri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kai</namePart>
<namePart type="family">North</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sanja</namePart>
<namePart type="family">Štajner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Regina</namePart>
<namePart type="family">Stodden</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Explanatory images play a pivotal role in accessible and easy-to-read (E2R) texts. However, the images available in online databases are not tailored toward the respective texts, and the creation of customized images is expensive. In this large-scale study, we investigated whether text-to-image generation models can close this gap by providing customizable images quickly and easily. We benchmarked seven image generation models, four open-source and three closed-source, and provide an extensive evaluation of the resulting images. In addition, we performed a user study with people from the E2R target group to examine whether the images met their requirements. We find that some of the models show remarkable performance, but none of the models are ready to be used at a larger scale without human supervision. Our research is an important step toward facilitating the creation of accessible information for E2R creators and tailoring accessible images to the target group’s needs.</abstract>
<identifier type="citekey">anschutz-etal-2024-images</identifier>
<identifier type="doi">10.18653/v1/2024.tsar-1.4</identifier>
<location>
<url>https://aclanthology.org/2024.tsar-1.4</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>27</start>
<end>40</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Images Speak Volumes: User-Centric Assessment of Image Generation for Accessible Communication
%A Anschütz, Miriam
%A Sylaj, Tringa
%A Groh, Georg
%Y Shardlow, Matthew
%Y Saggion, Horacio
%Y Alva-Manchego, Fernando
%Y Zampieri, Marcos
%Y North, Kai
%Y Štajner, Sanja
%Y Stodden, Regina
%S Proceedings of the Third Workshop on Text Simplification, Accessibility and Readability (TSAR 2024)
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F anschutz-etal-2024-images
%X Explanatory images play a pivotal role in accessible and easy-to-read (E2R) texts. However, the images available in online databases are not tailored toward the respective texts, and the creation of customized images is expensive. In this large-scale study, we investigated whether text-to-image generation models can close this gap by providing customizable images quickly and easily. We benchmarked seven image generation models, four open-source and three closed-source, and provide an extensive evaluation of the resulting images. In addition, we performed a user study with people from the E2R target group to examine whether the images met their requirements. We find that some of the models show remarkable performance, but none of the models are ready to be used at a larger scale without human supervision. Our research is an important step toward facilitating the creation of accessible information for E2R creators and tailoring accessible images to the target group’s needs.
%R 10.18653/v1/2024.tsar-1.4
%U https://aclanthology.org/2024.tsar-1.4
%U https://doi.org/10.18653/v1/2024.tsar-1.4
%P 27-40
Markdown (Informal)
[Images Speak Volumes: User-Centric Assessment of Image Generation for Accessible Communication](https://aclanthology.org/2024.tsar-1.4) (Anschütz et al., TSAR 2024)
ACL
Miriam Anschütz, Tringa Sylaj, and Georg Groh. 2024. Images Speak Volumes: User-Centric Assessment of Image Generation for Accessible Communication. In Proceedings of the Third Workshop on Text Simplification, Accessibility and Readability (TSAR 2024), pages 27–40, Miami, Florida, USA. Association for Computational Linguistics.