@inproceedings{bexte-etal-2021-implicit,
    title = "Implicit Phenomena in Short-answer Scoring Data",
    author = "Bexte, Marie  and
      Horbach, Andrea  and
      Zesch, Torsten",
    editor = "Roth, Michael  and
      Tsarfaty, Reut  and
      Goldberg, Yoav",
    booktitle = "Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.unimplicit-1.2/",
    doi = "10.18653/v1/2021.unimplicit-1.2",
    pages = "11--19",
    abstract = "Short-answer scoring is the task of assessing the correctness of a short text given as response to a question that can come from a variety of educational scenarios. As only content, not form, is important, the exact wording including the explicitness of an answer should not matter. However, many state-of-the-art scoring models heavily rely on lexical information, be it word embeddings in a neural network or n-grams in an SVM. Thus, the exact wording of an answer might very well make a difference. We therefore quantify to what extent implicit language phenomena occur in short answer datasets and examine the influence they have on automatic scoring performance. We find that the level of implicitness depends on the individual question, and that some phenomena are very frequent. Resolving implicit wording to explicit formulations indeed tends to improve automatic scoring performance."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bexte-etal-2021-implicit">
    <titleInfo>
        <title>Implicit Phenomena in Short-answer Scoring Data</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Marie</namePart>
        <namePart type="family">Bexte</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Andrea</namePart>
        <namePart type="family">Horbach</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Torsten</namePart>
        <namePart type="family">Zesch</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2021-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Michael</namePart>
            <namePart type="family">Roth</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Reut</namePart>
            <namePart type="family">Tsarfaty</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yoav</namePart>
            <namePart type="family">Goldberg</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Short-answer scoring is the task of assessing the correctness of a short text given as response to a question that can come from a variety of educational scenarios. As only content, not form, is important, the exact wording including the explicitness of an answer should not matter. However, many state-of-the-art scoring models heavily rely on lexical information, be it word embeddings in a neural network or n-grams in an SVM. Thus, the exact wording of an answer might very well make a difference. We therefore quantify to what extent implicit language phenomena occur in short answer datasets and examine the influence they have on automatic scoring performance. We find that the level of implicitness depends on the individual question, and that some phenomena are very frequent. Resolving implicit wording to explicit formulations indeed tends to improve automatic scoring performance.</abstract>
    <identifier type="citekey">bexte-etal-2021-implicit</identifier>
    <identifier type="doi">10.18653/v1/2021.unimplicit-1.2</identifier>
    <location>
        <url>https://aclanthology.org/2021.unimplicit-1.2/</url>
    </location>
    <part>
        <date>2021-08</date>
        <extent unit="page">
            <start>11</start>
            <end>19</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Implicit Phenomena in Short-answer Scoring Data
%A Bexte, Marie
%A Horbach, Andrea
%A Zesch, Torsten
%Y Roth, Michael
%Y Tsarfaty, Reut
%Y Goldberg, Yoav
%S Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F bexte-etal-2021-implicit
%X Short-answer scoring is the task of assessing the correctness of a short text given as response to a question that can come from a variety of educational scenarios. As only content, not form, is important, the exact wording including the explicitness of an answer should not matter. However, many state-of-the-art scoring models heavily rely on lexical information, be it word embeddings in a neural network or n-grams in an SVM. Thus, the exact wording of an answer might very well make a difference. We therefore quantify to what extent implicit language phenomena occur in short answer datasets and examine the influence they have on automatic scoring performance. We find that the level of implicitness depends on the individual question, and that some phenomena are very frequent. Resolving implicit wording to explicit formulations indeed tends to improve automatic scoring performance.
%R 10.18653/v1/2021.unimplicit-1.2
%U https://aclanthology.org/2021.unimplicit-1.2/
%U https://doi.org/10.18653/v1/2021.unimplicit-1.2
%P 11-19
Markdown (Informal)
[Implicit Phenomena in Short-answer Scoring Data](https://aclanthology.org/2021.unimplicit-1.2/) (Bexte et al., unimplicit 2021)
ACL
Marie Bexte, Andrea Horbach, and Torsten Zesch. 2021. Implicit Phenomena in Short-answer Scoring Data. In Proceedings of the 1st Workshop on Understanding Implicit and Underspecified Language, pages 11–19, Online. Association for Computational Linguistics.