BibTeX

@inproceedings{dessi-baroni-2019-cnns,
    title = "{CNN}s found to jump around more skillfully than {RNN}s: Compositional Generalization in Seq2seq Convolutional Networks",
    author = "Dess{\`\i}, Roberto  and
      Baroni, Marco",
    editor = "Korhonen, Anna  and
      Traum, David  and
      M{\`a}rquez, Llu{\'\i}s",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1381",
    doi = "10.18653/v1/P19-1381",
    pages = "3919--3923",
    abstract = "Lake and Baroni (2018) introduced the SCAN dataset probing the ability of seq2seq models to capture compositional generalizations, such as inferring the meaning of {``}jump around{''} 0-shot from the component words. Recurrent networks (RNNs) were found to completely fail the most challenging generalization cases. We test here a convolutional network (CNN) on these tasks, reporting hugely improved performance with respect to RNNs. Despite the big improvement, the CNN has however not induced systematic rules, suggesting that the difference between compositional and non-compositional behaviour is not clear-cut.",
}
MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="https://www.loc.gov/mods/v3">
  <mods ID="dessi-baroni-2019-cnns">
    <titleInfo>
      <title>CNNs found to jump around more skillfully than RNNs: Compositional Generalization in Seq2seq Convolutional Networks</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Roberto</namePart>
      <namePart type="family">Dessì</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marco</namePart>
      <namePart type="family">Baroni</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Korhonen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Traum</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lluís</namePart>
        <namePart type="family">Màrquez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Lake and Baroni (2018) introduced the SCAN dataset probing the ability of seq2seq models to capture compositional generalizations, such as inferring the meaning of “jump around” 0-shot from the component words. Recurrent networks (RNNs) were found to completely fail the most challenging generalization cases. We test here a convolutional network (CNN) on these tasks, reporting hugely improved performance with respect to RNNs. Despite the big improvement, the CNN has however not induced systematic rules, suggesting that the difference between compositional and non-compositional behaviour is not clear-cut.</abstract>
    <identifier type="citekey">dessi-baroni-2019-cnns</identifier>
    <identifier type="doi">10.18653/v1/P19-1381</identifier>
    <location>
      <url>https://aclanthology.org/P19-1381</url>
    </location>
    <part>
      <date>2019-07</date>
      <extent unit="page">
        <start>3919</start>
        <end>3923</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T CNNs found to jump around more skillfully than RNNs: Compositional Generalization in Seq2seq Convolutional Networks
%A Dessì, Roberto
%A Baroni, Marco
%Y Korhonen, Anna
%Y Traum, David
%Y Màrquez, Lluís
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 July
%I Association for Computational Linguistics
%C Florence, Italy
%F dessi-baroni-2019-cnns
%X Lake and Baroni (2018) introduced the SCAN dataset probing the ability of seq2seq models to capture compositional generalizations, such as inferring the meaning of “jump around” 0-shot from the component words. Recurrent networks (RNNs) were found to completely fail the most challenging generalization cases. We test here a convolutional network (CNN) on these tasks, reporting hugely improved performance with respect to RNNs. Despite the big improvement, the CNN has however not induced systematic rules, suggesting that the difference between compositional and non-compositional behaviour is not clear-cut.
%R 10.18653/v1/P19-1381
%U https://aclanthology.org/P19-1381
%U https://doi.org/10.18653/v1/P19-1381
%P 3919-3923
Markdown (Informal)
[CNNs found to jump around more skillfully than RNNs: Compositional Generalization in Seq2seq Convolutional Networks](https://aclanthology.org/P19-1381) (Dessì & Baroni, ACL 2019)
ACL

Roberto Dessì and Marco Baroni. 2019. [CNNs found to jump around more skillfully than RNNs: Compositional Generalization in Seq2seq Convolutional Networks](https://aclanthology.org/P19-1381). In *Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics*, pages 3919–3923, Florence, Italy. Association for Computational Linguistics.