Datasets:
Tasks: Text Classification
Sub-tasks: acceptability-classification
Languages: Italian
ArXiv:
License:
"""Loading script for the ItaCoLA (Italian Corpus of Linguistic Acceptability) dataset."""

import csv
import sys
from typing import List

import datasets

# ItaCoLA rows can contain very long sentence fields; lift the csv module's
# default per-field size cap so parsing never raises on large cells.
csv.field_size_limit(sys.maxsize)

_CITATION = """\
@inproceedings{trotta-etal-2021-monolingual,
author = {Trotta, Daniela and Guarasci, Raffaele and Leonardelli, Elisa and Tonelli, Sara},
title = {Monolingual and Cross-Lingual Acceptability Judgments with the Italian {CoLA} corpus},
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = {2021},
address = "Punta Cana, Dominican Republic and Online",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2109.12053",
}
"""

_DESCRIPTION = """\
The Italian Corpus of Linguistic Acceptability includes almost 10k sentences taken from
linguistic literature with a binary annotation made by the original authors themselves.
The work is inspired by the English Corpus of Linguistic Acceptability (CoLA) by Warstadt et al.
Part of the dataset has been manually annotated to highlight 9 linguistic phenomena.
"""

_HOMEPAGE = "https://github.com/dhfbk/ItaCoLA-dataset"

# NOTE(review): the upstream card declares no license; the literal string "None"
# is preserved as-is since it is what callers of DatasetInfo receive.
_LICENSE = "None"

_SPLITS = ["train", "test"]
class ItaColaConfig(datasets.BuilderConfig):
    """BuilderConfig for the ItaCoLA corpus."""

    def __init__(self, features, data_url, **kwargs):
        """
        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: `string`, url to download the zip file from.
            **kwargs: keyword arguments forwarded to super.
        """
        # All configs share a single fixed version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
class ItaCola(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Italian Corpus of Linguistic Acceptability."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ItaColaConfig(
            name="scores",
            features=["unique_id", "source", "acceptability", "sentence"],
            data_url="https://raw.githubusercontent.com/dhfbk/ItaCoLA-dataset/main/ItaCoLA_dataset.tsv"
        ),
        ItaColaConfig(
            name="phenomena",
            features=[
                "unique_id",
                "source",
                "acceptability",
                "sentence",
                "cleft_construction",
                "copular_construction",
                "subject_verb_agreement",
                "wh_islands_violations",
                "simple",
                "question",
                "auxiliary",
                "bind",
                "indefinite_pronouns",
            ],
            data_url="https://github.com/dhfbk/ItaCoLA-dataset/raw/main/ItaCoLA_dataset_phenomenon.tsv"
        ),
    ]

    DEFAULT_CONFIG_NAME = "scores"

    def _info(self):
        """Build the DatasetInfo for the active config."""
        # Every declared feature defaults to int32; the two free-text columns
        # are then overridden to string.
        feature_types = {name: datasets.Value("int32") for name in self.config.features}
        feature_types["source"] = datasets.Value("string")
        feature_types["sentence"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_types),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_file = dl_manager.download_and_extract(self.config.data_url)
        # Only the "scores" file is partitioned into train/test; the
        # "phenomena" file is exposed as a single train split.
        split_plan = [("train", datasets.Split.TRAIN)]
        if self.config.name == "scores":
            split_plan.append(("test", datasets.Split.TEST))
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": data_file,
                    "split": split_tag,
                    "features": self.config.features,
                },
            )
            for split_tag, split_name in split_plan
        ]

    def _generate_examples(self, filepath: str, split: str, features: List[str]):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf8") as tsv_file:
            for line_idx, line in enumerate(tsv_file):
                # Skip the header row.
                if line_idx == 0:
                    continue
                row_split = None
                columns = line.strip().split("\t")
                # Rows with fewer than 6 columns carry a trailing split tag
                # (presumably the scores file's split column — TODO confirm
                # against the upstream TSV); pop it before pairing values
                # with feature names.
                if len(columns) < 6:
                    row_split = columns[-1]
                    columns = columns[:-1]
                # Emit the row when the file has no split column, or when its
                # split tag matches the requested split.
                if row_split is None or row_split.strip() == split:
                    yield line_idx, {
                        name: value.strip() for name, value in zip(features, columns)
                    }