| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| """MultiSim is a growing collection of Text Simplfication datasets in multiple languages. Each dataset is a set of complex and simple sentence pairs.""" |
|
|
| import pandas as pd |
| import os |
| from collections import defaultdict |
| import urllib.parse |
| from huggingface_hub import snapshot_download |
|
|
|
|
| import datasets |
|
|
# BibTeX entry for the ACL 2023 paper that introduced the MultiSim benchmark;
# surfaced to users through DatasetInfo.citation.
_CITATION = """\
@inproceedings{ryan-etal-2023-revisiting,
    title = "Revisiting non-{E}nglish Text Simplification: A Unified Multilingual Benchmark",
    author = "Ryan, Michael and
      Naous, Tarek and
      Xu, Wei",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.269",
    pages = "4898--4927",
    abstract = "Recent advancements in high-quality, large-scale English resources have pushed the frontier of English Automatic Text Simplification (ATS) research. However, less work has been done on multilingual text simplification due to the lack of a diverse evaluation benchmark that covers complex-simple sentence pairs in many languages. This paper introduces the MultiSim benchmark, a collection of 27 resources in 12 distinct languages containing over 1.7 million complex-simple sentence pairs. This benchmark will encourage research in developing more effective multilingual text simplification models and evaluation metrics. Our experiments using MultiSim with pre-trained multilingual language models reveal exciting performance improvements from multilingual training in non-English settings. We observe strong performance from Russian in zero-shot cross-lingual transfer to low-resource languages. We further show that few-shot prompting with BLOOM-176b achieves comparable quality to reference simplifications outperforming fine-tuned models in most languages. We validate these findings through human evaluation.",
}
"""
|
|
| |
| |
# Short human-readable summary, surfaced through DatasetInfo.description.
# Fixes the "Simplfication" -> "Simplification" typo in the user-facing text.
_DESCRIPTION = """\
MultiSim is a growing collection of Text Simplification datasets in multiple languages. Each dataset is a set of complex and simple sentence pairs.
"""
|
|
| |
| _HOMEPAGE = "https://github.com/XenonMolecule/MultiSim" |
|
|
| |
| _LICENSE = """MIT License |
| |
| Copyright (c) 2023 Michael Ryan |
| |
| Permission is hereby granted, free of charge, to any person obtaining a copy |
| of this software and associated documentation files (the "Software"), to deal |
| in the Software without restriction, including without limitation the rights |
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| copies of the Software, and to permit persons to whom the Software is |
| furnished to do so, subject to the following conditions: |
| |
| The above copyright notice and this permission notice shall be included in all |
| copies or substantial portions of the Software. |
| |
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| SOFTWARE.""" |
|
|
# Maps each sub-corpus config name to:
#   "path"     - repo-relative prefix of its CSV files (split suffix is
#                appended later, e.g. "<path>_train.csv")
#   "language" - language code matching the values in _LANGUAGES
# NOTE(review): some paths contain spaces; they are percent-encoded when URLs
# are built in _URLS below.
_SUBCORPORA = {
    "WikiAutoEN": {
        "path": "data/English/WikiAuto",
        "language": "en"
    },
    "ASSET": {
        "path": "data/English/ASSET",
        "language": "en"
    },
    "Terence": {
        "path" : "data/Italian/Terence",
        "language": "it"
    },
    "Teacher": {
        "path": "data/Italian/Teacher",
        "language": "it"
    },
    "SimpitikiWiki": {
        "path": "data/Italian/Simpitiki Italian Wikipedia",
        "language": "it"
    },
    "AdminIt": {
        "path": "data/Italian/AdminIT",
        "language": "it"
    },
    "PaCCSS-IT": {
        "path": "data/Italian/PaCCSS-IT Corpus",
        "language": "it"
    },
    "CLEAR" : {
        "path" : "data/French/CLEAR Corpus",
        "language": "fr"
    },
    "WikiLargeFR": {
        "path" : "data/French/WikiLargeFR Corpus",
        "language": "fr"
    },
    "EasyJapanese": {
        "path": "data/Japanese/Easy Japanese Corpus",
        "language": "ja"
    },
    "EasyJapaneseExtended": {
        "path": "data/Japanese/Easy Japanese Extended",
        "language": "ja"
    },
    "PorSimples" : {
        "path": "data/Brazilian Portuguese/PorSimples",
        "language": "pt-br"
    },
    "TextComplexityDE" : {
        "path": "data/German/TextComplexityDE Parallel Corpus",
        "language": "de"
    },
    "GEOLinoTest" : {
        "path" : "data/German/GEOLino Corpus",
        "language": "de"
    },
    "RuWikiLarge": {
        "path" : "data/Russian/RuWikiLarge",
        "language": "ru"
    },
    "RSSE" : {
        "path": "data/Russian/RSSE Corpus",
        "language": "ru"
    },
    "RuAdaptFairytales" : {
        "path": "data/Russian/RuAdapt Fairytales",
        "language": "ru"
    },
    "RuAdaptEncy" : {
        "path" : "data/Russian/RuAdapt Ency",
        "language": "ru"
    },
    "TSSlovene" : {
        "path" : "data/Slovene/Text Simplification Slovene",
        "language": "sl"
    }
}
|
|
| _URL = "https://huggingface.co/datasets/MichaelR207/MultiSim/raw/main" |
|
|
# Direct download URL for every (sub-corpus, split) CSV, keyed "<dataset>-<split>".
# Spaces in corpus paths are percent-encoded; ':' and '/' are kept intact so the
# scheme and path separators survive quoting.
#
# Fix: the original built URLs with os.path.join, which inserts the OS path
# separator — on Windows that yields backslash-corrupted URLs. URLs always use
# '/', so the components are joined explicitly.
_URLS = {
    dataset + "-" + split: urllib.parse.quote(
        _URL + "/" + _SUBCORPORA[dataset]["path"] + "_" + split + ".csv",
        safe=":/",
    )
    for split in ["train", "val", "test"]
    for dataset in _SUBCORPORA
}
|
|
# Maps language-level config names to the language codes used in the
# "language" fields of _SUBCORPORA; selecting a language config loads every
# sub-corpus with a matching code.
# NOTE(review): Spanish/Basque/Danish/Urdu have no sub-corpus entries visible
# in this file, so those configs currently select no data.
_LANGUAGES = {
    "English":'en',
    "Spanish":'es',
    "Italian":'it',
    "French" : 'fr',
    "Japanese": 'ja',
    "Brazilian Portuguese": 'pt-br',
    "German": 'de',
    "Basque": 'eu',
    "Danish": 'da',
    "Urdu": 'ur',
    "Russian": 'ru',
    "Slovene": 'sl'
}
|
|
|
|
class MultiSim(datasets.GeneratorBasedBuilder):
    """MultiSim is a growing collection of Text Simplification datasets in multiple languages. Each dataset is a set of complex and simple sentence pairs."""

    VERSION = datasets.Version("1.0.0")

    # One config per sub-corpus, one per language grouping, plus "all".
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="WikiAutoEN", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="ASSET", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Terence", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Teacher", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="SimpitikiWiki", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="AdminIt", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="PaCCSS-IT", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="CLEAR", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="WikiLargeFR", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="EasyJapanese", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="EasyJapaneseExtended", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="PorSimples", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="TextComplexityDE", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="GEOLinoTest", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="RuWikiLarge", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="RSSE", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="RuAdaptFairytales", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="RuAdaptEncy", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="TSSlovene", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="English", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Italian", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="French", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Japanese", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Brazilian Portuguese", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="German", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Russian", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="Slovene", version=VERSION, description="TODO: Descriptions"),
        datasets.BuilderConfig(name="all", version=VERSION, description="TODO: Descriptions"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return dataset metadata.

        Each example holds one complex sentence ("original") and a sequence of
        one or more simplifications ("simple").
        """
        features = datasets.Features(
            {
                "original": datasets.Value("string"),
                "simple": datasets.Sequence(feature={"simplifications": datasets.Value("string")}),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the repo snapshot and resolve the sub-corpus path prefixes
        selected by ``self.config.name``.

        Raises:
            ValueError: if the config name is not a sub-corpus, a language,
                or ``"all"``.  (The original code only printed a message and
                returned an empty path list, which later crashed
                ``_generate_examples`` with an ``IndexError``.)
        """
        # Fetch the whole dataset repo once; per-split CSVs are read locally.
        dataset_path = snapshot_download(repo_id="MichaelR207/MultiSim", repo_type="dataset")

        filepaths = []
        if self.config.name == "all":
            for subcorpus in _SUBCORPORA:
                filepaths.append(os.path.join(dataset_path, _SUBCORPORA[subcorpus]["path"]))
        elif self.config.name in _LANGUAGES:
            lang_code = _LANGUAGES[self.config.name]
            for subcorpus in _SUBCORPORA:
                if _SUBCORPORA[subcorpus]["language"] == lang_code:
                    filepaths.append(os.path.join(dataset_path, _SUBCORPORA[subcorpus]["path"]))
        elif self.config.name in _SUBCORPORA:
            filepaths = [os.path.join(dataset_path, _SUBCORPORA[self.config.name]["path"])]
        else:
            # Fail fast with a clear error instead of printing and crashing later.
            raise ValueError(
                "Invalid configuration name: " + self.config.name + ". Try 'all', 'English', 'ASSET', etc."
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": filepaths,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": filepaths,
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": filepaths,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepaths, split):
        """Yield ``(key, example)`` pairs for ``split`` from every
        ``<prefix>_<split>.csv`` that exists under ``filepaths``.

        Missing CSVs are skipped silently — not every sub-corpus provides
        every split.  An empty ``filepaths`` yields no examples (the original
        single-path branch indexed ``filepaths[0]`` unguarded and raised
        ``IndexError``).
        """
        frames = []
        for filepath in filepaths:
            csv_path = filepath + "_" + split + ".csv"
            if os.path.exists(csv_path):
                frames.append(pd.read_csv(csv_path))

        if not frames:
            return

        df = pd.concat(frames)
        if len(filepaths) > 1:
            # Deterministically shuffle when mixing multiple sub-corpora so the
            # combined split is not ordered by corpus.
            df = df.sample(frac=1, random_state=3600).reset_index(drop=True)

        for key, row in df.iterrows():
            original = row["original"]
            simple = []
            for label, content in row.items():
                # Columns other than "original" hold simplifications; empty
                # cells come back from pandas as NaN floats and are skipped.
                if label != "original" and not isinstance(content, float):
                    simple.append({"simplifications": content})
            yield key, {
                "original": original,
                "simple": simple,
            }