"""Wikipedia dataset containing cleaned articles of all languages.""" |
|
|
|
|
|
import bz2
import codecs
import json
import multiprocessing
import re
import xml.etree.ElementTree as etree
from functools import partial
from pathlib import Path
from urllib.parse import quote

import datasets
import mwparserfromhell
from multiprocess import Manager, Process
from tqdm import tqdm
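
# Usage sketch (illustrative only): this script is normally consumed through
# `datasets.load_dataset`. The script path and `data_dir` below are hypothetical
# placeholders; `shard_0` is one of the `shard_<N>` configs defined further down.
#
#     from datasets import load_dataset
#
#     wiki = load_dataset(
#         "path/to/this/script.py",      # local path to this loading script
#         name="shard_0",                # selects enwiki_0.ndjson
#         data_dir="path/to/ndjson/",    # directory holding the enwiki_<shard>.ndjson files
#         split="train",
#     )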
logger = datasets.logging.get_logger(__name__)


_CITATION = """"""

_DESCRIPTION = """"""

_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)

_INFO_FILE = "dumpstatus.json"

_VERSION = datasets.Version("2.0.0", "")

_NUM_SPLITS = 68


class WikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia."""

    def __init__(self, shard=None, version=_VERSION, **kwargs):
        """BuilderConfig for Wikipedia.

        Args:
            shard: int, shard number.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            name=f"shard_{shard}",
            description=f"Wikipedia dataset for shard {shard}",
            version=version,
            **kwargs,
        )
        self.shard = shard
        logger.info(f"Shard: {self.shard}")


class Wikipedia(datasets.GeneratorBasedBuilder):
    """Wikipedia dataset."""

    BUILDER_CONFIG_CLASS = WikipediaConfig
    BUILDER_CONFIGS = [
        WikipediaConfig(shard=str(shard_id)) for shard_id in range(_NUM_SPLITS)
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "identifier": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "namespace_name": datasets.Value("string"),
                    "namespace_identifier": datasets.Value("string"),
                    "categories": [
                        {
                            "name": datasets.Value("string"),
                            "url": datasets.Value("string"),
                        }
                    ],
                    "date_modified": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "html": datasets.Value("string"),
                    "wikitext": datasets.Value("string"),
                    "in_language": datasets.Value("string"),
                    "main_entity": {
                        "identifier": datasets.Value("string"),
                        "url": datasets.Value("string"),
                    },
                    "is_part_of": {
                        "name": datasets.Value("string"),
                        "identifier": datasets.Value("string"),
                    },
                    "license": [
                        {
                            "name": datasets.Value("string"),
                            "url": datasets.Value("string"),
                            "identifier": datasets.Value("string"),
                        }
                    ],
                }
            ),
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_paths = [
            Path(self.config.data_dir) / f"enwiki_{self.config.shard}.ndjson"
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": data_paths}
            )
        ]

    def _generate_examples(self, filepaths):
        logger.info("Parsing and cleaning Wikipedia examples")
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for line in tqdm(f):
                    # Each line of the NDJSON dump is one article record.
                    example = json.loads(line)
                    # Keep only the fields declared in _info(), flattening the namespace.
                    clean_example = {}
                    clean_example["name"] = example["name"]
                    clean_example["identifier"] = example["identifier"]
                    clean_example["date_modified"] = example["date_modified"]
                    clean_example["namespace_name"] = example["namespace"]["name"]
                    clean_example["namespace_identifier"] = example["namespace"]["identifier"]
                    clean_example["categories"] = example.get("categories", None)
                    clean_example["url"] = example["url"]
                    clean_example["html"] = f'{example["article_body"]["html"]}'
                    clean_example["wikitext"] = example["article_body"]["wikitext"]
                    clean_example["in_language"] = example["in_language"]
                    clean_example["main_entity"] = example.get("main_entity", None)
                    clean_example["is_part_of"] = example["is_part_of"]
                    clean_example["license"] = example["license"]
                    yield clean_example["identifier"], clean_example


def parse_and_clean(filepath):
    """Standalone helper: read one NDJSON dump file and return a list of cleaned records."""
    examples = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in tqdm(f):
            example = json.loads(line)
            clean_example = {}
            clean_example["id"] = example["identifier"]
            clean_example["date_modified"] = example["date_modified"]
            clean_example["url"] = example["url"]
            clean_example["html"] = f'{example["article_body"]["html"]}'
            clean_example["wikitext"] = example["article_body"]["wikitext"]
            examples.append(clean_example)
    return examples
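

if __name__ == "__main__":
    # Ad-hoc usage sketch for the standalone helper above; the shard filename is a
    # hypothetical placeholder and is not shipped with this script.
    cleaned = parse_and_clean("enwiki_0.ndjson")
    print(f"Parsed {len(cleaned)} articles")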