Datasets: yhavinga/mc4_nl_cleaned

Yeb Havinga committed d68620a (1 parent: c306330):
Add _en_nl configs that interleave English with Dutch documents

Files changed:
- README.md (+12 -9)
- mc4_nl_cleaned.py (+84 -79)
README.md CHANGED

@@ -125,15 +125,18 @@
 the naming style `c4-nl-cleaned.tfrecord-0XXXX-of-01024.json.gz` and 4 for validation following the
 naming style `c4-nl-cleaned.tfrecord-0000X-of-00004.json.gz`. The full set of preprocessed files takes roughly 208GB of disk space to download with Git LFS.
 
-For ease of use under different storage capacities, the following incremental
-configs are available:
-
-| tiny |
-| small |
-| medium |
-| large |
-| full |
+For ease of use under different storage capacities, the following incremental configs are available (note: files on disk are compressed):
+
+| subset | train size (docs, words, download + preprocessed disk space) | validation size |
+|:-------|--------------------------------------------------------------:|----------------:|
+| tiny   | 6M docs, 2B words (6 GB + 15 GB)                               | 16k docs        |
+| small  | 15M docs, 6B words (14 GB + 36 GB)                             | 16k docs        |
+| medium | 31M docs, 12B words (28 GB + 72 GB)                            | 32k docs        |
+| large  | 47M docs, 19B words (42 GB + 108 GB)                           | 48k docs        |
+| full   | 64M docs, 25B words (58 GB + 148 GB)                           | 64k docs        |
+
+For each subset, such as `tiny`, there also exists a config `tiny_en_nl` that interleaves examples from the cleaned
+`en` variant of C4.
 
 You can load any subset like this:
 
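The load example itself sits below this hunk. As a minimal sketch (assuming the standard 🤗 `datasets` API and the config names from the table above; `streaming=True` is an optional choice, not part of the README):

```python
from datasets import load_dataset

# Dutch-only subset; any config name from the table works ("tiny", "small", ...).
nl = load_dataset("yhavinga/mc4_nl_cleaned", "tiny", split="train")

# New in this commit: *_en_nl configs interleave English C4 documents with the
# Dutch ones. Streaming sidesteps the multi-GB download.
mixed = load_dataset("yhavinga/mc4_nl_cleaned", "tiny_en_nl", split="train", streaming=True)
print(next(iter(mixed))["text"][:100])
```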
mc4_nl_cleaned.py CHANGED

@@ -19,6 +19,7 @@ import json
 import gzip
 import textwrap
 import datasets
+from itertools import zip_longest
 
 logger = datasets.logging.get_logger(__name__)
 
@@ -49,24 +50,30 @@ _HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"
 
 _LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
 
-_DATA_URL = "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/{split}/c4-nl{validation}-cleaned.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
+_DATA_URL_NL = "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/{split}/c4-nl{validation}-cleaned.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
+_DATA_URL_EN = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"
+_C4_EN_VARIANT = "en"
+
+_CONFIG_NAMES = ["micro", "tiny", "small", "medium", "large", "full"]
+_CONFIG_EN_NL_SUFFIX = "_en_nl"
 
 _CONFIGS = dict(
-    tiny={"train": 100, "validation": 1},
-    small={"train": 250, "validation": 1},
-    medium={"train": 500, "validation": 2},
-    large={"train": 750, "validation": 3},
-    full={"train": 1024, "validation": 4},
-    tiny_0={"start": 0, "train": 100, "validation": 1},
-    tiny_1={"start": 100, "train": 100, "validation": 1},
-    tiny_2={"start": 200, "train": 100, "validation": 1},
-    tiny_3={"start": 300, "train": 100, "validation": 1},
-    tiny_4={"start": 400, "train": 100, "validation": 1},
-    tiny_5={"start": 500, "train": 100, "validation": 1},
-    tiny_6={"start": 600, "train": 100, "validation": 1},
-    tiny_7={"start": 700, "train": 100, "validation": 1},
-    tiny_8={"start": 800, "train": 100, "validation": 1},
-    tiny_9={"start": 900, "train": 100, "validation": 1},
+    micro={"train": 2, "validation": 1, "estimate": "1GB"},
+    tiny={"train": 100, "validation": 1, "estimate": "10GB"},
+    small={"train": 250, "validation": 1, "estimate": "25GB"},
+    medium={"train": 500, "validation": 2, "estimate": "50GB"},
+    large={"train": 750, "validation": 3, "estimate": "75GB"},
+    full={"train": 1024, "validation": 4, "estimate": "103GB"},
+    tiny_0={"start": 0, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_1={"start": 100, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_2={"start": 200, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_3={"start": 300, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_4={"start": 400, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_5={"start": 500, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_6={"start": 600, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_7={"start": 700, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_8={"start": 800, "train": 100, "validation": 1, "estimate": "10GB"},
+    tiny_9={"start": 900, "train": 100, "validation": 1, "estimate": "10GB"},
 )
 
 
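To make the URL templates concrete, a small sketch (not part of the commit) that formats one train shard and one validation shard exactly as `_split_generators` below does:

```python
# Sketch: format one Dutch train shard and one Dutch validation shard.
_DATA_URL_NL = (
    "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/"
    "mc4_nl_cleaned/{split}/c4-nl{validation}-cleaned.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
)

train_url = _DATA_URL_NL.format(split="train", validation="", index=0, n_shards=1024)
# ...ends in: train/c4-nl-cleaned.tfrecord-00000-of-01024.json.gz

val_url = _DATA_URL_NL.format(split="validation", validation="-validation", index=0, n_shards=4)
# ...ends in: validation/c4-nl-validation-cleaned.tfrecord-00000-of-00004.json.gz
```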
@@ -86,55 +93,16 @@ class Mc4(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         Mc4NlCleanedConfig(
-            name="tiny",
+            name=name,
             version=datasets.Version("1.0.0"),
             description=textwrap.dedent(
                 f"""\
-                A tiny cleaned version of the Dutch portion of the multilingual C4 corpus.
-                Estimated size of compressed files: 10GB
-                """
+                A {name} cleaned version of the Dutch portion of the multilingual C4 corpus.
+                Estimated size of compressed files: {_CONFIGS[name]['estimate']}
+                """
             ),
-        ),
-        Mc4NlCleanedConfig(
-            name="small",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                A small cleaned version of the Dutch portion of the multilingual C4 corpus.
-                Estimated size of compressed files: 25GB
-                """
-            ),
-        ),
-        Mc4NlCleanedConfig(
-            name="medium",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                A medium cleaned version of the Dutch portion of the multilingual C4 corpus.
-                Estimated size of compressed files: 50GB
-                """
-            ),
-        ),
-        Mc4NlCleanedConfig(
-            name="large",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                A large cleaned version of the Dutch portion of the multilingual C4 corpus.
-                Estimated size of compressed files: 75GB
-                """
-            ),
-        ),
-        Mc4NlCleanedConfig(
-            name="full",
-            version=datasets.Version("1.0.0"),
-            description=textwrap.dedent(
-                f"""\
-                The full cleaned version of the Dutch portion of the multilingual C4 corpus.
-                Estimated size of compressed files: 103GB
-                """
-            ),
-        ),
+        )
+        for name in _CONFIG_NAMES
 ]
 
     for i in range(10):
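The refactored entry relies on `textwrap.dedent` applied to an f-string, so the generated descriptions read the same as the hand-written ones they replace. A quick sketch (not part of the commit) of what one generated description expands to:

```python
import textwrap

name, estimate = "tiny", "10GB"  # values from _CONFIGS
print(textwrap.dedent(
    f"""\
    A {name} cleaned version of the Dutch portion of the multilingual C4 corpus.
    Estimated size of compressed files: {estimate}
    """
))
# A tiny cleaned version of the Dutch portion of the multilingual C4 corpus.
# Estimated size of compressed files: 10GB
```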
@@ -151,6 +119,19 @@ class Mc4(datasets.GeneratorBasedBuilder):
             ),
         )
 
+    BUILDER_CONFIGS += [
+        Mc4NlCleanedConfig(
+            name=f"{name}{_CONFIG_EN_NL_SUFFIX}",
+            version=datasets.Version("1.0.0"),
+            description=textwrap.dedent(
+                f"""\
+                A {name} cleaned version of the Dutch and English portion of the multilingual C4 corpus.
+                """
+            ),
+        )
+        for name in _CONFIG_NAMES
+    ]
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
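Putting the pieces together: the comprehension over `_CONFIG_NAMES`, the `for i in range(10):` loop (its body lies outside these hunks, but judging from the `_CONFIGS` keys it appends the `tiny_0`…`tiny_9` configs), and the `_en_nl` block above. A sketch, assuming the constants from the earlier hunk, of the config names the builder ends up exposing; note the `tiny_N` shard configs do not get `_en_nl` twins:

```python
# Sketch: enumerate the config names registered by the builder.
_CONFIG_NAMES = ["micro", "tiny", "small", "medium", "large", "full"]
_CONFIG_EN_NL_SUFFIX = "_en_nl"

names = list(_CONFIG_NAMES)                                     # base subsets
names += [f"tiny_{i}" for i in range(10)]                       # incremental tiny_0..tiny_9
names += [f"{n}{_CONFIG_EN_NL_SUFFIX}" for n in _CONFIG_NAMES]  # added by this commit
print(names)
# ['micro', 'tiny', ..., 'tiny_0', ..., 'tiny_9', 'micro_en_nl', ..., 'full_en_nl']
```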
@@ -169,20 +150,31 @@ class Mc4(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         data_urls = {}
+        config = _CONFIGS[self.config.name.replace(_CONFIG_EN_NL_SUFFIX, "")]
         for split in ["train", "validation"]:
-            start_file = (
-                _CONFIGS[self.config.name].get("start", 0)
-                if split == "train" else 0
-            )
-            data_urls[split] = [
-                _DATA_URL.format(
-                    split=split,
-                    index=index,
-                    validation="-validation" if split == "validation" else "",
-                    n_shards=4 if split == "validation" else 1024,
+            start_file = config.get("start", 0) if split == "train" else 0
+            num_files = config.get(split)
+
+            data_urls[split] = []
+            for index in range(start_file, start_file + num_files):
+                data_urls[split].append(
+                    _DATA_URL_NL.format(
+                        split=split,
+                        index=index,
+                        validation="-validation" if split == "validation" else "",
+                        n_shards=4 if split == "validation" else 1024,
+                    )
                 )
-                for index in range(start_file, start_file + _CONFIGS[self.config.name].get(split))
-            ]
+                if self.config.name.endswith(_CONFIG_EN_NL_SUFFIX):
+                    data_urls[split].append(
+                        _DATA_URL_EN.format(
+                            name=_C4_EN_VARIANT,
+                            split=split,
+                            index=index,
+                            validation="-validation" if split == "validation" else "",
+                            n_shards=8 if split == "validation" else 1024,
+                        )
+                    )
         train_downloaded_files = dl_manager.download(data_urls["train"])
         validation_downloaded_files = dl_manager.download(data_urls["validation"])
         return [
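Because the EN shard is appended immediately after each NL shard, the download list for an `_en_nl` config alternates languages, which is what lets `_generate_examples` pair the files up later. A sketch (not part of the commit; shard names abbreviated) for the smallest case, `micro_en_nl`, where `train` is 2:

```python
# Sketch: the train URL list built for "micro_en_nl" ({"train": 2, ...}).
data_urls = []
for index in range(0, 2):  # start_file 0, num_files 2
    data_urls.append(f"c4-nl-cleaned.tfrecord-{index:05d}-of-01024.json.gz")  # NL shard
    data_urls.append(f"c4-train.{index:05d}-of-01024.json.gz")                # EN shard
print(data_urls)
# ['c4-nl-cleaned.tfrecord-00000-of-01024.json.gz', 'c4-train.00000-of-01024.json.gz',
#  'c4-nl-cleaned.tfrecord-00001-of-01024.json.gz', 'c4-train.00001-of-01024.json.gz']
```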
@@ -196,14 +188,27 @@ class Mc4(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+    @staticmethod
+    def grouper(iterable, n, fillvalue=None):
+        """Collect data into fixed-length chunks or blocks"""
+        # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
+        args = [iter(iterable)] * n
+        return zip_longest(*args, fillvalue=fillvalue)
+
     def _generate_examples(self, filepaths):
         """This function returns the examples in the raw (text) form by iterating on all the files."""
         id_ = 0
-        for filepath in filepaths:
-            logger.info(f"Generating examples from {filepath}")
-            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
-                for line in f:
-                    if line:
-                        example = json.loads(line)
+        for filepath1, filepath2 in self.grouper(filepaths, 2, None):
+            logger.info(f"Generating examples from {filepath1} and {filepath2}")
+            with gzip.open(
+                open(filepath1, "rb"), "rt", encoding="utf-8"
+            ) as f1, gzip.open(open(filepath2, "rb"), "rt", encoding="utf-8") as f2:
+                for line1, line2 in zip(f1, f2):
+                    if line1:
+                        example = json.loads(line1)
+                        yield id_, example
+                        id_ += 1
+                    if line2:
+                        example = json.loads(line2)
                         yield id_, example
                         id_ += 1
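To see what the pairing does, a standalone sketch (not part of the commit) of the itertools `grouper` recipe applied to an interleaved shard list. One detail worth flagging: since `zip(f1, f2)` stops at the shorter of the two files, the `if line1:` / `if line2:` guards above only take effect under a `zip_longest`-style pairing of the lines; the commit as shown uses plain `zip`, so trailing documents in the longer file of each pair are dropped.

```python
from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    """itertools 'grouper' recipe, as used by _generate_examples above."""
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

# Shard list as built by _split_generators for an *_en_nl config:
filepaths = ["nl-00000.json.gz", "en-00000.json.gz",
             "nl-00001.json.gz", "en-00001.json.gz"]
for filepath_nl, filepath_en in grouper(filepaths, 2, None):
    print(filepath_nl, "+", filepath_en)
# nl-00000.json.gz + en-00000.json.gz
# nl-00001.json.gz + en-00001.json.gz
```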