Datasets:
Yeb Havinga
committed on
Commit
·
c306330
1
Parent(s):
613de92
Add 10 tiny configs each with different files
Browse files- mc4_nl_cleaned.py +30 -2
mc4_nl_cleaned.py
CHANGED
@@ -57,6 +57,16 @@ _CONFIGS = dict(
|
|
57 |
medium={"train": 500, "validation": 2},
|
58 |
large={"train": 750, "validation": 3},
|
59 |
full={"train": 1024, "validation": 4},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
)
|
61 |
|
62 |
|
@@ -127,6 +137,20 @@ class Mc4(datasets.GeneratorBasedBuilder):
|
|
127 |
),
|
128 |
]
|
129 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
def _info(self):
|
131 |
return datasets.DatasetInfo(
|
132 |
description=_DESCRIPTION,
|
@@ -146,14 +170,18 @@ class Mc4(datasets.GeneratorBasedBuilder):
|
|
146 |
def _split_generators(self, dl_manager):
|
147 |
data_urls = {}
|
148 |
for split in ["train", "validation"]:
|
|
|
|
|
|
|
|
|
149 |
data_urls[split] = [
|
150 |
_BASE_URL.format(
|
151 |
split=split,
|
152 |
index=index,
|
153 |
-
validation="-validation" if split=="validation" else "",
|
154 |
n_shards=4 if split == "validation" else 1024,
|
155 |
)
|
156 |
-
for index in range(
|
157 |
]
|
158 |
train_downloaded_files = dl_manager.download(data_urls["train"])
|
159 |
validation_downloaded_files = dl_manager.download(data_urls["validation"])
|
|
|
57 |
medium={"train": 500, "validation": 2},
|
58 |
large={"train": 750, "validation": 3},
|
59 |
full={"train": 1024, "validation": 4},
|
60 |
+
tiny_0={"start": 0, "train": 100, "validation": 1},
|
61 |
+
tiny_1={"start": 100, "train": 100, "validation": 1},
|
62 |
+
tiny_2={"start": 200, "train": 100, "validation": 1},
|
63 |
+
tiny_3={"start": 300, "train": 100, "validation": 1},
|
64 |
+
tiny_4={"start": 400, "train": 100, "validation": 1},
|
65 |
+
tiny_5={"start": 500, "train": 100, "validation": 1},
|
66 |
+
tiny_6={"start": 600, "train": 100, "validation": 1},
|
67 |
+
tiny_7={"start": 700, "train": 100, "validation": 1},
|
68 |
+
tiny_8={"start": 800, "train": 100, "validation": 1},
|
69 |
+
tiny_9={"start": 900, "train": 100, "validation": 1},
|
70 |
)
|
71 |
|
72 |
|
|
|
137 |
),
|
138 |
]
|
139 |
|
140 |
+
for i in range(10):
|
141 |
+
BUILDER_CONFIGS.append(
|
142 |
+
Mc4NlCleanedConfig(
|
143 |
+
name=f"tiny_{i}",
|
144 |
+
version=datasets.Version("1.0.0"),
|
145 |
+
description=textwrap.dedent(
|
146 |
+
f"""\
|
147 |
+
The tiny_{i} slice of the full cleaned version of the Dutch portion of the multilingual C4 corpus.
|
148 |
+
Estimated size of compressed files: 10GB
|
149 |
+
"""
|
150 |
+
),
|
151 |
+
),
|
152 |
+
)
|
153 |
+
|
154 |
def _info(self):
|
155 |
return datasets.DatasetInfo(
|
156 |
description=_DESCRIPTION,
|
|
|
170 |
def _split_generators(self, dl_manager):
|
171 |
data_urls = {}
|
172 |
for split in ["train", "validation"]:
|
173 |
+
start_file = (
|
174 |
+
_CONFIGS[self.config.name].get("start", 0) if split == "train" else 0
|
175 |
+
)
|
176 |
+
num_files = _CONFIGS[self.config.name].get(split)
|
177 |
data_urls[split] = [
|
178 |
_BASE_URL.format(
|
179 |
split=split,
|
180 |
index=index,
|
181 |
+
validation="-validation" if split == "validation" else "",
|
182 |
n_shards=4 if split == "validation" else 1024,
|
183 |
)
|
184 |
+
for index in range(start_file, start_file + num_files)
|
185 |
]
|
186 |
train_downloaded_files = dl_manager.download(data_urls["train"])
|
187 |
validation_downloaded_files = dl_manager.download(data_urls["validation"])
|