# coding=utf-8
# Copyright 2025 The Leo-Ai and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soreva Dataset."""

import os
from collections import OrderedDict

import datasets

logger = datasets.logging.get_logger(__name__)

_SOREVA_LANG_TO_ID = OrderedDict([
    ("Afrikaans", "af"), ("Bafia", "ksf"), ("Bafut", "bfd"), ("Baka", "bdh"), ("Bakoko", "bkh"),
    ("Bamun", "bax"), ("Basaa", "bas"), ("Duala", "dua"), ("Ejagham", "etu"), ("Eton", "eto"),
    ("Ewondo", "ewo"), ("Fe", "fmp"), ("Fulfulde", "fub"), ("Gbaya", "gya"), ("Ghamála", "bbj"),
    ("Hausa", "ha"), ("Igbo", "ibo"), ("isiXhosa", "xho"), ("isiZulu", "zul"), ("Isu", "isu"),
    ("Kera", "ker"), ("Kiswahili", "swa"), ("Kom", "bkm"), ("Kwasio", "kqs"), ("Lamso", "lns"),
    ("Lingala", "lin"), ("Maka", "mcp"), ("Malagasy", "mg"), ("Medumba", "byv"), ("Mka", "bqz"),
    ("Mundang", "mua"), ("Nda", "nda"), ("Ngiemboon", "nnh"), ("Ngombala", "nla"), ("Nomaande", "lem"),
    ("Nugunu", "yas"), ("Pidgin", "pcm"), ("Pulaar", "fuc"), ("Sepedi", "nso"), ("Tuki", "bag"),
    ("Tunen", "tvu"), ("Twi", "twi"), ("Vute", "vut"), ("Wolof", "wol"), ("Yambeta", "yat"),
    ("Yangben", "yav"), ("Yemba", "ybb"), ("Yoruba", "yor"), ("Éwé", "ewe"),
])
_SOREVA_LANG_SHORT_TO_LONG = {v: k for k, v in _SOREVA_LANG_TO_ID.items()}

_SOREVA_LANG = sorted([
    "af_za", "bag_cm", "bas_cm", "bax_cm", "bbj_cm", "bqz_cm", "bdh_cm", "bfd_cm", "bkh_cm", "bkm_cm",
    "ksf_cm", "byv_cm", "dua_cm", "ewe_tg", "etu_cm", "eto_cm", "ewo_cm", "fmp_cm", "fub_cm", "fuc_sn",
    "gya_cf", "ha_ng", "ibo_ng", "isu_cm", "ker_td", "kqs_cm", "lem_cm", "lin_cd", "lns_cm", "mcp_cm",
    "mg_mg", "tvu_cm", "mua_cm", "nda_cm", "nla_cm", "nnh_cm", "nso_za", "pcm_cm", "swa_ke", "twi_gh",
    "vut_cm", "wol_sn", "xho_za", "yas_cm", "yav_cm", "ybb_cm", "yor_ng", "zul_za", "yat_cm",
])
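
# Config names follow the pattern "<language_code>_<country_code>", e.g. "bas_cm" for
# Basaa spoken in Cameroon. The two mappings below recover the long language name by
# dropping the country suffix and looking the remaining code up in _SOREVA_LANG_TO_ID.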
_SOREVA_LONG_TO_LANG = {_SOREVA_LANG_SHORT_TO_LONG["_".join(k.split("_")[:-1]) or k]: k for k in _SOREVA_LANG}
_SOREVA_LANG_TO_LONG = {v: k for k, v in _SOREVA_LONG_TO_LANG.items()}

_ALL_LANG = _SOREVA_LANG
_ALL_CONFIGS = list(_SOREVA_LANG) + ["all"]

# TODO(Soreva)
_DESCRIPTION = (
    "SOREVA is a multilingual speech dataset designed for the evaluation "
    "of text-to-speech (TTS) and speech representation models in low-resource African languages. "
    "This dataset specifically targets out-of-domain generalization, addressing the lack of evaluation "
    "sets for languages typically trained on narrow-domain corpora such as religious texts."
)

_CITATION = ""

_HOMEPAGE_URL = ""

_BASE_PATH = "data/{langs}/"
_DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
_META_URL = _BASE_PATH + "{split}.tsv"
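
# Expected layout inside the dataset repository, as implied by the templates above
# (the exact audio file format is not guaranteed by this script):
#   data/<config>/audio/<split>.tar.gz  -> archive of audio files for one split
#   data/<config>/<split>.tsv           -> tab-separated metadata for the same split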


class sorevaConfig(datasets.BuilderConfig):
    """BuilderConfig for Soreva."""

    def __init__(self, name, description, citation, homepage):
        super(sorevaConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.name = name
        self.description = description
        self.citation = citation
        self.homepage = homepage


def _build_config(name):
    return sorevaConfig(
        name=name,
        description=_DESCRIPTION,
        citation=_CITATION,
        homepage=_HOMEPAGE_URL,
    )


class soreva(datasets.GeneratorBasedBuilder):
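    """GeneratorBasedBuilder for Soreva, with one config per language plus an "all" config."""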

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS]

    def _info(self):
        langs = _ALL_CONFIGS
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "raw_transcription": datasets.Value("string"),
                "gender": datasets.ClassLabel(names=["male", "female", "other"]),
                "lang_id": datasets.ClassLabel(names=langs),
                "language": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=self.config.description + "\n" + _DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=self.config.homepage,
            citation=self.config.citation + "\n" + _CITATION,
        )

    # soreva
    def _split_generators(self, dl_manager):
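        """Check which of train/dev/test exist for the selected language(s), download the
        audio archives and TSV metadata, and return one SplitGenerator per available split."""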
        all_splits = ["train", "dev", "test"]
        available_splits = []
        if self.config.name == "all":
            langs = _SOREVA_LANG
        else:
            langs = [self.config.name]

        data_urls = {}
        meta_urls = {}
        for split in all_splits:
            try:
                if self.config.name == "all":
                    data_urls[split] = [_DATA_URL.format(langs=lang, split=split) for lang in langs]
                    meta_urls[split] = [_META_URL.format(langs=lang, split=split) for lang in langs]
                else:
                    data_urls[split] = [_DATA_URL.format(langs=self.config.name, split=split)]
                    meta_urls[split] = [_META_URL.format(langs=self.config.name, split=split)]
                # Probe the split by downloading its metadata; missing splits raise and are skipped.
                dl_manager.download(meta_urls[split])
                available_splits.append(split)
            except Exception as e:
                logger.warning(f"Split '{split}' not available: {e}")

        archive_paths = dl_manager.download({s: data_urls[s] for s in available_splits})
        local_extracted_archives = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        archive_iters = {s: [dl_manager.iter_archive(p) for p in archive_paths[s]] for s in available_splits}
        meta_paths = dl_manager.download({s: meta_urls[s] for s in available_splits})

        split_gens = []
        for split in available_splits:
            split_name = {
                "train": datasets.Split.TRAIN,
                "dev": datasets.Split.VALIDATION,
                "test": datasets.Split.TEST,
            }[split]
            split_gens.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={
                        "local_extracted_archives": local_extracted_archives.get(split, [None] * len(meta_paths.get(split))),
                        "archive_iters": archive_iters.get(split),
                        "text_paths": meta_paths.get(split),
                    },
                )
            )
        return split_gens

    def _get_data(self, lines, lang_id):
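        """Parse TSV lines of the form (file_name, raw_transcription, transcription, gender)
        into a dict keyed by audio file name."""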
        data = {}
        gender_to_id = {"MALE": 0, "FEMALE": 1, "OTHER": 2}
        for line in lines:
            if isinstance(line, bytes):
                line = line.decode("utf-8")
            (
                file_name,
                raw_transcription,
                transcription,
                gender,
            ) = line.strip().split("\t")
            data[file_name] = {
                "raw_transcription": raw_transcription,
                "transcription": transcription,
                "gender": gender_to_id[gender],
                "lang_id": _SOREVA_LANG.index(lang_id),
                "language": _SOREVA_LANG_TO_LONG[lang_id],
            }
        return data

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
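        """Iterate over each language's audio archive and yield one example per audio file
        that has a matching row in the TSV metadata."""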
        assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
        key = 0
        if self.config.name == "all":
            langs = _SOREVA_LANG
        else:
            langs = [self.config.name]

        for archive, text_path, local_extracted_path, lang_id in zip(
            archive_iters, text_paths, local_extracted_archives, langs
        ):
            with open(text_path, encoding="utf-8") as f:
                lines = f.readlines()
            data = self._get_data(lines, lang_id)

            for audio_path, audio_file in archive:
                audio_filename = audio_path.split("/")[-1]
                if audio_filename not in data:
                    continue

                result = data[audio_filename]
                extracted_audio_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else None
                )
                result["path"] = extracted_audio_path
                result["audio"] = {"path": audio_path, "bytes": audio_file.read()}
                yield key, result
                key += 1
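

# Example usage (a minimal sketch; the repository id "Leo-Ai/soreva" below is an
# assumption -- substitute the actual path of this script or its Hub repository):
#
#   from datasets import load_dataset
#
#   # Load a single language configuration, e.g. Basaa spoken in Cameroon:
#   ds = load_dataset("Leo-Ai/soreva", "bas_cm")
#
#   # Or load every available language at once:
#   ds_all = load_dataset("Leo-Ai/soreva", "all")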