conceptofmind committed
Commit 227b186 (verified) · Parent: bcd8cea

Update megawika.py

Files changed (1):
  1. megawika.py  +32 -70
megawika.py CHANGED

@@ -35,12 +35,24 @@ _HOMEPAGE = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_s
 _LICENSE = "cc-by-sa-4.0"
 _URL = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"
 
-# Load language-specific file paths
 def load_file_paths():
+    """Load and parse the files.yml containing dataset file paths.
+
+    Expected YAML structure:
+        en:
+          - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/en/en-00000-of-06154.jsonl
+        fr:
+          - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/fr/fr-00000-of-00123.jsonl
+        ...
+
+    Returns:
+        dict: Dictionary mapping language codes to lists of file URLs
+    """
     file_list_url = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/raw/main/files.yml"
     try:
         with urllib.request.urlopen(file_list_url) as f:
-            return yaml.safe_load(f)['fnames']
+            # Direct YAML parsing - the structure is already in the correct format
+            return yaml.safe_load(f)
     except (yaml.YAMLError, urllib.error.URLError) as exc:
         print(f"Error loading dataset file paths: {exc}")
         return {}
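
Note on this hunk: the substantive change is dropping the ['fnames'] indexing, so files.yml is now expected to map language codes directly to lists of shard URLs rather than nesting them under a top-level 'fnames' key. A minimal sketch (not part of the commit) of the shape the loader now expects, reusing the example URLs from the new docstring:

import yaml

example_yaml = """
en:
  - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/en/en-00000-of-06154.jsonl
fr:
  - https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/resolve/main/fr/fr-00000-of-00123.jsonl
"""

# yaml.safe_load returns the language -> URL-list mapping directly,
# which is exactly what load_file_paths() now hands back to the builder.
paths = yaml.safe_load(example_yaml)
print(list(paths.keys()))  # ['en', 'fr'] -- these become the config names
print(paths["en"][0])      # first English shard URL
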
@@ -63,11 +75,9 @@ class MegaWika(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.0.0")
 
-    # Load available languages
-    _DATA_URL = load_file_paths()
-    LANGUAGES = list(_DATA_URL.keys())
+    # Load available languages directly from YAML structure
+    LANGUAGES = list(load_file_paths().keys())
 
-    # Create configs for each language and an 'all' config
     BUILDER_CONFIGS = ([
         MegaWikaConfig(
             name="all",
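
Note on this hunk: the class-level _DATA_URL cache is removed and LANGUAGES is now computed straight from load_file_paths() while the class body is evaluated, which is when BUILDER_CONFIGS needs it. The rest of the BUILDER_CONFIGS expression is outside this diff; the sketch below shows the pattern it presumably follows, with a stand-in config class and hard-coded languages (both assumptions, not the commit's code):

import datasets

class MegaWikaConfig(datasets.BuilderConfig):
    """Stand-in for the config class defined elsewhere in megawika.py."""
    pass

VERSION = datasets.Version("1.0.0")
LANGUAGES = ["en", "fr"]  # in the script: list(load_file_paths().keys())

# One config per language plus the default "all" config.
BUILDER_CONFIGS = (
    [MegaWikaConfig(name="all", version=VERSION)]
    + [MegaWikaConfig(name=lang, version=VERSION) for lang in LANGUAGES]
)
print([cfg.name for cfg in BUILDER_CONFIGS])  # ['all', 'en', 'fr']
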
@@ -87,79 +97,31 @@ class MegaWika(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "all"
 
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "article_title": datasets.Value("string"),
-                    "article_text": datasets.Value("string"),
-                    "entries": datasets.features.Sequence(
-                        {
-                            "id": datasets.Value("string"),
-                            "passage": {
-                                "text": [datasets.Value("string")],
-                                "parse": datasets.Value("string"),
-                                "en_tokens": [datasets.Value("string")],
-                                "lang_tokens": [datasets.Value("string")],
-                                "en_lang_token_map": [[datasets.Value("int32")]]
-                            },
-                            "mt": {
-                                "original": datasets.Value("string"),
-                                "original_sents": [datasets.Value("string")],
-                                "translation": datasets.Value("string"),
-                                "translation_sents": [datasets.Value("string")],
-                                "translation_probs": [[datasets.Value("float32")]],
-                                "repetitious_translation": datasets.Value("bool")
-                            },
-                            "source_lang": datasets.Value("string"),
-                            "source_url": datasets.Value("string"),
-                            "source_text": datasets.Value("string"),
-                            "qa_pairs": datasets.Sequence(
-                                {
-                                    "question": datasets.Value("string"),
-                                    "en_answer": datasets.Value("string"),
-                                    "lang_answer": datasets.Value("string"),
-                                    "frames": datasets.Sequence(
-                                        {
-                                            "frame": datasets.Value("string"),
-                                            "argument": datasets.Value("string")
-                                        }
-                                    ),
-                                    "en_matches_in_source": [[datasets.Value("int32")]],
-                                    "en_match_in_passage": [datasets.Value("int32")],
-                                    "lang_matches_in_source": [[datasets.Value("int32")]],
-                                    "lang_match_in_passage": [datasets.Value("int32")],
-                                    "passage": [datasets.Value("string")],
-                                    "en_answer_tokens": [datasets.Value("string")],
-                                    "match_disambiguated_question": datasets.Value("string"),
-                                }
-                            )
-                        }
-                    )
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
+        # Load the file paths afresh to ensure we have the latest data
+        data_sources = load_file_paths()
+
         if self.config.name == "all":
-            data_sources = self._DATA_URL
+            # Process all languages
+            selected_sources = data_sources
         else:
-            if self.config.name not in self._DATA_URL:
-                raise ValueError(f"Language {self.config.name} not found in available languages: {list(self._DATA_URL.keys())}")
-            data_sources = {self.config.name: self._DATA_URL[self.config.name]}
+            # Process single language
+            if self.config.name not in data_sources:
+                raise ValueError(
+                    f"Language '{self.config.name}' not found in available languages: {list(data_sources.keys())}"
+                )
+            selected_sources = {self.config.name: data_sources[self.config.name]}
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepaths": dl_manager.download(data_sources[lang]), "language": lang}
+                gen_kwargs={
+                    "filepaths": dl_manager.download(urls),
+                    "language": lang
+                }
             )
-            for lang in data_sources
+            for lang, urls in selected_sources.items()
         ]
 
     def _get_qa_pair_list_features(self, qa_pair, feature_name):
 
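
Note on this hunk: besides deleting _info(), the commit rewires _split_generators to call load_file_paths() at split-generation time instead of reading the removed _DATA_URL class attribute, validates the requested config name against the available languages, and unpacks (lang, urls) pairs so that each SplitGenerator only downloads its own language's shards. A hypothetical usage sketch exercising the new branches (the repo id is a placeholder, not taken from this commit):

from datasets import load_dataset

# Placeholder repo id -- substitute whichever repo actually hosts megawika.py.
# Passing a language code as the config name takes the single-language branch
# in _split_generators; passing "all" downloads every language in files.yml.
ds = load_dataset(
    "user/megawika-subset-script",  # hypothetical
    "en",
    split="train",
    trust_remote_code=True,         # needed for script-based dataset builders
)
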