Remove extra print (and extra tabs)
E3C.py (changed)
@@ -45,7 +45,7 @@ class E3C(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS += [
         datasets.BuilderConfig(name=f"{lang}_temporal", version="1.0.0", description=f"The {lang} subset of the E3C corpus") for lang in _LANGUAGES
     ]
-
+
     DEFAULT_CONFIG_NAME = "French_clinical"

     def _info(self):
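A quick usage note for the configs assembled above: downstream code selects one of these builder configs by name when loading the script. A minimal sketch, assuming the script is loaded from its local path (the published hub repository id is not part of this diff):

from datasets import load_dataset

# Hypothetical call: the local script path and the "French_clinical" default
# config name come from this file; the hub id is not shown in the diff.
ds = load_dataset("E3C.py", name="French_clinical")
print(ds)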
@@ -57,7 +57,7 @@ class E3C(datasets.GeneratorBasedBuilder):
            names = ["O","B-CLINENTITY","I-CLINENTITY"]
        elif self.config.name.find("temporal") != -1:
            names = ["O", "B-EVENT", "B-ACTOR", "B-BODYPART", "B-TIMEX3", "B-RML", "I-EVENT", "I-ACTOR", "I-BODYPART", "I-TIMEX3", "I-RML"]
-
+
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
@@ -70,7 +70,7 @@ class E3C(datasets.GeneratorBasedBuilder):
                ),
            }
        )
-
+
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
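The features block closed in this hunk is the usual token-classification schema; a standalone sketch under the assumption that it mirrors the fields yielded later in the script ("id", "text", "tokens", "ner_tags") with the clinical label list shown above (the exact feature types are elided in this diff):

import datasets

names = ["O", "B-CLINENTITY", "I-CLINENTITY"]

# Plausible reconstruction only, not the script's literal definition.
features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "tokens": datasets.Sequence(datasets.Value("string")),
        "ner_tags": datasets.Sequence(datasets.ClassLabel(names=names)),
    }
)

print(features["ner_tags"].feature.names)  # ['O', 'B-CLINENTITY', 'I-CLINENTITY']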
@@ -82,12 +82,8 @@ class E3C(datasets.GeneratorBasedBuilder):

        data_dir = dl_manager.download_and_extract(_URL)

-        print(data_dir)
-
        if self.config.name.find("clinical") != -1:
-
-            print("clinical")
-
+
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
@@ -111,11 +107,9 @@ class E3C(datasets.GeneratorBasedBuilder):
                    },
                ),
            ]
-
+
        elif self.config.name.find("temporal") != -1:
-
-            print("temporal")
-
+
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
@@ -161,14 +155,14 @@ class E3C(datasets.GeneratorBasedBuilder):
    def get_parsed_data(self, filepath: str):

        for root, _, files in os.walk(filepath):
-
+
            for file in files:
-
+
                with open(f"{root}/{file}") as soup_file:
-
+
                    soup = BeautifulSoup(soup_file, "xml")
                    text = soup.find("cas:Sofa").get("sofaString")
-
+
                    yield {
                        "CLINENTITY": self.get_clinical_annotations(soup.find_all("custom:CLINENTITY"), text),
                        "EVENT": self.get_annotations(soup.find_all("custom:EVENT"), text),
@@ -243,60 +237,60 @@ class E3C(datasets.GeneratorBasedBuilder):
                _labels = clinical_labels
            elif self.config.name.find("temporal") != -1:
                _labels = temporal_information_labels
-
+
            all_res.append({
                "id": key,
                "text": sentence[-1],
                "tokens": list(map(lambda token: token[2], filtered_tokens)),
                "ner_tags": _labels,
            })
-
+
            key += 1
-
+
        if self.config.name.find("clinical") != -1:
-
+
            if split != "test":
-
+
                ids = [r["id"] for r in all_res]
-
+
                random.seed(4)
                random.shuffle(ids)
                random.shuffle(ids)
                random.shuffle(ids)

                train, validation = np.split(ids, [int(len(ids)*0.8738)])
-
+
                if split == "train":
                    allowed_ids = list(train)
                elif split == "validation":
                    allowed_ids = list(validation)
-
+
                for r in all_res:
                    if r["id"] in allowed_ids:
                        yield r["id"], r
            else:
-
+
                for r in all_res:
                    yield r["id"], r
-
+
        elif self.config.name.find("temporal") != -1:
-
+
            ids = [r["id"] for r in all_res]
-
+
            random.seed(4)
            random.shuffle(ids)
            random.shuffle(ids)
            random.shuffle(ids)
-
+
            train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
-
+
            if split == "train":
                allowed_ids = list(train)
            elif split == "validation":
                allowed_ids = list(validation)
            elif split == "test":
                allowed_ids = list(test)
-
+
            for r in all_res:
                if r["id"] in allowed_ids:
                    yield r["id"], r
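None of the split arithmetic above changes in this commit; it stays deterministic because the fixed seed and the repeated shuffles always produce the same ordering before np.split cuts the id list. A toy sketch of the clinical train/validation behaviour (invented ids, not corpus data):

import random

import numpy as np

# Invented stand-ins for the sentence keys produced by the generator.
ids = [str(i) for i in range(100)]

# Same procedure as the clinical branch: fixed seed, three shuffles, then an
# index split at 87.38% of the ids for train, the rest for validation.
random.seed(4)
random.shuffle(ids)
random.shuffle(ids)
random.shuffle(ids)

train, validation = np.split(np.array(ids), [int(len(ids) * 0.8738)])
print(len(train), len(validation))  # 87 13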