Datasets:

Languages:
English
ArXiv:
License:
gabrielaltay committed on
Commit
dc30617
·
1 Parent(s): 612d1a3

upload hubscripts/medmentions_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. medmentions.py +307 -0
medmentions.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and Simon Ott, github: nomisto
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ MedMentions is a new manually annotated resource for the recognition of biomedical concepts.
18
+ What distinguishes MedMentions from other annotated biomedical corpora is its size (over 4,000
19
+ abstracts and over 350,000 linked mentions), as well as the size of the concept ontology (over
20
+ 3 million concepts from UMLS 2017) and its broad coverage of biomedical disciplines.
21
+
22
+ Corpus: The MedMentions corpus consists of 4,392 papers (Titles and Abstracts) randomly selected
23
+ from among papers released on PubMed in 2016, that were in the biomedical field, published in
24
+ the English language, and had both a Title and an Abstract.
25
+
26
+ Annotators: We recruited a team of professional annotators with rich experience in biomedical
27
+ content curation to exhaustively annotate all UMLS® (2017AA full version) entity mentions in
28
+ these papers.
29
+
30
+ Annotation quality: We did not collect stringent IAA (Inter-annotator agreement) data. To gain
31
+ insight on the annotation quality of MedMentions, we randomly selected eight papers from the
32
+ annotated corpus, containing a total of 469 concepts. Two biologists ('Reviewer') who did not
33
+ participate in the annotation task then each reviewed four papers. The agreement between
34
+ Reviewers and Annotators, an estimate of the Precision of the annotations, was 97.3%.
35
+
36
+ For more information visit: https://github.com/chanzuckerberg/MedMentions
37
+ """
38
+
39
+ import itertools as it
40
+ from typing import List
41
+
42
+ import datasets
43
+
44
+ from .bigbiohub import kb_features
45
+ from .bigbiohub import BigBioConfig
46
+ from .bigbiohub import Tasks
47
+
48
# Corpus provenance flags consumed by the BigBio harness.
_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False

# BibTeX entry for citing the underlying publication.
_CITATION = """\
@misc{mohan2019medmentions,
title={MedMentions: A Large Biomedical Corpus Annotated with UMLS Concepts},
author={Sunil Mohan and Donghui Li},
year={2019},
eprint={1902.09476},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

# Canonical dataset identifiers.
_DATASETNAME = "medmentions"
_DISPLAYNAME = "MedMentions"

_DESCRIPTION = """\
MedMentions is a new manually annotated resource for the recognition of biomedical concepts.
What distinguishes MedMentions from other annotated biomedical corpora is its size (over 4,000
abstracts and over 350,000 linked mentions), as well as the size of the concept ontology (over
3 million concepts from UMLS 2017) and its broad coverage of biomedical disciplines.

Corpus: The MedMentions corpus consists of 4,392 papers (Titles and Abstracts) randomly selected
from among papers released on PubMed in 2016, that were in the biomedical field, published in
the English language, and had both a Title and an Abstract.

Annotators: We recruited a team of professional annotators with rich experience in biomedical
content curation to exhaustively annotate all UMLS® (2017AA full version) entity mentions in
these papers.

Annotation quality: We did not collect stringent IAA (Inter-annotator agreement) data. To gain
insight on the annotation quality of MedMentions, we randomly selected eight papers from the
annotated corpus, containing a total of 469 concepts. Two biologists ('Reviewer') who did not
participate in the annotation task then each reviewed four papers. The agreement between
Reviewers and Annotators, an estimate of the Precision of the annotations, was 97.3%.
"""

_HOMEPAGE = "https://github.com/chanzuckerberg/MedMentions"

_LICENSE = 'Creative Commons Zero v1.0 Universal'

# Per-subset download lists, in a fixed order:
# [corpus archive, train pmids, dev pmids, test pmids].
# The official split pmid files are shared between the two subsets.
_URLS = {
    "medmentions_full": [
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator.txt.gz",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_trng.txt",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_dev.txt",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_test.txt",
    ],
    "medmentions_st21pv": [
        "https://github.com/chanzuckerberg/MedMentions/raw/master/st21pv/data/corpus_pubtator.txt.gz",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_trng.txt",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_dev.txt",
        "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator_pmids_test.txt",
    ],
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_DISAMBIGUATION, Tasks.NAMED_ENTITY_RECOGNITION]

# Version strings for the two exposed schemas.
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
110
+
111
+
112
class MedMentionsDataset(datasets.GeneratorBasedBuilder):
    """MedMentions dataset for named-entity disambiguation (NED).

    Loads the PubTator-formatted MedMentions corpus (``full`` or ``st21pv``
    subset) and exposes it either in the original "source" schema or in the
    unified "bigbio_kb" schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="medmentions_full_source",
            version=SOURCE_VERSION,
            description="MedMentions Full source schema",
            schema="source",
            subset_id="medmentions_full",
        ),
        BigBioConfig(
            name="medmentions_full_bigbio_kb",
            version=BIGBIO_VERSION,
            description="MedMentions Full BigBio schema",
            schema="bigbio_kb",
            subset_id="medmentions_full",
        ),
        BigBioConfig(
            name="medmentions_st21pv_source",
            version=SOURCE_VERSION,
            description="MedMentions ST21pv source schema",
            schema="source",
            subset_id="medmentions_st21pv",
        ),
        BigBioConfig(
            name="medmentions_st21pv_bigbio_kb",
            version=BIGBIO_VERSION,
            description="MedMentions ST21pv BigBio schema",
            schema="bigbio_kb",
            subset_id="medmentions_st21pv",
        ),
    ]

    DEFAULT_CONFIG_NAME = "medmentions_full_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pmid": datasets.Value("string"),
                    "passages": [
                        {
                            "type": datasets.Value("string"),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                        }
                    ],
                    "entities": [
                        {
                            "text": datasets.Sequence(datasets.Value("string")),
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "concept_id": datasets.Value("string"),
                            "semantic_type_id": datasets.Sequence(
                                datasets.Value("string")
                            ),
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Download the corpus and the official train/dev/test pmid lists.

        All three splits read the same corpus file; each split is filtered
        down to its pmid list in ``_generate_examples``.
        """
        urls = _URLS[self.config.subset_id]
        (
            corpus_path,
            pmids_train,
            pmids_dev,
            pmids_test,
        ) = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"corpus_path": corpus_path, "pmids_path": pmids_train},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"corpus_path": corpus_path, "pmids_path": pmids_test},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"corpus_path": corpus_path, "pmids_path": pmids_dev},
            ),
        ]

    def _generate_examples(self, corpus_path, pmids_path):
        """Yield ``(key, example)`` pairs for the documents listed in *pmids_path*."""
        # One line per pmid; build the membership set in a single pass.
        with open(pmids_path, encoding="utf8") as infile:
            pmids = {int(line.strip()) for line in infile}

        if self.config.schema == "source":
            with open(corpus_path, encoding="utf8") as corpus:
                for document in self._generate_parsed_documents(corpus, pmids):
                    yield document["pmid"], document

        elif self.config.schema == "bigbio_kb":
            uid = it.count(0)
            with open(corpus_path, encoding="utf8") as corpus:
                for document in self._generate_parsed_documents(corpus, pmids):
                    # NOTE(review): ids are ints drawn from the counter;
                    # confirm against kb_features whether strings are expected.
                    document["id"] = next(uid)
                    document["document_id"] = document.pop("pmid")

                    # Emit one bigbio entity per (mention, semantic type) pair,
                    # so mentions carrying several UMLS types are duplicated.
                    entities_ = []
                    for entity in document["entities"]:
                        for semantic_type in entity["semantic_type_id"]:
                            entities_.append(
                                {
                                    "id": next(uid),
                                    "type": semantic_type,
                                    "text": entity["text"],
                                    "offsets": entity["offsets"],
                                    "normalized": [
                                        {
                                            "db_name": "UMLS",
                                            "db_id": entity["concept_id"],
                                        }
                                    ],
                                }
                            )
                    document["entities"] = entities_

                    for passage in document["passages"]:
                        passage["id"] = next(uid)
                    document["relations"] = []
                    document["events"] = []
                    document["coreferences"] = []
                    yield document["document_id"], document

    def _generate_parsed_documents(self, fstream, pmids):
        """Parse the corpus stream, keeping only documents whose pmid is in *pmids*."""
        for raw_document in self._generate_raw_documents(fstream):
            if self._parse_pmid(raw_document) in pmids:
                yield self._parse_document(raw_document)

    def _generate_raw_documents(self, fstream):
        """Group the stream into per-document lists of stripped, non-empty lines.

        Documents are separated by blank lines in the PubTator file.
        """
        raw_document = []
        for line in fstream:
            if line.strip():
                raw_document.append(line.strip())
            elif raw_document:
                # A blank line terminates the current document.
                yield raw_document
                raw_document = []
        # Emit the trailing document when the file lacks a final blank line.
        if raw_document:
            yield raw_document

    def _parse_pmid(self, raw_document):
        """Return the integer pmid from a document's first (title) line."""
        pmid, _ = raw_document[0].split("|", 1)
        return int(pmid)

    def _parse_document(self, raw_document):
        """Convert one raw PubTator document into the source-schema dict.

        Line 0 is ``pmid|t|title`` and line 1 is ``pmid|a|abstract``; every
        following line is a tab-separated entity annotation of the form
        ``pmid<TAB>start<TAB>end<TAB>mention<TAB>semantic_types<TAB>concept``.
        """
        pmid, _, title = raw_document[0].split("|", 2)
        _, _, abstract = raw_document[1].split("|", 2)
        passages = [
            {"type": "title", "text": [title], "offsets": [[0, len(title)]]},
            {
                "type": "abstract",
                "text": [abstract],
                # Abstract offsets assume a single separator character between
                # title and abstract, matching the annotation offsets.
                "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
            },
        ]

        entities = []
        for line in raw_document[2:]:
            (
                _,  # pmid repeated on every annotation line; unused here
                start_idx,
                end_idx,
                mention,
                semantic_type_id,
                entity_id,
            ) = line.split("\t")
            entity = {
                "offsets": [[int(start_idx), int(end_idx)]],
                "text": [mention],
                "semantic_type_id": semantic_type_id.split(","),
                "concept_id": entity_id,
            }
            entities.append(entity)

        return {"pmid": int(pmid), "entities": entities, "passages": passages}