|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@misc{støverud2023aeropath, |
|
title={AeroPath: An airway segmentation benchmark dataset with challenging pathology}, |
|
author={Karen-Helene Støverud and David Bouget and Andre Pedersen and Håkon Olav Leira and Thomas Langø and Erlend Fagertun Hofstad}, |
|
year={2023}, |
|
eprint={2311.01138}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CV} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
AeroPath: An airway segmentation benchmark dataset with challenging pathology. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://github.com/raidionics/AeroPath" |
|
|
|
|
|
_LICENSE = "MIT" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
|
|
|
|
"zenodo": "https://zenodo.org/records/10069289/files/AeroPath.zip?download=1" |
|
} |
|
|
|
|
|
|
|
class AeroPath(datasets.GeneratorBasedBuilder):
    """An airway segmentation benchmark dataset with challenging pathology.

    Loader for the AeroPath dataset (27 thoracic CTs with airway/lung
    annotations) published on Zenodo; see ``_URLS`` for the archive location.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        # Single configuration: the complete 27-CT collection as one Zenodo zip.
        datasets.BuilderConfig(
            name="zenodo",
            version=VERSION,
            description="This includes all 27 CTs stored as a single zip on Zenodo",
        ),
    ]

    DEFAULT_CONFIG_NAME = "zenodo"

    def get_data_paths(self):
        """Return the local paths of the downloaded dataset files.

        NOTE(review): stub — currently returns ``None``. Implement once the
        on-disk layout of the extracted Zenodo archive is wired up.
        """
        return

    def get_patient(self, patient_id):
        """Validate ``patient_id`` against the dataset's valid range [1, 27].

        Args:
            patient_id: Integer patient identifier; must lie in [1, 27].

        Raises:
            ValueError: If ``patient_id`` is outside [1, 27].

        NOTE(review): stub — performs validation only and returns ``None``;
        it does not yet load or return any patient data.
        """
        # Fixed a NameError: the original upper-bound check referenced the
        # misspelled name ``patiend_id``, which raised NameError for every
        # in-range id (short-circuit `or` evaluated it whenever the first
        # test passed).
        if (patient_id < 1) or (patient_id > 27):
            raise ValueError("patient_id should be an integer in range [1, 27].")

    # NOTE(review): removed the large class-level triple-quoted string that
    # held the unmodified Hugging Face template implementations of _info,
    # _split_generators and _generate_examples. It was inert (a bare string
    # expression), referenced fields ("sentence", "first_domain", *.jsonl
    # splits) that do not exist in AeroPath, and is recoverable from the
    # upstream dataset-script template when the real methods are written.