andreped committed
Commit fb5dee2 · 1 parent: ae2c72d

Update IBDColEpi.py

Files changed (1)
  1. IBDColEpi.py +1 -14
IBDColEpi.py CHANGED
@@ -11,9 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-
 
 import csv
 import json
@@ -22,7 +19,6 @@ import os
 import datasets
 
 
-# TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @ARTICLE{10.3389/fmed.2021.816281,
@@ -38,19 +34,14 @@ ABSTRACT={Application of deep learning on histopathological whole slide images (
 }
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
 IBDColEpi: 140 HE and 111 CD3-stained colon biopsies of active and inactivate inflammatory bowel disease with epithelium annotated.
 """
 
-# TODO: Add a link to an official homepage for the dataset here
 _HOMEPAGE = "https://github.com/andreped/NoCodeSeg"
 
-# TODO: Add the licence for the dataset here if you can find it
 _LICENSE = "MIT"
 
-# TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 base_path = "https://huggingface.co/datasets/andreped/IBDColEpi/resolve/main/"
@@ -58,7 +49,6 @@ _URLS = {"part_0" + str(x): base_path + "WSI_part_0" + str(x) + ".zip" for x in
 _URLS["part_10"] = base_path + "WSI_part_10.zip"
 _URLS["wsi-annotations"] = base_path + "TIFF-annotations.zip"
 
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class IBDColEpi(datasets.GeneratorBasedBuilder):
     """140 HE and 111 CD3-stained colon biopsies of active and inactivate inflammatory bowel disease with epithelium annotated."""
 
@@ -91,7 +81,6 @@ class IBDColEpi(datasets.GeneratorBasedBuilder):
         self.DATA_DIR = None
 
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         if self.config.name.startswith("part"):  # This is the name of the configuration selected in BUILDER_CONFIGS above
             features = datasets.Features(
                 {
@@ -130,7 +119,7 @@ class IBDColEpi(datasets.GeneratorBasedBuilder):
         return self.DATA_DIR
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
 
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
@@ -139,8 +128,6 @@ class IBDColEpi(datasets.GeneratorBasedBuilder):
         urls = _URLS[self.config.name]
         self.DATA_DIR = dl_manager.download_and_extract(urls)
 
-        # @TODO: use custom data downloader instead because DataverseNO is trash
-
         # append AeroPath
         # self.DATA_DIR = os.path.join(self.DATA_DIR, "IBDColEpi")
 
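For reference, a minimal usage sketch of this loading script. The config names below are an assumption taken from the `_URLS` keys ("part_01" through "part_10" and "wsi-annotations"); the actual names are defined in BUILDER_CONFIGS, which this diff does not show.

from datasets import load_dataset

# Assumed config name mirroring the _URLS key "part_01"; adjust to the configs
# actually listed in BUILDER_CONFIGS.
wsi_part = load_dataset(
    "andreped/IBDColEpi",
    name="part_01",
    trust_remote_code=True,  # recent datasets versions require this to run a script-based dataset
)

# The TIFF annotations are exposed through their own entry in _URLS.
annotations = load_dataset(
    "andreped/IBDColEpi",
    name="wsi-annotations",
    trust_remote_code=True,
)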