|
|
|
"""MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader.""" |
|
from __future__ import annotations |
|
import os |
|
from pathlib import Path |
|
from typing import Dict, List, Tuple |
|
from argparse import ArgumentParser |
|
from pprint import pprint |
|
from natsort import natsorted |
|
from loggez import loggez_logger as logger |
|
import torch as tr |
|
import numpy as np |
|
from torch.utils.data import Dataset, DataLoader |
|
from lovely_tensors import monkey_patch |
|
|
|
from npz_representation import NpzRepresentation |
|
|
|
monkey_patch() |
|
BuildDatasetTuple = Tuple[Dict[str, List[Path]], List[str]] |
|
MultiTaskItem = Tuple[Dict[str, tr.Tensor], str, List[str]] |
|
TaskStatistics = Tuple[tr.Tensor, tr.Tensor, tr.Tensor, tr.Tensor] |
|
|
|
class MultiTaskDataset(Dataset): |
|
""" |
|
MultiTaskDataset implementation. Reads data from npz files and returns them as a dict. |
|
|
|
Parameters: |
|
- path: Path to the directory containing the npz files. |
|
- task_names: List of tasks that are present in the dataset. If set to None, will infer from the files on disk. |
|
    - handle_missing_data: Mode used to handle missing data. Valid options are:
      - drop: Drop the data point if any of the representations is missing.
      - fill_none: Fill the missing data with Nones.
      - fill_zero: Fill the missing data with zeros of that task's data shape.
      - fill_nan: Fill the missing data with NaNs of that task's data shape.
    - files_suffix: The suffix of the data files. Only 'npz' is supported right now.
    - task_types: Optional mapping from task name to its NpzRepresentation type. Defaults to NpzRepresentation.
    - files_per_repr_overwrites: Optional mapping {new_task: existing_task} that reuses an existing task's files under
      a new task name.
    - compute_statistics: If True, compute (and cache, if enabled) per-task channel statistics at construction time.
|
|
|
Expected directory structure: |
|
path/ |
|
- task_1/0.npz, ..., N.npz |
|
- ... |
|
- task_n/0.npz, ..., N.npz |
|
|
|
Names can be in a different format (i.e. 2022-01-01.npz), but must be consistent and equal across all tasks. |
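
    Usage example (a minimal sketch; the dataset path is hypothetical, see also main() at the bottom of this module):
        reader = MultiTaskDataset(Path("/path/to/dataset"), task_names=None, handle_missing_data="fill_none")
        data, name, task_names = reader[0]  # {task: tensor or None}, the file name and the list of task names
        loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=4, shuffle=True)
        batch, names, task_names = next(iter(loader))  # batched on the first dimension; missing items become NaNs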
|
""" |
|
|
|
def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none", |
|
files_suffix: str = "npz", task_types: dict[str, type] | None = None, |
|
files_per_repr_overwrites: dict[str, str] | None = None, |
|
compute_statistics: bool = False): |
|
assert Path(path).exists(), f"Provided path '{path}' doesn't exist!" |
|
assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \ |
|
f"Invalid handle_missing_data mode: {handle_missing_data}" |
|
assert files_suffix == "npz", "Only npz supported right now (though trivial to update)" |
|
self.path = Path(path).absolute() |
|
self.handle_missing_data = handle_missing_data |
|
self.suffix = files_suffix |
|
self.files_per_repr_overwrites = files_per_repr_overwrites |
|
self.all_files_per_repr = self._get_all_npz_files() |
|
self.files_per_repr, self.file_names = self._build_dataset() |
|
if task_types is None: |
|
logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.") |
|
task_types = {} |
|
|
|
if task_names is None: |
|
task_names = list(self.files_per_repr.keys()) |
|
logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).") |
|
assert all(task in self.files_per_repr for task in task_names), (task_names, self.files_per_repr.keys()) |
|
self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names} |
|
assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names))) |
|
self.task_names = sorted(task_names) |
|
        self._data_shape: dict[str, tuple[int, ...]] | None = None  # lazily computed cache for the data_shape property
|
self._tasks: list[NpzRepresentation] | None = None |
|
self.name_to_task = {task.name: task for task in self.tasks} |
|
logger.info(f"Tasks used in this dataset: {self.task_names}") |
|
self._default_vals: dict[str, tr.Tensor] | None = None |
|
self.statistics = None if compute_statistics is False else self._compute_statistics() |
|
|
|
|
|
|
|
@property |
|
def default_vals(self) -> dict[str, tr.Tensor]: |
|
"""default values for __getitem__ if item is not on disk but we retrieve a full batch anyway""" |
|
if self._default_vals is None: |
|
_default_val = float("nan") if self.handle_missing_data == "fill_nan" else 0 |
|
self._default_vals = {task: None if self.handle_missing_data == "fill_none" else |
|
tr.full(self.data_shape[task], _default_val) for task in self.task_names} |
|
return self._default_vals |
|
|
|
@property |
|
def data_shape(self) -> dict[str, tuple[int, ...]]: |
|
"""Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each.""" |
|
        if self._data_shape is None:
            first_npz = {task: [_v for _v in files if _v is not None][0]
                         for task, files in self.files_per_repr.items()}
            self._data_shape = {task: self.name_to_task[task].load_from_disk(first_npz[task]).shape
                                for task in self.task_names}
        return self._data_shape
|
|
|
@property |
|
def tasks(self) -> list[NpzRepresentation]: |
|
""" |
|
        Returns a list of instantiated tasks in the same order as self.task_names. Override this to add new tasks and
        semantics (e.g. a plot_fn or some preprocessing after loading from disk for some tasks).
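
        Example of customizing a task via the task_types constructor argument (a minimal sketch; DepthRepresentation
        and its value clipping are hypothetical):
            class DepthRepresentation(NpzRepresentation):
                def load_from_disk(self, path: Path) -> tr.Tensor:
                    return super().load_from_disk(path).clip(0, 1)  # extra post-processing after reading the npz file
            reader = MultiTaskDataset(path, task_types={"depth": DepthRepresentation})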
|
""" |
|
if self._tasks is not None: |
|
return self._tasks |
|
self._tasks = [] |
|
for task_name in self.task_names: |
|
t = self.task_types[task_name] |
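            # try to instantiate the task type with its task name; keep the provided value as-is if that fails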
|
try: |
|
t = t(task_name) |
|
except Exception: |
|
pass |
|
self._tasks.append(t) |
|
assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks) |
|
return self._tasks |
|
|
|
def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem: |
|
""" |
|
        Given a list of items (e.g. from a reader[n: n+k] call), return the items batched on the first dimension.
        Nones (missing data points) are turned into NaNs matching the data shape of that task.
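        Example (hypothetical shapes): for 3 items and a task whose data shape is (H, W, 3), the batched tensor for
        that task has shape (3, H, W, 3).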
|
""" |
|
assert all(item[2] == self.task_names for item in items), ([item[2] for item in items], self.task_names) |
|
items_name = [item[1] for item in items] |
|
res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names} |
|
for i in range(len(items)): |
|
for k in self.task_names: |
|
res[k][i][:] = items[i][0][k] if items[i][0][k] is not None else float("nan") |
|
return res, items_name, self.task_names |
|
|
|
|
|
|
|
def _get_all_npz_files(self) -> dict[str, list[Path]]: |
|
"""returns a dict of form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}""" |
|
in_files = {} |
|
all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()] |
|
for repr_dir_name in all_repr_dirs: |
|
dir_name = self.path / repr_dir_name |
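            # a task directory either holds .npz files directly, or is split into sub-directories (partitions) that
            # each hold .npz files; both layouts are supported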
|
if all(f.is_dir() for f in dir_name.iterdir()): |
|
all_files = [] |
|
for part in dir_name.iterdir(): |
|
all_files.extend(part.glob(f"*.{self.suffix}")) |
|
else: |
|
all_files = dir_name.glob(f"*.{self.suffix}") |
|
in_files[repr_dir_name] = natsorted(all_files, key=lambda x: x.name) |
|
assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }" |
|
return in_files |
|
|
|
def _build_dataset(self) -> BuildDatasetTuple: |
|
logger.debug(f"Building dataset from: '{self.path}'") |
|
if self.handle_missing_data == "drop": |
|
files_per_repr, common = self._build_dataset_drop_missing() |
|
else: |
|
files_per_repr, common = self._build_dataset_fill_missing() |
|
if self.files_per_repr_overwrites is not None: |
|
for left, right in self.files_per_repr_overwrites.items(): |
|
if right not in (fpr := files_per_repr): |
|
logger.info(f"Overwrite: {left} => {right} provided, but {right} is not in {fpr.keys()}") |
|
continue |
|
assert left not in fpr.keys(), f"Overwrite: {left} => {right}. {left} already exists in {fpr.keys()}" |
|
files_per_repr[left] = files_per_repr[right] |
|
return files_per_repr, common |
|
|
|
def _build_dataset_drop_missing(self) -> BuildDatasetTuple: |
|
in_files = self.all_files_per_repr |
|
name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} |
|
common = set(x.name for x in next(iter(in_files.values()))) |
|
for node in (nodes := in_files.keys()): |
|
common = common.intersection([f.name for f in in_files[node]]) |
|
assert len(common) > 0, f"Node '{node}' made the intersection null" |
|
common = natsorted(list(common)) |
|
logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).") |
|
files_per_repr = {node: [name_to_node_path[node][x] for x in common] for node in nodes} |
|
assert len(files_per_repr) > 0 |
|
return files_per_repr, common |
|
|
|
def _build_dataset_fill_missing(self) -> BuildDatasetTuple: |
|
in_files = self.all_files_per_repr |
|
name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} |
|
all_files = set(x.name for x in next(iter(in_files.values()))) |
|
|
for node in (nodes := in_files.keys()): |
|
all_files = all_files.union([f.name for f in in_files[node]]) |
|
all_files = natsorted(list(all_files)) |
|
logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).") |
|
|
|
files_per_repr = {node: [] for node in nodes} |
|
for node in nodes: |
|
for file_name in all_files: |
|
file_path = name_to_node_path[node].get(file_name, None) |
|
files_per_repr[node].append(file_path) |
|
assert len(files_per_repr) > 0 |
|
return files_per_repr, all_files |
|
|
|
def _compute_statistics(self) -> dict[str, tr.Tensor]: |
|
        cache_path = self.path / ".task_statistics.npz"
|
res: dict[str, TaskStatistics] = {} |
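        # per-task statistics can be cached on disk (next to the dataset) by setting the env variable CACHE_IMG_STATS=1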
|
if os.getenv("CACHE_IMG_STATS", "0") == "1" and cache_path.exists(): |
|
res = np.load(cache_path, allow_pickle=True)["arr_0"].item() |
|
logger.info(f"Loaded task statistics: { {k: v.shape for k, v in res.items()} }") |
|
missing_tasks = list(set(self.task_names).difference(res.keys())) |
|
if len(missing_tasks) == 0: |
|
return res |
|
logger.info(f"Computing global task statistics (dataset len {len(self)}) for {missing_tasks}") |
|
old_tasks = self.tasks |
|
self._tasks = [t for t in self.tasks if t.name in missing_tasks] |
|
res = {**res, **self._compute_channel_level_stats(missing_tasks)} |
|
self._tasks = old_tasks |
|
logger.info(f"Computed task statistics: { {k: v[0].shape for k, v in res.items()} }") |
|
if os.getenv("CACHE_IMG_STATS", "0") == "1": |
|
np.savez(cache_path, res) |
|
return res |
|
|
|
    def _compute_channel_level_stats(self, missing_tasks: list[str]) -> dict[str, TaskStatistics]:
        """Computes per-channel (min, max, mean, std) statistics for each missing task by iterating the dataset."""
        # number of channels per task: the last dim for (H, W, C) data, 1 for single-channel (H, W) data
        ch = {k: v[-1] if len(v) == 3 else 1 for k, v in self.data_shape.items()}
        sums = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
        sq_sums = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
        counts = {task_name: tr.zeros(ch[task_name]).long() for task_name in missing_tasks}
        mins = {task_name: tr.full((ch[task_name], ), float("inf"), dtype=tr.float64) for task_name in missing_tasks}
        maxs = {task_name: tr.full((ch[task_name], ), float("-inf"), dtype=tr.float64) for task_name in missing_tasks}

        for ix in range(len(self)):
            item = self[ix][0]
            for task in missing_tasks:
                if item[task] is None:  # missing data point (handle_missing_data="fill_none")
                    continue
                item_flat_ch = item[task].reshape(-1, ch[task]).type(tr.float64)
                sums[task] += item_flat_ch.nan_to_num(0).sum(0)
                sq_sums[task] += item_flat_ch.nan_to_num(0).pow(2).sum(0)
                counts[task] += (item_flat_ch == item_flat_ch).long().sum(0)  # number of non-NaN entries per channel
                mins[task] = tr.minimum(mins[task], item_flat_ch.nan_to_num(float("inf")).min(0).values)
                maxs[task] = tr.maximum(maxs[task], item_flat_ch.nan_to_num(float("-inf")).max(0).values)

        res: dict[str, TaskStatistics] = {}
        for task in missing_tasks:
            mean = (sums[task] / counts[task]).nan_to_num(0)
            std = (sq_sums[task] / counts[task] - mean.pow(2)).clamp(min=0).sqrt().nan_to_num(0)
            res[task] = (mins[task].float(), maxs[task].float(), mean.float(), std.float())
        return res
|
|
|
|
|
|
|
    def __getitem__(self, index: int | slice | list[int] | tuple | str) -> MultiTaskItem:
        """Read the data of all the desired nodes at the given index (int, slice, list/tuple of ints or file name)"""
|
assert isinstance(index, (int, slice, list, tuple, str)), type(index) |
|
if isinstance(index, slice): |
|
assert index.start is not None and index.stop is not None and index.step is None, "Only reader[l:r] allowed" |
|
index = list(range(index.stop)[index]) |
|
if isinstance(index, (list, tuple)): |
|
return self.collate_fn([self.__getitem__(ix) for ix in index]) |
|
if isinstance(index, str): |
|
return self.__getitem__(self.file_names.index(index)) |
|
res = {} |
|
item_name = self.file_names[index] |
|
|
|
for task in self.tasks: |
|
file_path = self.files_per_repr[task.name][index] |
|
file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr |
|
res[task.name] = task.load_from_disk(file_path) if file_path is not None else self.default_vals[task.name] |
|
return (res, item_name, self.task_names) |
|
|
|
def __len__(self) -> int: |
|
return len(self.files_per_repr[self.task_names[0]]) |
|
|
|
def __str__(self): |
|
f_str = f"[{str(type(self)).rsplit('.', maxsplit=1)[-1][0:-2]}]" |
|
f_str += f"\n - Path: '{self.path}'" |
|
f_str += f"\n - Tasks ({len(self.tasks)}): {self.tasks}" |
|
f_str += f"\n - Length: {len(self)}" |
|
f_str += f"\n - Handle missing data mode: '{self.handle_missing_data}'" |
|
return f_str |
|
|
|
def __repr__(self): |
|
return str(self) |
|
|
|
def main(): |
|
"""main fn""" |
|
parser = ArgumentParser() |
|
parser.add_argument("dataset_path", type=Path) |
|
parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none") |
|
args = parser.parse_args() |
|
|
|
reader = MultiTaskDataset(args.dataset_path, task_names=None, handle_missing_data=args.handle_missing_data) |
|
print(reader) |
|
print(f"Shape: {reader.data_shape}") |
|
|
|
rand_ix = np.random.randint(len(reader)) |
|
data, name, repr_names = reader[rand_ix] |
|
print(f"Name: {name}. Nodes: {repr_names}") |
|
    pprint(data)
|
|
|
data, name, repr_names = reader[rand_ix: min(len(reader), rand_ix + 5)] |
|
print(f"Name: {name}. Nodes: {repr_names}") |
|
    pprint(data)
|
|
|
loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True) |
|
data, name, repr_names = next(iter(loader)) |
|
print(f"Name: {name}. Nodes: {repr_names}") |
|
    pprint(data)
|
|
|
if __name__ == "__main__": |
|
main() |
|
|