"""Dronescapes representations -- adds various loading/writing/image showing capabilities to dronescapes tasks""" from __future__ import annotations from pathlib import Path from typing import Callable import numpy as np import torch as tr import flow_vis from skimage.color import rgb2hsv from overrides import overrides from matplotlib.cm import Spectral # pylint: disable=no-name-in-module from torch.nn import functional as F try: from npz_representation import NpzRepresentation except ImportError: from .npz_representation import NpzRepresentation class RGBRepresentation(NpzRepresentation): def __init__(self, *args, **kwargs): super().__init__(*args, n_channels=3, **kwargs) class HSVRepresentation(RGBRepresentation): @overrides def load_from_disk(self, path: Path) -> tr.Tensor: rgb = super().load_from_disk(path) return tr.from_numpy(rgb2hsv(rgb)).float() class EdgesRepresentation(NpzRepresentation): def __init__(self, *args, **kwargs): super().__init__(*args, n_channels=1, **kwargs) class DepthRepresentation(NpzRepresentation): """DepthRepresentation. Implements depth task-specific stuff, like spectral map for plots.""" def __init__(self, name: str, min_depth: float, max_depth: float, *args, **kwargs): super().__init__(name, n_channels=1, *args, **kwargs) self.min_depth = min_depth self.max_depth = max_depth @overrides def load_from_disk(self, path: Path) -> tr.Tensor: """Reads the npz data from the disk and transforms it properly""" res = super().load_from_disk(path) res_clip = res.clip(self.min_depth, self.max_depth) return res_clip @overrides def plot_fn(self, x: tr.Tensor) -> np.ndarray: x = x.detach().clip(0, 1).squeeze().cpu().numpy() _min, _max = np.percentile(x, [1, 95]) x = np.nan_to_num((x - _min) / (_max - _min), False, 0, 0, 0).clip(0, 1) y: np.ndarray = Spectral(x)[..., 0:3] * 255 return y.astype(np.uint8) class NormalsRepresentation(NpzRepresentation): def __init__(self, *args, **kwargs): super().__init__(*args, n_channels=3, **kwargs) class OpticalFlowRepresentation(NpzRepresentation): """OpticalFlowRepresentation. Implements flow task-specific stuff, like using flow_vis.""" def __init__(self, *args, **kwargs): super().__init__(*args, n_channels=2, **kwargs) @overrides def plot_fn(self, x: tr.Tensor) -> np.ndarray: _min, _max = x.min(0)[0].min(0)[0], x.max(0)[0].max(0)[0] x = ((x - _min) / (_max - _min)).nan_to_num(0, 0, 0).detach().cpu().numpy() return flow_vis.flow_to_color(x) class SemanticRepresentation(NpzRepresentation): """SemanticRepresentation. 

class SemanticRepresentation(NpzRepresentation):
    """SemanticRepresentation. Implements semantic task-specific stuff, like argmaxing if needed."""
    def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
        self.n_classes = len(list(range(classes)) if isinstance(classes, int) else classes)
        super().__init__(*args, **kwargs, n_channels=self.n_classes)
        self.classes = list(range(classes)) if isinstance(classes, int) else classes
        self.color_map = color_map
        assert len(color_map) == self.n_classes and self.n_classes > 1, (color_map, self.n_classes)

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        res = super().load_from_disk(path)
        if len(res.shape) == 3:
            assert res.shape[-1] == self.n_classes, f"Expected {self.n_classes} (HxWxC), got {res.shape[-1]}"
            res = res.argmax(-1)
        assert len(res.shape) == 2, f"Only argmaxed data supported, got: {res.shape}"
        res = F.one_hot(res.long(), num_classes=self.n_classes).float()
        return res

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
        for i in range(self.n_classes):
            new_images[x_argmax == i] = self.color_map[i]
        return new_images

class SemanticMapper(SemanticRepresentation):
    """
    Maps one or more semantic segmentations to a final one + a merge fn. Copy-pasta from VRE.
    TODO: allow non-semantic dependencies (must pass the correct load_fn or object instead of assuming semantic deps)
    """
    def __init__(self, *args, original_classes: list[list[str]], mapping: list[dict[str, list[str]]],
                 color_map: list[tuple[int, int, int]],
                 merge_fn: Callable[[list[np.ndarray]], np.ndarray] | None = None, **kwargs):
        super().__init__(*args, classes=list(mapping[0].keys()), color_map=color_map, **kwargs)
        assert len(self.dependencies) >= 1 and self.dependencies[0] != self.name, \
            "No dependencies provided. Need at least one semantic segmentation to map."
        assert isinstance(mapping, list), type(mapping)
        assert len(mapping) == (B := len(self.dependencies)), (len(mapping), B)
        assert (A := len(original_classes)) == len(self.dependencies), (A, B)
        assert all(m.keys() == mapping[0].keys() for m in mapping), [list(m.keys()) for m in mapping]
        assert len(color_map) == len(mapping[0].keys()), (len(color_map), len(mapping[0].keys()))
        self.original_classes = original_classes
        self.mapping = mapping
        self.merge_fn = merge_fn if merge_fn is not None else SemanticMapper._default_merge_fn

    @staticmethod
    def _default_merge_fn(dep_data: list[np.ndarray]) -> np.ndarray:
        if len(dep_data) > 1:
            raise ValueError(f"default_merge_fn doesn't work with >1 dependencies: {len(dep_data)}")
        return dep_data[0]

    def _make_one(self, path: Path, mapping: dict[str, list[str]], original_classes: list[str]) -> np.ndarray:
        semantic_dep_data: np.ndarray = NpzRepresentation.load_from_disk(self, path).numpy()
        semantic_dep_data = semantic_dep_data.argmax(-1) if len(semantic_dep_data.shape) == 3 else semantic_dep_data
        assert len(semantic_dep_data.shape) == 2, f"Only argmaxed data supported, got: {semantic_dep_data.shape}"
        assert semantic_dep_data.dtype in (np.uint8, np.uint16), semantic_dep_data.dtype
        # build an original-class-index -> new-class-index lookup, then remap each pixel
        mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v) for _v in v]
                      for k, v in mapping.items()}
        flat_mapping = {}
        for k, v in mapping_ix.items():
            for _v in v:
                flat_mapping[_v] = k
        mapped_data = np.vectorize(flat_mapping.get)(semantic_dep_data).astype(np.uint8)
        return mapped_data

    def load_from_disk(self, path: Path | list[Path]):
        # note: assuming SemanticRepresentation for all deps. TODO: generic deps.
        paths = [path] if isinstance(path, Path) else path
        assert len(paths) == len(self.dependencies), (len(paths), len(self.dependencies))
        individual_semantics = []
        for path, mapping, original_classes in zip(paths, self.mapping, self.original_classes):
            individual_semantics.append(self._make_one(path, mapping, original_classes))
        res = self.merge_fn(individual_semantics)
        res_torch = F.one_hot(tr.from_numpy(res).long(), num_classes=self.n_classes).float()
        return res_torch
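
# Hedged illustration, not part of the original dronescapes code: a minimal sketch of how
# `original_classes` and `mapping` line up for SemanticMapper. The class names below are
# hypothetical; only the structure matters -- each mapping key becomes a new class, and its value
# lists the original classes (of one dependency) that collapse into it.
def _example_semantic_mapping() -> dict[str, list[str]]:
    """Toy mapping: three hypothetical original classes collapsed into two new ones."""
    original_classes = ["grass", "tree", "car"]  # classes of a single hypothetical semantic dependency
    mapping = {"vegetation": ["grass", "tree"], "vehicle": ["car"]}  # new class -> original classes
    # every original class referenced by the mapping must exist in the dependency's class list
    assert all(cls in original_classes for group in mapping.values() for cls in group)
    return mapping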

color_map_8classes = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                      [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]

coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
                "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
                "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
                "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
                "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
                "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock",
                "vase", "scissors", "teddy bear", "hair drier", "toothbrush", "banner", "blanket", "bridge",
                "cardboard", "counter", "curtain", "door-stuff", "floor-wood", "flower", "fruit", "gravel", "house",
                "light", "mirror-stuff", "net", "pillow", "platform", "playingfield", "railroad", "river", "road",
                "roof", "sand", "sea", "shelf", "snow", "stairs", "tent", "towel", "wall-brick", "wall-stone",
                "wall-tile", "wall-wood", "water-other", "window-blind", "window-other", "tree-merged",
                "fence-merged", "ceiling-merged", "sky-other-merged", "cabinet-merged", "table-merged",
                "floor-other-merged", "pavement-merged", "mountain-merged", "grass-merged", "dirt-merged",
                "paper-merged", "food-other-merged", "building-other-merged", "rock-merged", "wall-other-merged",
                "rug-merged"]

coco_color_map = [[220, 20, 60], [119, 11, 32], [0, 0, 142], [0, 0, 230], [106, 0, 228], [0, 60, 100], [0, 80, 100], [0, 0, 70],
                  [0, 0, 192], [250, 170, 30], [100, 170, 30], [220, 220, 0], [175, 116, 175], [250, 0, 30], [165, 42, 42], [255, 77, 255],
                  [0, 226, 252], [182, 182, 255], [0, 82, 0], [120, 166, 157], [110, 76, 0], [174, 57, 255], [199, 100, 0], [72, 0, 118],
                  [255, 179, 240], [0, 125, 92], [209, 0, 151], [188, 208, 182], [0, 220, 176], [255, 99, 164], [92, 0, 73], [133, 129, 255],
                  [78, 180, 255], [0, 228, 0], [174, 255, 243], [45, 89, 255], [134, 134, 103], [145, 148, 174], [255, 208, 186], [197, 226, 255],
                  [171, 134, 1], [109, 63, 54], [207, 138, 255], [151, 0, 95], [9, 80, 61], [84, 105, 51], [74, 65, 105], [166, 196, 102],
                  [208, 195, 210], [255, 109, 65], [0, 143, 149], [179, 0, 194], [209, 99, 106], [5, 121, 0], [227, 255, 205], [147, 186, 208],
                  [153, 69, 1], [3, 95, 161], [163, 255, 0], [119, 0, 170], [0, 182, 199], [0, 165, 120], [183, 130, 88], [95, 32, 0],
                  [130, 114, 135], [110, 129, 133], [166, 74, 118], [219, 142, 185], [79, 210, 114], [178, 90, 62], [65, 70, 15], [127, 167, 115],
                  [59, 105, 106], [142, 108, 45], [196, 172, 0], [95, 54, 80], [128, 76, 255], [201, 57, 1], [246, 0, 122], [191, 162, 208],
                  [255, 255, 128], [147, 211, 203], [150, 100, 100], [168, 171, 172], [146, 112, 198], [210, 170, 100], [92, 136, 89], [218, 88, 184],
                  [241, 129, 0], [217, 17, 255], [124, 74, 181], [70, 70, 70], [255, 228, 255], [154, 208, 0], [193, 0, 92], [76, 91, 113],
                  [255, 180, 195], [106, 154, 176], [230, 150, 140], [60, 143, 255], [128, 64, 128], [92, 82, 55], [254, 212, 124], [73, 77, 174],
                  [255, 160, 98], [255, 255, 255], [104, 84, 109], [169, 164, 131], [225, 199, 255], [137, 54, 74], [135, 158, 223], [7, 246, 231],
                  [107, 255, 200], [58, 41, 149], [183, 121, 142], [255, 73, 97], [107, 142, 35], [190, 153, 153], [146, 139, 141], [70, 130, 180],
                  [134, 199, 156], [209, 226, 140], [96, 36, 108], [96, 96, 96], [64, 170, 64], [152, 251, 152], [208, 229, 228], [206, 186, 171],
                  [152, 161, 64], [116, 112, 0], [0, 114, 143], [102, 102, 156], [250, 141, 255]]

mapillary_classes = ["Bird", "Ground Animal", "Curb", "Fence", "Guard Rail", "Barrier", "Wall", "Bike Lane",
                     "Crosswalk - Plain", "Curb Cut", "Parking", "Pedestrian Area", "Rail Track", "Road",
                     "Service Lane", "Sidewalk", "Bridge", "Building", "Tunnel", "Person", "Bicyclist",
                     "Motorcyclist", "Other Rider", "Lane Marking - Crosswalk", "Lane Marking - General",
                     "Mountain", "Sand", "Sky", "Snow", "Terrain", "Vegetation", "Water", "Banner", "Bench",
                     "Bike Rack", "Billboard", "Catch Basin", "CCTV Camera", "Fire Hydrant", "Junction Box",
                     "Mailbox", "Manhole", "Phone Booth", "Pothole", "Street Light", "Pole", "Traffic Sign Frame",
                     "Utility Pole", "Traffic Light", "Traffic Sign (Back)", "Traffic Sign (Front)", "Trash Can",
                     "Bicycle", "Boat", "Bus", "Car", "Caravan", "Motorcycle", "On Rails", "Other Vehicle",
                     "Trailer", "Truck", "Wheeled Slow", "Car Mount", "Ego Vehicle"]

mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153], [180, 165, 180], [90, 120, 150], [102, 102, 156], [128, 64, 255],
                       [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96], [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232],
                       [150, 100, 100], [70, 70, 70], [150, 120, 90], [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
                       [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180], [190, 255, 255], [152, 251, 152], [107, 142, 35], [0, 170, 30],
                       [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220], [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40],
                       [33, 33, 33], [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
                       [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32], [150, 0, 255], [0, 60, 100], [0, 0, 142],
                       [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110], [0, 0, 70], [0, 0, 192], [32, 32, 32],
                       [120, 10, 10]]

m2f_mapillary_to_8_classes = {
    "land": ["Terrain", "Sand", "Snow"],
    "forest": ["Vegetation"],
    "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb",
                    "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner",
                    "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera",
                    "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame",
                    "Bike Rack"],
    "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole",
             "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk",
             "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
    "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle",
                       "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
"water": ["Water"], "sky": ["Sky"], "hill": ["Mountain"] } tasks = [ # some pre-baked representations RGBRepresentation("rgb"), HSVRepresentation("hsv", dependencies=["rgb"]), EdgesRepresentation("edges_dexined"), EdgesRepresentation("edges_gb"), DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999), DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300), DepthRepresentation("depth_ufo", min_depth=0, max_depth=1), DepthRepresentation("depth_marigold", min_depth=0, max_depth=1), NormalsRepresentation("normals_sfm_manual202204"), OpticalFlowRepresentation("opticalflow_rife"), SemanticRepresentation("semantic_segprop8", classes=8, color_map=color_map_8classes), SemanticRepresentation("semantic_mask2former_swin_mapillary_converted", classes=8, color_map=color_map_8classes), SemanticMapper("semantic_mask2former_swin_mapillary_converted2", original_classes=[mapillary_classes], mapping=[m2f_mapillary_to_8_classes], color_map=color_map_8classes, dependencies=["semantic_mask2former_mapillary_49189528_0"]), SemanticRepresentation("semantic_mask2former_coco_47429163_0", classes=coco_classes, color_map=coco_color_map), SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes, color_map=mapillary_color_map), NpzRepresentation("softseg_gb", 3), ] dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in tasks}