import numpy as np
from mmcv.transforms import to_tensor
from mmcv.transforms.base import BaseTransform
from mmengine.structures import InstanceData, PixelData

from mmdet.registry import TRANSFORMS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import BaseBoxes


@TRANSFORMS.register_module()
class PackDetInputs(BaseTransform):
    """Pack the inputs data for detection / semantic segmentation /
    panoptic segmentation.

    The ``img_meta`` item is always populated. The contents of the
    ``img_meta`` dictionary depend on ``meta_keys``. By default this
    includes:

    - ``img_id``: id of the image

    - ``img_path``: path to the image file

    - ``ori_shape``: original shape of the image as a tuple (h, w)

    - ``img_shape``: shape of the image input to the network as a tuple \
        (h, w). Note that images may be zero padded on the \
        bottom/right if the batch tensor is larger than this shape.

    - ``scale_factor``: a float indicating the preprocessing scale

    - ``flip``: a boolean indicating if image flip transform was used

    - ``flip_direction``: the flipping direction

    Args:
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',
            'scale_factor', 'flip', 'flip_direction')``
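
    Examples:
        A minimal illustrative sketch of how the transform packs a pipeline
        ``results`` dict; all field values below (including the image array
        and the ``'demo.jpg'`` path) are dummy data invented for this
        example:

        >>> import numpy as np
        >>> transform = PackDetInputs()
        >>> results = dict(
        ...     img=np.zeros((100, 100, 3), dtype=np.uint8),
        ...     gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
        ...     gt_bboxes_labels=np.array([1], dtype=np.int64),
        ...     img_id=0,
        ...     img_path='demo.jpg',
        ...     ori_shape=(100, 100),
        ...     img_shape=(100, 100),
        ...     scale_factor=(1.0, 1.0),
        ...     flip=False,
        ...     flip_direction=None)
        >>> packed = transform(results)
        >>> packed['inputs'].shape
        torch.Size([3, 100, 100])
        >>> packed['data_samples'].gt_instances.labels
        tensor([1])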
    """
    mapping_table = {
        'gt_bboxes': 'bboxes',
        'gt_bboxes_labels': 'labels',
        'gt_masks': 'masks'
    }

    def __init__(self,
                 meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                            'scale_factor', 'flip', 'flip_direction')):
        self.meta_keys = meta_keys

    def transform(self, results: dict) -> dict:
        """Method to pack the input data.

        Args:
            results (dict): Result dict from the data pipeline.

        Returns:
            dict:

            - 'inputs' (obj:`torch.Tensor`): The forward data of models.
            - 'data_samples' (obj:`DetDataSample`): The annotation info of
              the sample.
        """
        packed_results = dict()
        if 'img' in results:
            img = results['img']
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            # Convert the image to a contiguous (C, H, W) tensor. If the
            # array is not already contiguous, transpose in numpy and call
            # `np.ascontiguousarray` before converting to a tensor;
            # otherwise convert first and permute on the tensor.
            if not img.flags.c_contiguous:
                img = np.ascontiguousarray(img.transpose(2, 0, 1))
                img = to_tensor(img)
            else:
                img = to_tensor(img).permute(2, 0, 1).contiguous()

            packed_results['inputs'] = img

        if 'gt_ignore_flags' in results:
            valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]
            ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]

        data_sample = DetDataSample()
        instance_data = InstanceData()
        ignore_instance_data = InstanceData()

        for key in self.mapping_table.keys():
            if key not in results:
                continue
            if key == 'gt_masks' or isinstance(results[key], BaseBoxes):
                if 'gt_ignore_flags' in results:
                    instance_data[
                        self.mapping_table[key]] = results[key][valid_idx]
                    ignore_instance_data[
                        self.mapping_table[key]] = results[key][ignore_idx]
                else:
                    instance_data[self.mapping_table[key]] = results[key]
            else:
                if 'gt_ignore_flags' in results:
                    instance_data[self.mapping_table[key]] = to_tensor(
                        results[key][valid_idx])
                    ignore_instance_data[self.mapping_table[key]] = to_tensor(
                        results[key][ignore_idx])
                else:
                    instance_data[self.mapping_table[key]] = to_tensor(
                        results[key])
        data_sample.gt_instances = instance_data
        data_sample.ignored_instances = ignore_instance_data

        if 'proposals' in results:
            proposals = InstanceData(
                bboxes=to_tensor(results['proposals']),
                scores=to_tensor(results['proposals_scores']))
            data_sample.proposals = proposals

        if 'gt_seg_map' in results:
            gt_sem_seg_data = dict(
                sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))
            data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)

        img_meta = {}
        for key in self.meta_keys:
            assert key in results, f'`{key}` is not found in `results`, ' \
                                   f'the valid keys are {list(results)}.'
            img_meta[key] = results[key]

        data_sample.set_metainfo(img_meta)
        packed_results['data_samples'] = data_sample

        return packed_results

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(meta_keys={self.meta_keys})'
        return repr_str


@TRANSFORMS.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
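
    Examples:
        A small illustrative sketch; the key name and values are arbitrary
        dummy data:

        >>> import numpy as np
        >>> transform = ToTensor(keys=['gt_bboxes_labels'])
        >>> results = dict(gt_bboxes_labels=np.array([0, 2]))
        >>> transform(results)['gt_bboxes_labels']
        tensor([0, 2])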
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert data in results to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the data converted
            to :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@TRANSFORMS.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of the input image is (H, W, C). The pipeline will
    convert it to (C, H, W). If only 2 dimensions (H, W) are given, the
    output would be (1, H, W).

    Args:
        keys (Sequence[str]): Keys of images to be converted to Tensor.
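
    Examples:
        A minimal sketch of the (H, W, C) to (C, H, W) conversion; the image
        is a dummy array:

        >>> import numpy as np
        >>> transform = ImageToTensor(keys=['img'])
        >>> results = dict(img=np.zeros((32, 32, 3), dtype=np.uint8))
        >>> transform(results)['img'].shape
        torch.Size([3, 32, 32])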
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Call function to convert the image in results to
        :obj:`torch.Tensor` and transpose the channel order.

        Args:
            results (dict): Result dict containing the image data to convert.

        Returns:
            dict: The result dict with the image converted to
            :obj:`torch.Tensor` and permuted to (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img).permute(2, 0, 1).contiguous()

        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@TRANSFORMS.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
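
    Examples:
        An illustrative sketch with a dummy array:

        >>> import numpy as np
        >>> transform = Transpose(keys=['img'], order=(2, 0, 1))
        >>> results = dict(img=np.zeros((32, 32, 3)))
        >>> transform(results)['img'].shape
        (3, 32, 32)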
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Call function to transpose data in results according to
        ``self.order``.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The result dict with the data transposed according to
            ``self.order``.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
               f'(keys={self.keys}, order={self.order})'


@TRANSFORMS.register_module()
class WrapFieldsToLists:
    """Wrap fields of the data dictionary into lists for evaluation.

    This class can be used as a last step of a test or validation
    pipeline for single image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>     dict(type='LoadImageFromFile'),
        >>>     dict(type='Normalize',
        >>>          mean=[123.675, 116.28, 103.53],
        >>>          std=[58.395, 57.12, 57.375],
        >>>          to_rgb=True),
        >>>     dict(type='Pad', size_divisor=32),
        >>>     dict(type='ImageToTensor', keys=['img']),
        >>>     dict(type='Collect', keys=['img']),
        >>>     dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Call function to wrap fields into lists.

        Args:
            results (dict): Result dict containing the data to wrap.

        Returns:
            dict: The result dict where every value is wrapped into a list.
        """
        # Wrap each field of the results dict into a single-element list.
        for key, val in results.items():
            results[key] = [val]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'