# ------------------------------------------------------------------------
# HOTR official code : hotr/util/misc.py
# Copyright (c) Kakao Brain, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import deque
import pickle
import socket
from typing import Optional, List
import argparse
import ast

import torch
import torch.distributed as dist
from torch import Tensor

# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
# parse major/minor explicitly so versions like "0.10" compare correctly
# (float("0.1") < 0.7 would misclassify torchvision 0.10+)
if tuple(int(v) for v in torchvision.__version__.split('.')[:2]) < (0, 7):
    from torchvision.ops import _new_empty_tensor
    from torchvision.ops.misc import _output_size

# hard-coded master port (overrides any environment setting)
os.environ['MASTER_PORT'] = '8993'

class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        # the @property decorators above are required: fmt.format() receives
        # the computed values, not bound methods
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
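
# Illustrative usage sketch for SmoothedValue (the loss values are made up):
#   meter = SmoothedValue(window_size=20)
#   for loss in (0.9, 0.8, 0.7):
#       meter.update(loss)
#   str(meter)  # -> "0.8000 (0.8000)"  (median over window, global average)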

def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
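
# Illustrative sketch (hypothetical 2-process run): each rank contributes one
# picklable object and receives every rank's object, ordered by rank:
#   stats = {"rank": get_rank(), "loss": 0.5}
#   gathered = all_gather(stats)  # -> [{"rank": 0, ...}, {"rank": 1, ...}]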

def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
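
# Illustrative sketch (hypothetical loss dict of scalar CUDA tensors): every
# process ends up with the same averaged values, e.g. for logging on rank 0:
#   loss_dict = {"loss_bbox": loss_bbox, "loss_cls": loss_cls}
#   loss_dict_reduced = reduce_dict(loss_dict)  # averaged over all ranks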

def get_sha():
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
    sha = 'N/A'
    diff = "clean"
    branch = 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = _run(['git', 'diff-index', 'HEAD'])
        diff = "has uncommitted changes" if diff else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        pass
    message = f"sha: {sha}, status: {diff}, branch: {branch}"
    return message
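
# Illustrative output (commit hash and branch name are made up):
#   get_sha()  # -> "sha: 1a2b3c..., status: clean, branch: main"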

def collate_fn(batch):
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)
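
# Illustrative sketch: intended as the collate_fn of a DataLoader whose dataset
# (hypothetical here) yields (image_tensor, target) pairs of varying image size:
#   loader = torch.utils.data.DataLoader(dataset, batch_size=2, collate_fn=collate_fn)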

def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes

def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        # mask is True at padded positions, False where real pixels live
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError('not supported')
    return NestedTensor(tensor, mask)

class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            # redundant at runtime; helps TorchScript narrow the Optional type
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
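
# Illustrative sketch: batch two different-sized images into one padded tensor
# plus a padding mask (the image sizes here are made up):
#   imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
#   nt = nested_tensor_from_tensor_list(imgs)
#   nt.tensors.shape  # -> torch.Size([2, 3, 512, 640])
#   nt.mask.shape     # -> torch.Size([2, 512, 640]); True marks padding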

def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print

def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True

def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()

def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()

def is_main_process():
    return get_rank() == 0

def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)

def _check_if_valid_ip(ip):
    try:
        socket.inet_aton(ip)
    except socket.error:
        # not a valid dotted-quad IPv4 address
        return False
    return True

def arg_as_list(s):
    v = ast.literal_eval(s)
    if type(v) is not list:
        raise argparse.ArgumentTypeError("List should be given.")
    return v
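
# Illustrative sketch: parse a Python list literal from the command line
# (--ids is a hypothetical flag):
#   parser.add_argument('--ids', type=arg_as_list)
#   # e.g. --ids "[1, 2, 3]" -> [1, 2, 3]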

def _maybe_gethostbyname(addr):
    """To be compatible with Braincloud, where nodes can be reached by their task names.
    Each node has to wait until all the tasks in the group are up on the cloud."""
    if _check_if_valid_ip(addr):
        # if an IP address is given, do nothing
        return addr

    # otherwise, resolve the IP address from the hostname, retrying until the host is up
    done = False
    retry = 0
    print(f"Get URL by the given hostname '{addr}' in Braincloud..")
    while not done:
        try:
            addr = socket.gethostbyname(addr)
            done = True
        except socket.gaierror:
            retry += 1
            print(f"Retrying count: {retry}")
            time.sleep(3)
    print(f"Found the host by IP address: {addr}")
    return addr

def init_distributed_mode(args):
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # launched via torch.distributed (e.g. torchrun), which sets these env vars
        os.environ["MASTER_ADDR"] = _maybe_gethostbyname(os.environ["MASTER_ADDR"])
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
        args.dist_url = 'env://'
        os.environ['LOCAL_SIZE'] = str(torch.cuda.device_count())
    elif 'SLURM_PROCID' in os.environ:
        # launched as a SLURM job step: derive rank / world size from SLURM env vars
        proc_id = int(os.environ['SLURM_PROCID'])
        ntasks = int(os.environ['SLURM_NTASKS'])
        node_list = os.environ['SLURM_NODELIST']
        num_gpus = torch.cuda.device_count()
        addr = subprocess.getoutput(
            'scontrol show hostname {} | head -n1'.format(node_list))
        os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '29500')
        os.environ['MASTER_ADDR'] = addr
        os.environ['WORLD_SIZE'] = str(ntasks)
        os.environ['RANK'] = str(proc_id)
        os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
        os.environ['LOCAL_SIZE'] = str(num_gpus)
        args.dist_url = 'env://'
        args.world_size = ntasks
        args.rank = proc_id
        args.gpu = proc_id % num_gpus
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
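
# Illustrative launch sketch (main.py is a hypothetical entry point): torchrun
# sets RANK / WORLD_SIZE / LOCAL_RANK, so the first branch above is taken:
#   torchrun --nproc_per_node=8 main.py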

@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): the slice of the transposed tensor is non-contiguous
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
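
# Illustrative sketch (random logits, so the numbers are meaningless):
#   logits = torch.randn(4, 10)            # (batch, num_classes)
#   labels = torch.tensor([1, 2, 3, 4])
#   top1, top5 = accuracy(logits, labels, topk=(1, 5))  # percentages in [0, 100]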

def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if tuple(int(v) for v in torchvision.__version__.split('.')[:2]) < (0, 7):
        if input.numel() > 0:
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )
        # empty batch: compute the output shape by hand and return an empty tensor
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # newer torchvision no longer re-exports interpolate from ops.misc;
        # PyTorch >= 1.5 handles empty batches natively
        return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
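
# Illustrative sketch: the wrapper degrades gracefully on empty batches
# (the shapes are made up):
#   x = torch.zeros(0, 3, 32, 32)
#   interpolate(x, scale_factor=2.0).shape  # -> torch.Size([0, 3, 64, 64])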