Columns: python_code, repo_name, file_path
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np from typing import Tuple import torch from PIL import Image from torch.nn import functional as F from detectron2.structures import Boxes __all__ = ["paste_masks_in_image"] BYTES_PER_FLOAT = 4 # TODO: This memory limit may be too much or too little. It would be better to # determine it based on available resources. GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True): """ Args: masks: N, 1, H, W boxes: N, 4 img_h, img_w (int): skip_empty (bool): only paste masks within the region that tightly bound all boxes, and returns the results this region only. An important optimization for CPU. Returns: if skip_empty == False, a mask of shape (N, img_h, img_w) if skip_empty == True, a mask of shape (N, h', w'), and the slice object for the corresponding region. """ # On GPU, paste all masks together (up to chunk size) # by using the entire image to sample the masks # Compared to pasting them one by one, # this has more operations but is faster on COCO-scale dataset. device = masks.device if skip_empty and not torch.jit.is_scripting(): x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( dtype=torch.int32 ) x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) else: x0_int, y0_int = 0, 0 x1_int, y1_int = img_w, img_h x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 N = masks.shape[0] img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 img_y = (img_y - y0) / (y1 - y0) * 2 - 1 img_x = (img_x - x0) / (x1 - x0) * 2 - 1 # img_x, img_y have shapes (N, w), (N, h) gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) grid = torch.stack([gx, gy], dim=3) if not torch.jit.is_scripting(): if not masks.dtype.is_floating_point: masks = masks.float() img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False) if skip_empty and not torch.jit.is_scripting(): return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) else: return img_masks[:, 0], () def paste_masks_in_image( masks: torch.Tensor, boxes: Boxes, image_shape: Tuple[int, int], threshold: float = 0.5 ): """ Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. The location, height, and width for pasting each mask is determined by their corresponding bounding boxes in boxes. Note: This is a complicated but more accurate implementation. In actual deployment, it is often enough to use a faster but less accurate implementation. See :func:`paste_mask_in_image_old` in this file for an alternative implementation. Args: masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of detected object instances in the image and Hmask, Wmask are the mask width and mask height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). boxes[i] and masks[i] correspond to the same object instance. image_shape (tuple): height, width threshold (float): A threshold in [0, 1] for converting the (soft) masks to binary masks. 
Returns: img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the number of detected object instances and Himage, Wimage are the image width and height. img_masks[i] is a binary mask for object instance i. """ assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" N = len(masks) if N == 0: return masks.new_empty((0,) + image_shape, dtype=torch.uint8) if not isinstance(boxes, torch.Tensor): boxes = boxes.tensor device = boxes.device assert len(boxes) == N, boxes.shape img_h, img_w = image_shape # The actual implementation split the input into chunks, # and paste them chunk by chunk. if device.type == "cpu" or torch.jit.is_scripting(): # CPU is most efficient when they are pasted one by one with skip_empty=True # so that it performs minimal number of operations. num_chunks = N else: # GPU benefits from parallelism for larger chunks, but may have memory issue # int(img_h) because shape may be tensors in tracing num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) assert ( num_chunks <= N ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" chunks = torch.chunk(torch.arange(N, device=device), num_chunks) img_masks = torch.zeros( N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 ) for inds in chunks: masks_chunk, spatial_inds = _do_paste_mask( masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" ) if threshold >= 0: masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) else: # for visualization and debugging masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) if torch.jit.is_scripting(): # Scripting does not use the optimized codepath img_masks[inds] = masks_chunk else: img_masks[(inds,) + spatial_inds] = masks_chunk return img_masks # The below are the original paste function (from Detectron1) which has # larger quantization error. # It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): """ Paste a single mask in an image. This is a per-box implementation of :func:`paste_masks_in_image`. This function has larger quantization error due to incorrect pixel modeling and is not used any more. Args: mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single object instance. Values are in [0, 1]. box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners of the object instance. img_h, img_w (int): Image height and width. threshold (float): Mask binarization threshold in [0, 1]. Returns: im_mask (Tensor): The resized and binarized object mask pasted into the original image plane (a tensor of shape (img_h, img_w)). """ # Conversion from continuous box coordinates to discrete pixel coordinates # via truncation (cast to int32). This determines which pixels to paste the # mask onto. box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 # pixels (not x1 - x0 pixels). 
samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height # Resample the mask from it's original grid to the new samples_w x samples_h grid mask = Image.fromarray(mask.cpu().numpy()) mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) mask = np.array(mask, copy=False) if threshold >= 0: mask = np.array(mask > threshold, dtype=np.uint8) mask = torch.from_numpy(mask) else: # for visualization and debugging, we also # allow it to return an unmodified mask mask = torch.from_numpy(mask * 255).to(torch.uint8) im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) x_0 = max(box[0], 0) x_1 = min(box[2] + 1, img_w) y_0 = max(box[1], 0) y_1 = min(box[3] + 1, img_h) im_mask[y_0:y_1, x_0:x_1] = mask[ (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) ] return im_mask # Our pixel modeling requires extrapolation for any continuous # coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, # we would like this extrapolation to be an interpolation between boundary values and zero, # instead of using absolute zero or boundary values. # Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: # masks, scale = pad_masks(masks[:, 0, :, :], 1) # boxes = scale_boxes(boxes.tensor, scale) def pad_masks(masks, padding): """ Args: masks (tensor): A tensor of shape (B, M, M) representing B masks. padding (int): Number of cells to pad on all sides. Returns: The padded masks and the scale factor of the padding size / original size. """ B = masks.shape[0] M = masks.shape[-1] pad2 = 2 * padding scale = float(M + pad2) / M padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) padded_masks[:, padding:-padding, padding:-padding] = masks return padded_masks, scale def scale_boxes(boxes, scale): """ Args: boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 coords representing the corners x0, y0, x1, y1, scale (float): The box scaling factor. Returns: Scaled boxes. """ w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 w_half *= scale h_half *= scale scaled_boxes = torch.zeros_like(boxes) scaled_boxes[:, 0] = x_c - w_half scaled_boxes[:, 2] = x_c + w_half scaled_boxes[:, 1] = y_c - h_half scaled_boxes[:, 3] = y_c + h_half return scaled_boxes
banmo-main
third_party/detectron2_old/detectron2/layers/mask_ops.py
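A minimal usage sketch of paste_masks_in_image, assuming this bundled detectron2 copy is installed (built with its extensions) and importable as `detectron2`; the masks, boxes, and image size below are made-up illustrative values.

import torch
from detectron2.layers import paste_masks_in_image
from detectron2.structures import Boxes

masks = torch.rand(3, 28, 28)                        # 3 instances, 28x28 soft masks in [0, 1]
boxes = Boxes(torch.tensor([[10., 10., 60., 80.],
                            [ 0.,  0., 40., 40.],
                            [30., 20., 90., 70.]]))  # one XYXY box per mask, in image coordinates
img_masks = paste_masks_in_image(masks, boxes, image_shape=(120, 160), threshold=0.5)
print(img_masks.shape, img_masks.dtype)              # torch.Size([3, 120, 160]) torch.bool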
# Copyright (c) Facebook, Inc. and its affiliates. """ Wrappers around on some nn functions, mainly to support empty tensors. Ideally, add support directly in PyTorch to empty tensors in those functions. These can be removed once https://github.com/pytorch/pytorch/issues/12013 is implemented """ from typing import List import torch from torch.nn import functional as F def cat(tensors: List[torch.Tensor], dim: int = 0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) def cross_entropy(input, target, *, reduction="mean", **kwargs): """ Same as `torch.nn.functional.cross_entropy`, but returns 0 (instead of nan) for empty inputs. """ if target.numel() == 0 and reduction == "mean": return input.sum() * 0.0 # connect the gradient return F.cross_entropy(input, target, **kwargs) class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class Conv2d(torch.nn.Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def __init__(self, *args, **kwargs): """ Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: Args: norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ norm = kwargs.pop("norm", None) activation = kwargs.pop("activation", None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): # torchscript does not support SyncBatchNorm yet # https://github.com/pytorch/pytorch/issues/40507 # and we skip these codes in torchscript since: # 1. currently we only support torchscript in evaluation mode # 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. if not torch.jit.is_scripting(): if x.numel() == 0 and self.training: # https://github.com/pytorch/pytorch/issues/12013 assert not isinstance( self.norm, torch.nn.SyncBatchNorm ), "SyncBatchNorm does not support empty inputs!" x = F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups ) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x ConvTranspose2d = torch.nn.ConvTranspose2d BatchNorm2d = torch.nn.BatchNorm2d interpolate = F.interpolate Linear = torch.nn.Linear def nonzero_tuple(x): """ A 'as_tuple=True' version of torch.nonzero to support torchscript. because of https://github.com/pytorch/pytorch/issues/38718 """ if torch.jit.is_scripting(): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) else: return x.nonzero(as_tuple=True)
banmo-main
third_party/detectron2_old/detectron2/layers/wrappers.py
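A rough sketch of the wrappers above, with illustrative shapes, showing the fused norm/activation in Conv2d and the empty-input behaviour of cross_entropy.

import torch
import torch.nn.functional as F
from detectron2.layers.wrappers import Conv2d, cat, cross_entropy

conv = Conv2d(3, 8, kernel_size=3, padding=1,
              norm=torch.nn.BatchNorm2d(8), activation=F.relu)
y = conv(torch.randn(2, 3, 32, 32))                  # conv -> norm -> activation, (2, 8, 32, 32)

x = cat([torch.randn(4, 8)])                         # single-element list: returned without a copy

empty_logits = torch.zeros(0, 5, requires_grad=True)
empty_target = torch.zeros(0, dtype=torch.long)
loss = cross_entropy(empty_logits, empty_target)     # 0.0 instead of NaN for empty inputs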
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import fvcore.nn.weight_init as weight_init from torch import nn from .batch_norm import FrozenBatchNorm2d, get_norm from .wrappers import Conv2d """ CNN building blocks. """ class CNNBlockBase(nn.Module): """ A CNN block is assumed to have input channels, output channels and a stride. The input and output of `forward()` method must be NCHW tensors. The method can perform arbitrary computation but must match the given channels and stride specification. Attribute: in_channels (int): out_channels (int): stride (int): """ def __init__(self, in_channels, out_channels, stride): """ The `__init__` method of any subclass should also contain these arguments. Args: in_channels (int): out_channels (int): stride (int): """ super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def freeze(self): """ Make this block not trainable. This method sets all parameters to `requires_grad=False`, and convert all BatchNorm layers to FrozenBatchNorm Returns: the block itself """ for p in self.parameters(): p.requires_grad = False FrozenBatchNorm2d.convert_frozen_batchnorm(self) return self class DepthwiseSeparableConv2d(nn.Module): """ A kxk depthwise convolution + a 1x1 convolution. In :paper:`xception`, norm & activation are applied on the second conv. :paper:`mobilenet` uses norm & activation on both convs. """ def __init__( self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1, *, norm1=None, activation1=None, norm2=None, activation2=None, ): """ Args: norm1, norm2 (str or callable): normalization for the two conv layers. activation1, activation2 (callable(Tensor) -> Tensor): activation function for the two conv layers. """ super().__init__() self.depthwise = Conv2d( in_channels, in_channels, kernel_size=kernel_size, padding=padding, dilation=dilation, groups=in_channels, bias=not norm1, norm=get_norm(norm1, in_channels), activation=activation1, ) self.pointwise = Conv2d( in_channels, out_channels, kernel_size=1, bias=not norm2, norm=get_norm(norm2, out_channels), activation=activation2, ) # default initialization weight_init.c2_msra_fill(self.depthwise) weight_init.c2_msra_fill(self.pointwise) def forward(self, x): return self.pointwise(self.depthwise(x))
banmo-main
third_party/detectron2_old/detectron2/layers/blocks.py
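An illustrative sketch of DepthwiseSeparableConv2d; the channel counts and normalization choices are arbitrary.

import torch
from torch.nn import functional as F
from detectron2.layers.blocks import DepthwiseSeparableConv2d

block = DepthwiseSeparableConv2d(
    32, 64, kernel_size=3, padding=1,
    norm1="BN", activation1=F.relu,      # depthwise 3x3 conv: norm + activation
    norm2="BN", activation2=F.relu,      # pointwise 1x1 conv: norm + activation (MobileNet-style)
)
out = block(torch.randn(4, 32, 56, 56))  # -> (4, 64, 56, 56)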
# Copyright (c) Facebook, Inc. and its affiliates. import logging import torch import torch.distributed as dist from fvcore.nn.distributed import differentiable_all_reduce from torch import nn from torch.nn import functional as F from detectron2.utils import comm, env from .wrappers import BatchNorm2d class FrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. It contains non-trainable buffers called "weight" and "bias", "running_mean", "running_var", initialized to perform identity transformation. The pre-trained backbone models from Caffe2 only contain "weight" and "bias", which are computed from the original four parameters of BN. The affine transform `x * weight + bias` will perform the equivalent computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. When loading a backbone model from Caffe2, "running_mean" and "running_var" will be left unchanged as identity transformation. Other pre-trained backbone models may contain all 4 parameters. The forward is implemented by `F.batch_norm(..., training=False)`. """ _version = 3 def __init__(self, num_features, eps=1e-5): super().__init__() self.num_features = num_features self.eps = eps self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("bias", torch.zeros(num_features)) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features) - eps) def forward(self, x): if x.requires_grad: # When gradients are needed, F.batch_norm will use extra memory # because its backward op computes gradients for weight/bias as well. scale = self.weight * (self.running_var + self.eps).rsqrt() bias = self.bias - self.running_mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) out_dtype = x.dtype # may be half return x * scale.to(out_dtype) + bias.to(out_dtype) else: # When gradients are not needed, F.batch_norm is a single fused op # and provide more optimization opportunities. return F.batch_norm( x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps, ) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): version = local_metadata.get("version", None) if version is None or version < 2: # No running_mean/var in early versions # This will silent the warnings if prefix + "running_mean" not in state_dict: state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) if prefix + "running_var" not in state_dict: state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) # NOTE: if a checkpoint is trained with BatchNorm and loaded (together with # version number) to FrozenBatchNorm, running_var will be wrong. One solution # is to remove the version number from the checkpoint. if version is not None and version < 3: logger = logging.getLogger(__name__) logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip("."))) # In version < 3, running_var are used without +eps. state_dict[prefix + "running_var"] -= self.eps super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def __repr__(self): return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) @classmethod def convert_frozen_batchnorm(cls, module): """ Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. 
Args: module (torch.nn.Module): Returns: If module is BatchNorm/SyncBatchNorm, returns a new module. Otherwise, in-place convert module and return it. Similar to convert_sync_batchnorm in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py """ bn_module = nn.modules.batchnorm bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) res = module if isinstance(module, bn_module): res = cls(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = cls.convert_frozen_batchnorm(child) if new_child is not child: res.add_module(name, new_child) return res def get_norm(norm, out_channels): """ Args: norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; or a callable that takes a channel number and returns the normalization layer as a nn.Module. Returns: nn.Module or None: the normalization layer """ if norm is None: return None if isinstance(norm, str): if len(norm) == 0: return None norm = { "BN": BatchNorm2d, # Fixed in https://github.com/pytorch/pytorch/pull/36382 "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, "FrozenBN": FrozenBatchNorm2d, "GN": lambda channels: nn.GroupNorm(32, channels), # for debugging: "nnSyncBN": nn.SyncBatchNorm, "naiveSyncBN": NaiveSyncBatchNorm, }[norm] return norm(out_channels) class NaiveSyncBatchNorm(BatchNorm2d): """ In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient when the batch size on each worker is different. (e.g., when scale augmentation is used, or when it is applied to mask head). This is a slower but correct alternative to `nn.SyncBatchNorm`. Note: There isn't a single definition of Sync BatchNorm. When ``stats_mode==""``, this module computes overall statistics by using statistics of each worker with equal weight. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (N, H, W). This mode does not support inputs with zero batch size. When ``stats_mode=="N"``, this module computes overall statistics by weighting the statistics of each worker by their ``N``. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (H, W). It is slower than ``stats_mode==""``. Even though the result of this module may not be the true statistics of all samples, it may still be reasonable because it might be preferrable to assign equal weights to all workers, regardless of their (H, W) dimension, instead of putting larger weight on larger images. From preliminary experiments, little difference is found between such a simplified implementation and an accurate computation of overall mean & variance. 
""" def __init__(self, *args, stats_mode="", **kwargs): super().__init__(*args, **kwargs) assert stats_mode in ["", "N"] self._stats_mode = stats_mode def forward(self, input): if comm.get_world_size() == 1 or not self.training: return super().forward(input) B, C = input.shape[0], input.shape[1] half_input = input.dtype == torch.float16 if half_input: # fp16 does not have good enough numerics for the reduction here input = input.float() mean = torch.mean(input, dim=[0, 2, 3]) meansqr = torch.mean(input * input, dim=[0, 2, 3]) if self._stats_mode == "": assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' vec = torch.cat([mean, meansqr], dim=0) vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) mean, meansqr = torch.split(vec, C) momentum = self.momentum else: if B == 0: vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) vec = vec + input.sum() # make sure there is gradient w.r.t input else: vec = torch.cat( [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0 ) vec = differentiable_all_reduce(vec * B) total_batch = vec[-1].detach() momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero var = meansqr - mean * mean invstd = torch.rsqrt(var + self.eps) scale = self.weight * invstd bias = self.bias - mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) self.running_mean += momentum * (mean.detach() - self.running_mean) self.running_var += momentum * (var.detach() - self.running_var) ret = input * scale + bias if half_input: ret = ret.half() return ret
banmo-main
third_party/detectron2_old/detectron2/layers/batch_norm.py
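A short sketch of FrozenBatchNorm2d.convert_frozen_batchnorm and get_norm on a toy model (the model itself is made up for illustration).

import torch
from torch import nn
from detectron2.layers.batch_norm import FrozenBatchNorm2d, get_norm

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)  # replaces BN children in place
print(model[1])                      # FrozenBatchNorm2d(num_features=16, eps=1e-05)

norm_layer = get_norm("GN", 64)      # nn.GroupNorm(32, 64); get_norm("", 64) would return None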
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals

from detectron2 import _C


def pairwise_iou_rotated(boxes1, boxes2):
    """
    Return intersection-over-union (Jaccard index) of boxes.

    Both sets of boxes are expected to be in
    (x_center, y_center, width, height, angle) format.

    Arguments:
        boxes1 (Tensor[N, 5])
        boxes2 (Tensor[M, 5])

    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    return _C.box_iou_rotated(boxes1, boxes2)
banmo-main
third_party/detectron2_old/detectron2/layers/rotated_boxes.py
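A hypothetical call to pairwise_iou_rotated; it dispatches to the compiled `detectron2._C` extension, so it only runs against a built installation. The box values are arbitrary.

import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated

boxes1 = torch.tensor([[50., 50., 100., 40., 0.],    # (x_center, y_center, width, height, angle)
                       [50., 50., 100., 40., 30.]])
boxes2 = torch.tensor([[50., 50., 100., 40., 0.]])
iou = pairwise_iou_rotated(boxes1, boxes2)           # Tensor[2, 1] of pairwise IoU values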
# Copyright (c) Facebook, Inc. and its affiliates. import itertools from typing import Any, Dict, List, Tuple, Union import torch class Instances: """ This class represents a list of instances in an image. It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". All fields must have the same ``__len__`` which is the number of instances. All other (non-field) attributes of this class are considered private: they must start with '_' and are not modifiable by a user. Some basic usage: 1. Set/get/check a field: .. code-block:: python instances.gt_boxes = Boxes(...) print(instances.pred_masks) # a tensor of shape (N, H, W) print('gt_masks' in instances) 2. ``len(instances)`` returns the number of instances 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields and returns a new :class:`Instances`. Typically, ``indices`` is a integer vector of indices, or a binary mask of length ``num_instances`` .. code-block:: python category_3_detections = instances[instances.pred_classes == 3] confident_detections = instances[instances.scores > 0.9] """ def __init__(self, image_size: Tuple[int, int], **kwargs: Any): """ Args: image_size (height, width): the spatial size of the image. kwargs: fields to add to this `Instances`. """ self._image_size = image_size self._fields: Dict[str, Any] = {} for k, v in kwargs.items(): self.set(k, v) @property def image_size(self) -> Tuple[int, int]: """ Returns: tuple: height, width """ return self._image_size def __setattr__(self, name: str, val: Any) -> None: if name.startswith("_"): super().__setattr__(name, val) else: self.set(name, val) def __getattr__(self, name: str) -> Any: if name == "_fields" or name not in self._fields: raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) return self._fields[name] def set(self, name: str, value: Any) -> None: """ Set the field named `name` to `value`. The length of `value` must be the number of instances, and must agree with other existing fields in this object. """ data_len = len(value) if len(self._fields): assert ( len(self) == data_len ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) self._fields[name] = value def has(self, name: str) -> bool: """ Returns: bool: whether the field called `name` exists. """ return name in self._fields def remove(self, name: str) -> None: """ Remove the field called `name`. """ del self._fields[name] def get(self, name: str) -> Any: """ Returns the field called `name`. """ return self._fields[name] def get_fields(self) -> Dict[str, Any]: """ Returns: dict: a dict which maps names (str) to data of the fields Modifying the returned dict will modify this instance. """ return self._fields # Tensor-like methods def to(self, *args: Any, **kwargs: Any) -> "Instances": """ Returns: Instances: all fields are called with a `to(device)`, if the field has this method. """ ret = Instances(self._image_size) for k, v in self._fields.items(): if hasattr(v, "to"): v = v.to(*args, **kwargs) ret.set(k, v) return ret def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": """ Args: item: an index-like object and will be used to index all the fields. Returns: If `item` is a string, return the data in the corresponding field. Otherwise, returns an `Instances` where all fields are indexed by `item`. 
""" if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError("Instances index out of range!") else: item = slice(item, None, len(self)) ret = Instances(self._image_size) for k, v in self._fields.items(): ret.set(k, v[item]) return ret def __len__(self) -> int: for v in self._fields.values(): # use __len__ because len() has to be int and is not friendly to tracing return v.__len__() raise NotImplementedError("Empty Instances does not support __len__!") def __iter__(self): raise NotImplementedError("`Instances` object is not iterable!") @staticmethod def cat(instance_lists: List["Instances"]) -> "Instances": """ Args: instance_lists (list[Instances]) Returns: Instances """ assert all(isinstance(i, Instances) for i in instance_lists) assert len(instance_lists) > 0 if len(instance_lists) == 1: return instance_lists[0] image_size = instance_lists[0].image_size for i in instance_lists[1:]: assert i.image_size == image_size ret = Instances(image_size) for k in instance_lists[0]._fields.keys(): values = [i.get(k) for i in instance_lists] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) elif hasattr(type(v0), "cat"): values = type(v0).cat(values) else: raise ValueError("Unsupported type {} for concatenation".format(type(v0))) ret.set(k, values) return ret def __str__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={}, ".format(len(self)) s += "image_height={}, ".format(self._image_size[0]) s += "image_width={}, ".format(self._image_size[1]) s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) return s __repr__ = __str__
banmo-main
third_party/detectron2_old/detectron2/structures/instances.py
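A small sketch of the Instances container following its docstring; the field names and values are examples only, nothing in the class requires them.

import torch
from detectron2.structures import Boxes, Instances

inst = Instances((480, 640))                     # image (height, width)
inst.pred_boxes = Boxes(torch.rand(5, 4) * 100)  # every field must have length 5
inst.scores = torch.rand(5)
inst.pred_classes = torch.randint(0, 80, (5,))

confident = inst[inst.scores > 0.5]              # indexing applies to every field at once
print(len(confident), confident.has("pred_boxes"))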
# Copyright (c) Facebook, Inc. and its affiliates.
from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa
from .image_list import ImageList
from .instances import Instances
from .keypoints import Keypoints, heatmaps_to_keypoints
from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks
from .rotated_boxes import RotatedBoxes
from .rotated_boxes import pairwise_iou as pairwise_iou_rotated

__all__ = [k for k in globals().keys() if not k.startswith("_")]


from detectron2.utils.env import fixup_module_metadata

fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata
banmo-main
third_party/detectron2_old/detectron2/structures/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import math import numpy as np from enum import IntEnum, unique from typing import List, Tuple, Union import torch from torch import device from detectron2.utils.env import TORCH_VERSION _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] if TORCH_VERSION < (1, 8): _maybe_jit_unused = torch.jit.unused else: def _maybe_jit_unused(x): return x @unique class BoxMode(IntEnum): """ Enum of different ways to represent a box. """ XYXY_ABS = 0 """ (x0, y0, x1, y1) in absolute floating points coordinates. The coordinates in range [0, width or height]. """ XYWH_ABS = 1 """ (x0, y0, w, h) in absolute floating points coordinates. """ XYXY_REL = 2 """ Not yet supported! (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. """ XYWH_REL = 3 """ Not yet supported! (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. """ XYWHA_ABS = 4 """ (xc, yc, w, h, a) in absolute floating points coordinates. (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. """ @staticmethod def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: """ Args: box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 from_mode, to_mode (BoxMode) Returns: The converted box of the same type. """ if from_mode == to_mode: return box original_type = type(box) is_numpy = isinstance(box, np.ndarray) single_box = isinstance(box, (list, tuple)) if single_box: assert len(box) == 4 or len(box) == 5, ( "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," " where k == 4 or 5" ) arr = torch.tensor(box)[None, :] else: # avoid modifying the input box if is_numpy: arr = torch.from_numpy(np.asarray(box)).clone() else: arr = box.clone() assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ BoxMode.XYXY_REL, BoxMode.XYWH_REL, ], "Relative mode not yet supported!" if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: assert ( arr.shape[-1] == 5 ), "The last dimension of input shape must be 5 for XYWHA format" original_dtype = arr.dtype arr = arr.double() w = arr[:, 2] h = arr[:, 3] a = arr[:, 4] c = torch.abs(torch.cos(a * math.pi / 180.0)) s = torch.abs(torch.sin(a * math.pi / 180.0)) # This basically computes the horizontal bounding rectangle of the rotated box new_w = c * w + s * h new_h = c * h + s * w # convert center to top-left corner arr[:, 0] -= new_w / 2.0 arr[:, 1] -= new_h / 2.0 # bottom-right corner arr[:, 2] = arr[:, 0] + new_w arr[:, 3] = arr[:, 1] + new_h arr = arr[:, :4].to(dtype=original_dtype) elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: original_dtype = arr.dtype arr = arr.double() arr[:, 0] += arr[:, 2] / 2.0 arr[:, 1] += arr[:, 3] / 2.0 angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) else: if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: arr[:, 2] += arr[:, 0] arr[:, 3] += arr[:, 1] elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: arr[:, 2] -= arr[:, 0] arr[:, 3] -= arr[:, 1] else: raise NotImplementedError( "Conversion from BoxMode {} to {} is not supported yet".format( from_mode, to_mode ) ) if single_box: return original_type(arr.flatten().tolist()) if is_numpy: return arr.numpy() else: return arr class Boxes: """ This structure stores a list of boxes as a Nx4 torch.Tensor. 
It supports some common methods about boxes (`area`, `clip`, `nonempty`, etc), and also behaves like a Tensor (support indexing, `to(device)`, `.device`, and iteration over all boxes) Attributes: tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). """ def __init__(self, tensor: torch.Tensor): """ Args: tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) if tensor.numel() == 0: # Use reshape, so we don't end up creating a new tensor that does not depend on # the inputs (and consequently confuses jit) tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device) assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() self.tensor = tensor def clone(self) -> "Boxes": """ Clone the Boxes. Returns: Boxes """ return Boxes(self.tensor.clone()) @_maybe_jit_unused def to(self, device: torch.device): # Boxes are assumed float32 and does not support to(dtype) return Boxes(self.tensor.to(device=device)) def area(self) -> torch.Tensor: """ Computes the area of all the boxes. Returns: torch.Tensor: a vector with areas of each box. """ box = self.tensor area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) return area def clip(self, box_size: Tuple[int, int]) -> None: """ Clip (in place) the boxes by limiting x coordinates to the range [0, width] and y coordinates to the range [0, height]. Args: box_size (height, width): The clipping box's size. """ assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size x1 = self.tensor[:, 0].clamp(min=0, max=w) y1 = self.tensor[:, 1].clamp(min=0, max=h) x2 = self.tensor[:, 2].clamp(min=0, max=w) y2 = self.tensor[:, 3].clamp(min=0, max=h) self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) def nonempty(self, threshold: float = 0.0) -> torch.Tensor: """ Find boxes that are non-empty. A box is considered empty, if either of its side is no larger than threshold. Returns: Tensor: a binary vector which represents whether each box is empty (False) or non-empty (True). """ box = self.tensor widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def __getitem__(self, item) -> "Boxes": """ Args: item: int, slice, or a BoolTensor Returns: Boxes: Create a new :class:`Boxes` by indexing. The following usage are allowed: 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. 2. `new_boxes = boxes[2:10]`: return a slice of boxes. 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor with `length = len(boxes)`. Nonzero elements in the vector will be selected. Note that the returned Boxes might share storage with this Boxes, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return Boxes(self.tensor[item].view(1, -1)) b = self.tensor[item] assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) return Boxes(b) def __len__(self) -> int: return self.tensor.shape[0] def __repr__(self) -> str: return "Boxes(" + str(self.tensor) + ")" def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: """ Args: box_size (height, width): Size of the reference box. boundary_threshold (int): Boxes that extend beyond the reference box boundary by more than boundary_threshold are considered "outside". 
Returns: a binary vector, indicating whether each box is inside the reference box. """ height, width = box_size inds_inside = ( (self.tensor[..., 0] >= -boundary_threshold) & (self.tensor[..., 1] >= -boundary_threshold) & (self.tensor[..., 2] < width + boundary_threshold) & (self.tensor[..., 3] < height + boundary_threshold) ) return inds_inside def get_centers(self) -> torch.Tensor: """ Returns: The box centers in a Nx2 array of (x, y). """ return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 def scale(self, scale_x: float, scale_y: float) -> None: """ Scale the box with horizontal and vertical scaling factors """ self.tensor[:, 0::2] *= scale_x self.tensor[:, 1::2] *= scale_y @classmethod @_maybe_jit_unused def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": """ Concatenates a list of Boxes into a single Boxes Arguments: boxes_list (list[Boxes]) Returns: Boxes: the concatenated Boxes """ assert isinstance(boxes_list, (list, tuple)) if len(boxes_list) == 0: return cls(torch.empty(0)) assert all([isinstance(box, Boxes) for box in boxes_list]) # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) return cat_boxes @property def device(self) -> device: return self.tensor.device # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript # https://github.com/pytorch/pytorch/issues/18627 @torch.jit.unused def __iter__(self): """ Yield a box as a Tensor of shape (4,) at a time. """ yield from self.tensor def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the intersection area between __all__ N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax) Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: intersection, sized [N,M]. """ boxes1, boxes2 = boxes1.tensor, boxes2.tensor width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( boxes1[:, None, :2], boxes2[:, :2] ) # [N,M,2] width_height.clamp_(min=0) # [N,M,2] intersection = width_height.prod(dim=2) # [N,M] return intersection # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the IoU (intersection over union) between **all** N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoU, sized [N,M]. """ area1 = boxes1.area() # [N] area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes iou = torch.where( inter > 0, inter / (area1[:, None] + area2 - inter), torch.zeros(1, dtype=inter.dtype, device=inter.device), ) return iou def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoA, sized [N,M]. """ area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes ioa = torch.where( inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) ) return ioa def matched_boxlist_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Compute pairwise intersection over union (IOU) of two sets of matched boxes. 
The box order must be (xmin, ymin, xmax, ymax). Similar to boxlist_iou, but computes only diagonal elements of the matrix Args: boxes1: (Boxes) bounding boxes, sized [N,4]. boxes2: (Boxes) bounding boxes, sized [N,4]. Returns: Tensor: iou, sized [N]. """ assert len(boxes1) == len( boxes2 ), "boxlists should have the same" "number of entries, got {}, {}".format( len(boxes1), len(boxes2) ) area1 = boxes1.area() # [N] area2 = boxes2.area() # [N] box1, box2 = boxes1.tensor, boxes2.tensor lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] wh = (rb - lt).clamp(min=0) # [N,2] inter = wh[:, 0] * wh[:, 1] # [N] iou = inter / (area1 + area2 - inter) # [N] return iou
banmo-main
third_party/detectron2_old/detectron2/structures/boxes.py
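A minimal sketch of Boxes and the pairwise IoU/IoA helpers defined above, with arbitrary coordinates.

import torch
from detectron2.structures import Boxes, pairwise_iou, pairwise_ioa

b1 = Boxes(torch.tensor([[0., 0., 10., 10.], [5., 5., 25., 25.]]))
b2 = Boxes(torch.tensor([[0., 0., 10., 10.]]))

b1.clip((20, 20))                  # clamp in place to a 20x20 image (height, width)
print(b1.area())                   # tensor([100., 225.]) once the second box is clipped
print(pairwise_iou(b1, b2))        # 2x1 IoU matrix
print(pairwise_ioa(b1, b2))        # intersection over b2's area, also 2x1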
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np from typing import Any, List, Tuple, Union import torch from torch.nn import functional as F from detectron2.utils.env import TORCH_VERSION if TORCH_VERSION < (1, 8): def script_if_tracing(fn): return fn else: script_if_tracing = torch.jit.script_if_tracing class Keypoints: """ Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property containing the x,y location and visibility flag of each keypoint. This tensor has shape (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. The visibility flag follows the COCO format and must be one of three integers: * v=0: not labeled (in which case x=y=0) * v=1: labeled but not visible * v=2: labeled and visible """ def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): """ Arguments: keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. The shape should be (N, K, 3) where N is the number of instances, and K is the number of keypoints per instance. """ device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape self.tensor = keypoints def __len__(self) -> int: return self.tensor.size(0) def to(self, *args: Any, **kwargs: Any) -> "Keypoints": return type(self)(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: """ Convert keypoint annotations to a heatmap of one-hot labels for training, as described in :paper:`Mask R-CNN`. Arguments: boxes: Nx4 tensor, the boxes to draw the keypoints to Returns: heatmaps: A tensor of shape (N, K), each element is integer spatial label in the range [0, heatmap_size**2 - 1] for each keypoint in the input. valid: A tensor of shape (N, K) containing whether each keypoint is in the roi or not. """ return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": """ Create a new `Keypoints` by indexing on this `Keypoints`. The following usage are allowed: 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. 2. `new_kpts = kpts[2:10]`: return a slice of key points. 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor with `length = len(kpts)`. Nonzero elements in the vector will be selected. Note that the returned Keypoints might share storage with this Keypoints, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return Keypoints([self.tensor[item]]) return Keypoints(self.tensor[item]) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s # TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) def _keypoints_to_heatmap( keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: """ Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the closed interval [0, heatmap_size - 1] on discrete image coordinates. 
We use the continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. Arguments: keypoints: tensor of keypoint locations in of shape (N, K, 3). rois: Nx4 tensor of rois in xyxy format heatmap_size: integer side length of square heatmap. Returns: heatmaps: A tensor of shape (N, K) containing an integer spatial label in the range [0, heatmap_size**2 - 1] for each keypoint in the input. valid: A tensor of shape (N, K) containing whether each keypoint is in the roi or not. """ if rois.numel() == 0: return rois.new().long(), rois.new().long() offset_x = rois[:, 0] offset_y = rois[:, 1] scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) offset_x = offset_x[:, None] offset_y = offset_y[:, None] scale_x = scale_x[:, None] scale_y = scale_y[:, None] x = keypoints[..., 0] y = keypoints[..., 1] x_boundary_inds = x == rois[:, 2][:, None] y_boundary_inds = y == rois[:, 3][:, None] x = (x - offset_x) * scale_x x = x.floor().long() y = (y - offset_y) * scale_y y = y.floor().long() x[x_boundary_inds] = heatmap_size - 1 y[y_boundary_inds] = heatmap_size - 1 valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) vis = keypoints[..., 2] > 0 valid = (valid_loc & vis).long() lin_ind = y * heatmap_size + x heatmaps = lin_ind * valid return heatmaps, valid @script_if_tracing def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: """ Extract predicted keypoint locations from heatmaps. Args: maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for each ROI and each keypoint. rois (Tensor): (#ROIs, 4). The box of each ROI. Returns: Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to (x, y, logit, score) for each keypoint. When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. """ # The decorator use of torch.no_grad() was not supported by torchscript. 
# https://github.com/pytorch/pytorch/issues/44768 maps = maps.detach() rois = rois.detach() offset_x = rois[:, 0] offset_y = rois[:, 1] widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) widths_ceil = widths.ceil() heights_ceil = heights.ceil() num_rois, num_keypoints = maps.shape[:2] xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) width_corrections = widths / widths_ceil height_corrections = heights / heights_ceil keypoints_idx = torch.arange(num_keypoints, device=maps.device) for i in range(num_rois): outsize = (int(heights_ceil[i]), int(widths_ceil[i])) roi_map = F.interpolate( maps[[i]], size=outsize, mode="bicubic", align_corners=False ).squeeze( 0 ) # #keypoints x H x W # softmax over the spatial region max_score, _ = roi_map.view(num_keypoints, -1).max(1) max_score = max_score.view(num_keypoints, 1, 1) tmp_full_resolution = (roi_map - max_score).exp_() tmp_pool_resolution = (maps[i] - max_score).exp_() # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, # so that the scores of objects of different absolute sizes will be more comparable roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) w = roi_map.shape[2] pos = roi_map.view(num_keypoints, -1).argmax(1) x_int = pos % w y_int = (pos - x_int) // w assert ( roi_map_scores[keypoints_idx, y_int, x_int] == roi_map_scores.view(num_keypoints, -1).max(1)[0] ).all() x = (x_int.float() + 0.5) * width_corrections[i] y = (y_int.float() + 0.5) * height_corrections[i] xy_preds[i, :, 0] = x + offset_x[i] xy_preds[i, :, 1] = y + offset_y[i] xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] return xy_preds
banmo-main
third_party/detectron2_old/detectron2/structures/keypoints.py
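A shape-level sketch of Keypoints.to_heatmap and heatmaps_to_keypoints; the keypoint coordinates and heatmap logits are random placeholders.

import torch
from detectron2.structures import Keypoints, heatmaps_to_keypoints

N, K, S = 2, 17, 56                                   # instances, keypoints per instance, heatmap side
kpts = Keypoints(torch.rand(N, K, 3) * torch.tensor([96., 96., 2.]))  # (x, y, visibility)
rois = torch.tensor([[0., 0., 96., 96.], [8., 8., 88., 88.]])

heatmaps, valid = kpts.to_heatmap(rois, heatmap_size=S)   # (N, K) spatial labels and validity
scores = torch.randn(N, K, S, S)                          # pretend predicted keypoint logits
preds = heatmaps_to_keypoints(scores, rois)               # (N, K, 4): x, y, logit, score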
# Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ assert len(polygons) > 0, "COCOAPI does not support empty polygons" rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. `new_masks = masks[2:10]`: return a slice of masks. 3. 
`new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].view(1, -1)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: """ from detectron2.layers import paste_masks_in_image paste = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste( self.tensor, boxes, (height, width), threshold=threshold, ) return BitMasks(bitmasks)
banmo-main
third_party/detectron2_old/detectron2/structures/masks.py
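A brief usage sketch of the mask structures defined above (BitMasks and PolygonMasks). The tensor sizes, the painted rectangle, and the polygon coordinates are illustrative values only; a standard detectron2 installation is assumed.

import torch
from detectron2.structures import BitMasks, PolygonMasks

# BitMasks stores N binary masks as an (N, H, W) tensor.
bit_masks = BitMasks(torch.zeros(2, 32, 32, dtype=torch.bool))
bit_masks.tensor[0, 4:10, 6:12] = True                 # paint one rectangular mask; mask 1 stays empty
boxes = bit_masks.get_bounding_boxes()                 # tight boxes; the empty mask yields an all-zero box
crops = bit_masks.crop_and_resize(boxes.tensor, 28)    # (2, 28, 28) bool training targets via ROIAlign

# PolygonMasks stores each instance as a list of flat [x0, y0, ..., xn, yn] polygons.
poly_masks = PolygonMasks([[[6.0, 4.0, 12.0, 4.0, 12.0, 10.0, 6.0, 10.0]]])
print(poly_masks.area())                               # shoelace area of the square polygon: tensor([36.])
print(poly_masks.get_bounding_boxes().tensor)          # [[6., 4., 12., 10.]]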
# Copyright (c) Facebook, Inc. and its affiliates. from __future__ import division from typing import Any, List, Tuple import torch from torch import device from torch.nn import functional as F from detectron2.utils.env import TORCH_VERSION def _as_tensor(x: Tuple[int, int]) -> torch.Tensor: """ An equivalent of `torch.as_tensor`, but works under tracing if input is a list of tensor. `torch.as_tensor` will record a constant in tracing, but this function will use `torch.stack` instead. """ if torch.jit.is_scripting(): return torch.as_tensor(x) if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]): return torch.stack(x) return torch.as_tensor(x) class ImageList(object): """ Structure that holds a list of images (of possibly varying sizes) as a single tensor. This works by padding the images to the same size, and storing in a field the original sizes of each image Attributes: image_sizes (list[tuple[int, int]]): each tuple is (h, w). During tracing, it becomes list[Tensor] instead. """ def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): """ Arguments: tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can be smaller than (H, W) due to padding. """ self.tensor = tensor self.image_sizes = image_sizes def __len__(self) -> int: return len(self.image_sizes) def __getitem__(self, idx) -> torch.Tensor: """ Access the individual image in its original size. Args: idx: int or slice Returns: Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 """ size = self.image_sizes[idx] return self.tensor[idx, ..., : size[0], : size[1]] @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "ImageList": cast_tensor = self.tensor.to(*args, **kwargs) return ImageList(cast_tensor, self.image_sizes) @property def device(self) -> device: return self.tensor.device @staticmethod def from_tensors( tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 ) -> "ImageList": """ Args: tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded to the same shape with `pad_value`. size_divisibility (int): If `size_divisibility > 0`, add padding to ensure the common height and width is divisible by `size_divisibility`. This depends on the model and many models need a divisibility of 32. pad_value (float): value to pad Returns: an `ImageList`. """ assert len(tensors) > 0 assert isinstance(tensors, (tuple, list)) for t in tensors: assert isinstance(t, torch.Tensor), type(t) assert t.shape[:-2] == tensors[0].shape[:-2], t.shape image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] image_sizes_tensor = [_as_tensor(x) for x in image_sizes] max_size = torch.stack(image_sizes_tensor).max(0).values if size_divisibility > 1: stride = size_divisibility # the last two dims are H,W, both subject to divisibility requirement max_size = (max_size + (stride - 1)) // stride * stride # handle weirdness of scripting and tracing ... if torch.jit.is_scripting(): max_size: List[int] = max_size.to(dtype=torch.long).tolist() else: # https://github.com/pytorch/pytorch/issues/42448 if TORCH_VERSION >= (1, 7) and torch.jit.is_tracing(): image_sizes = image_sizes_tensor if len(tensors) == 1: # This seems slightly (2%) faster. 
# TODO: check whether it's faster for multiple images as well image_size = image_sizes[0] padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) else: # max_size can be a tensor in tracing mode, therefore convert to list batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) batched_imgs = tensors[0].new_full(batch_shape, pad_value) for img, pad_img in zip(tensors, batched_imgs): pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) return ImageList(batched_imgs.contiguous(), image_sizes)
banmo-main
third_party/detectron2_old/detectron2/structures/image_list.py
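A small sketch of ImageList.from_tensors on two images of different sizes; the shapes are arbitrary and a standard detectron2 installation is assumed.

import torch
from detectron2.structures import ImageList

images = [torch.rand(3, 480, 640), torch.rand(3, 512, 384)]
image_list = ImageList.from_tensors(images, size_divisibility=32, pad_value=0.0)
print(image_list.tensor.shape)   # torch.Size([2, 3, 512, 640]): padded to a common, 32-divisible size
print(image_list.image_sizes)    # [(480, 640), (512, 384)]: the original (h, w) of each image
print(image_list[1].shape)       # torch.Size([3, 512, 384]): indexing strips the padding again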
# Copyright (c) Facebook, Inc. and its affiliates. import math from typing import List, Tuple import torch from detectron2.layers.rotated_boxes import pairwise_iou_rotated from .boxes import Boxes, _maybe_jit_unused class RotatedBoxes(Boxes): """ This structure stores a list of rotated boxes as a Nx5 torch.Tensor. It supports some common methods about boxes (`area`, `clip`, `nonempty`, etc), and also behaves like a Tensor (support indexing, `to(device)`, `.device`, and iteration over all boxes) """ def __init__(self, tensor: torch.Tensor): """ Args: tensor (Tensor[float]): a Nx5 matrix. Each row is (x_center, y_center, width, height, angle), in which angle is represented in degrees. While there's no strict range restriction for it, the recommended principal range is between [-180, 180) degrees. Assume we have a horizontal box B = (x_center, y_center, width, height), where width is along the x-axis and height is along the y-axis. The rotated box B_rot (x_center, y_center, width, height, angle) can be seen as: 1. When angle == 0: B_rot == B 2. When angle > 0: B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; 3. When angle < 0: B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. Mathematically, since the right-handed coordinate system for image space is (y, x), where y is top->down and x is left->right, the 4 vertices of the rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4) in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, :math:`(y_c, x_c)` is the center of the rectangle): .. math:: yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, which is the standard rigid-body rotation transformation. Intuitively, the angle is (1) the rotation angle from y-axis in image space to the height vector (top->down in the box's local coordinate system) of the box in CCW, and (2) the rotation angle from x-axis in image space to the width vector (left->right in the box's local coordinate system) of the box in CCW. More intuitively, consider the following horizontal box ABCD represented in (x1, y1, x2, y2): (3, 2, 7, 4), covering the [3, 7] x [2, 4] region of the continuous coordinate system which looks like this: .. code:: none O--------> x | | A---B | | | | D---C | v y Note that each capital letter represents one 0-dimensional geometric point instead of a 'square pixel' here. In the example above, using (x, y) to represent a point we have: .. math:: O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) We name vector AB = vector DC as the width vector in box's local coordinate system, and vector AD = vector BC as the height vector in box's local coordinate system. Initially, when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis in the image space, respectively. For better illustration, we denote the center of the box as E, .. code:: none O--------> x | | A---B | | E | | D---C | v y where the center E = ((3+7)/2, (2+4)/2) = (5, 3). Also, .. math:: width = |AB| = |CD| = 7 - 3 = 4, height = |AD| = |BC| = 4 - 2 = 2. Therefore, the corresponding representation for the same shape in rotated box in (x_center, y_center, width, height, angle) format is: (5, 3, 4, 2, 0), Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees CCW (counter-clockwise) by definition. It looks like this: .. 
code:: none O--------> x | B-C | | | | |E| | | | | A-D v y The center E is still located at the same point (5, 3), while the vertices ABCD are rotated by 90 degrees CCW with regard to E: A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to vector AD or vector BC (the top->down height vector in box's local coordinate system), or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right width vector in box's local coordinate system). .. math:: width = |AB| = |CD| = 5 - 1 = 4, height = |AD| = |BC| = 6 - 4 = 2. Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) by definition? It looks like this: .. code:: none O--------> x | D-A | | | | |E| | | | | C-B v y The center E is still located at the same point (5, 3), while the vertices ABCD are rotated by 90 degrees CW with regard to E: A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) .. math:: width = |AB| = |CD| = 5 - 1 = 4, height = |AD| = |BC| = 6 - 4 = 2. This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU will be 1. However, these two will generate different RoI Pooling results and should not be treated as an identical box. On the other hand, it's easy to see that (X, Y, W, H, A) is identical to (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is equivalent to rotating the same shape 90 degrees CW. We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): .. code:: none O--------> x | | C---D | | E | | B---A | v y .. math:: A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), width = |AB| = |CD| = 7 - 3 = 4, height = |AD| = |BC| = 4 - 2 = 2. Finally, this is a very inaccurate (heavily quantized) illustration of how (5, 3, 4, 2, 60) looks like in case anyone wonders: .. code:: none O--------> x | B\ | / C | /E / | A / | `D v y It's still a rectangle with center of (5, 3), width of 4 and height of 2, but its angle (and thus orientation) is somewhere between (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) if tensor.numel() == 0: # Use reshape, so we don't end up creating a new tensor that does not depend on # the inputs (and consequently confuses jit) tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device) assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() self.tensor = tensor def clone(self) -> "RotatedBoxes": """ Clone the RotatedBoxes. Returns: RotatedBoxes """ return RotatedBoxes(self.tensor.clone()) @_maybe_jit_unused def to(self, device: torch.device): # Boxes are assumed float32 and does not support to(dtype) return RotatedBoxes(self.tensor.to(device=device)) def area(self) -> torch.Tensor: """ Computes the area of all the boxes. Returns: torch.Tensor: a vector with areas of each box. """ box = self.tensor area = box[:, 2] * box[:, 3] return area def normalize_angles(self) -> None: """ Restrict angles to the range of [-180, 180) degrees """ self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None: """ Clip (in place) the boxes by limiting x coordinates to the range [0, width] and y coordinates to the range [0, height]. 
For RRPN: Only clip boxes that are almost horizontal with a tolerance of clip_angle_threshold to maintain backward compatibility. Rotated boxes beyond this threshold are not clipped for two reasons: 1. There are potentially multiple ways to clip a rotated box to make it fit within the image. 2. It's tricky to make the entire rectangular box fit within the image and still be able to not leave out pixels of interest. Therefore we rely on ops like RoIAlignRotated to safely handle this. Args: box_size (height, width): The clipping box's size. clip_angle_threshold: Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), we do the clipping as horizontal boxes. """ h, w = box_size # normalize angles to be within (-180, 180] degrees self.normalize_angles() idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] # convert to (x1, y1, x2, y2) x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 # clip x1.clamp_(min=0, max=w) y1.clamp_(min=0, max=h) x2.clamp_(min=0, max=w) y2.clamp_(min=0, max=h) # convert back to (xc, yc, w, h) self.tensor[idx, 0] = (x1 + x2) / 2.0 self.tensor[idx, 1] = (y1 + y2) / 2.0 # make sure widths and heights do not increase due to numerical errors self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) def nonempty(self, threshold: float = 0.0) -> torch.Tensor: """ Find boxes that are non-empty. A box is considered empty, if either of its side is no larger than threshold. Returns: Tensor: a binary vector which represents whether each box is empty (False) or non-empty (True). """ box = self.tensor widths = box[:, 2] heights = box[:, 3] keep = (widths > threshold) & (heights > threshold) return keep def __getitem__(self, item) -> "RotatedBoxes": """ Returns: RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. The following usage are allowed: 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. 2. `new_boxes = boxes[2:10]`: return a slice of boxes. 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor with `length = len(boxes)`. Nonzero elements in the vector will be selected. Note that the returned RotatedBoxes might share storage with this RotatedBoxes, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return RotatedBoxes(self.tensor[item].view(1, -1)) b = self.tensor[item] assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( item ) return RotatedBoxes(b) def __len__(self) -> int: return self.tensor.shape[0] def __repr__(self) -> str: return "RotatedBoxes(" + str(self.tensor) + ")" def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: """ Args: box_size (height, width): Size of the reference box covering [0, width] x [0, height] boundary_threshold (int): Boxes that extend beyond the reference box boundary by more than boundary_threshold are considered "outside". For RRPN, it might not be necessary to call this function since it's common for rotated box to extend to outside of the image boundaries (the clip function only clips the near-horizontal boxes) Returns: a binary vector, indicating whether each box is inside the reference box. 
""" height, width = box_size cnt_x = self.tensor[..., 0] cnt_y = self.tensor[..., 1] half_w = self.tensor[..., 2] / 2.0 half_h = self.tensor[..., 3] / 2.0 a = self.tensor[..., 4] c = torch.abs(torch.cos(a * math.pi / 180.0)) s = torch.abs(torch.sin(a * math.pi / 180.0)) # This basically computes the horizontal bounding rectangle of the rotated box max_rect_dx = c * half_w + s * half_h max_rect_dy = c * half_h + s * half_w inds_inside = ( (cnt_x - max_rect_dx >= -boundary_threshold) & (cnt_y - max_rect_dy >= -boundary_threshold) & (cnt_x + max_rect_dx < width + boundary_threshold) & (cnt_y + max_rect_dy < height + boundary_threshold) ) return inds_inside def get_centers(self) -> torch.Tensor: """ Returns: The box centers in a Nx2 array of (x, y). """ return self.tensor[:, :2] def scale(self, scale_x: float, scale_y: float) -> None: """ Scale the rotated box with horizontal and vertical scaling factors Note: when scale_factor_x != scale_factor_y, the rotated box does not preserve the rectangular shape when the angle is not a multiple of 90 degrees under resize transformation. Instead, the shape is a parallelogram (that has skew) Here we make an approximation by fitting a rotated rectangle to the parallelogram. """ self.tensor[:, 0] *= scale_x self.tensor[:, 1] *= scale_y theta = self.tensor[:, 4] * math.pi / 180.0 c = torch.cos(theta) s = torch.sin(theta) # In image space, y is top->down and x is left->right # Consider the local coordintate system for the rotated box, # where the box center is located at (0, 0), and the four vertices ABCD are # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) # the midpoint of the left edge AD of the rotated box E is: # E = (A+D)/2 = (-w / 2, 0) # the midpoint of the top edge AB of the rotated box F is: # F(0, -h / 2) # To get the old coordinates in the global system, apply the rotation transformation # (Note: the right-handed coordinate system for image space is yOx): # (old_x, old_y) = (s * y + c * x, c * y - s * x) # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) # After applying the scaling factor (sfx, sfy): # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) # The new width after scaling tranformation becomes: # w(new) = |E(new) - O| * 2 # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] # # For example, # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) # h(new) = |F(new) - O| * 2 # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] # # For example, # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) # The angle is the rotation angle from y-axis in image space to the height # vector (top->down in the box's local coordinate system) of the box in CCW. 
# # angle(new) = angle_yOx(O - F(new)) # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) # = atan2(sfx * s * h / 2, sfy * c * h / 2) # = atan2(sfx * s, sfy * c) # # For example, # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi @classmethod @_maybe_jit_unused def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": """ Concatenates a list of RotatedBoxes into a single RotatedBoxes Arguments: boxes_list (list[RotatedBoxes]) Returns: RotatedBoxes: the concatenated RotatedBoxes """ assert isinstance(boxes_list, (list, tuple)) if len(boxes_list) == 0: return cls(torch.empty(0)) assert all([isinstance(box, RotatedBoxes) for box in boxes_list]) # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) return cat_boxes @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __iter__(self): """ Yield a box as a Tensor of shape (5,) at a time. """ yield from self.tensor def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None: """ Given two lists of rotated boxes of size N and M, compute the IoU (intersection over union) between **all** N x M pairs of boxes. The box order must be (x_center, y_center, width, height, angle). Args: boxes1, boxes2 (RotatedBoxes): two `RotatedBoxes`. Contains N & M rotated boxes, respectively. Returns: Tensor: IoU, sized [N,M]. """ return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
banmo-main
third_party/detectron2_old/detectron2/structures/rotated_boxes.py
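A short sketch of the (x_center, y_center, width, height, angle) convention documented above, reusing the docstring's example box; pairwise_iou assumes a detectron2 build with its compiled rotated-IoU op.

import torch
from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou

boxes = RotatedBoxes(torch.tensor([[5.0, 3.0, 4.0, 2.0, 0.0],
                                   [5.0, 3.0, 4.0, 2.0, 90.0]]))
print(boxes.area())                # tensor([8., 8.])
boxes.normalize_angles()           # restrict angles to [-180, 180); a no-op for these values
print(pairwise_iou(boxes, boxes))  # off-diagonal IoU is well below 1: same center, different orientation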
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. """ global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. 
Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. 
""" from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False
banmo-main
third_party/detectron2_old/detectron2/config/config.py
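A hedged sketch of the two entry points above: get_cfg() for a clone of the defaults, and @configurable for building an object either directly or from a CfgNode. ToyHead and its choice of config keys are illustrative, not part of detectron2.

from detectron2.config import configurable, get_cfg

cfg = get_cfg()                          # a fresh clone of the default config
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3      # override an existing key
# cfg.merge_from_file("my_config.yaml")  # would also upgrade an old-version yaml automatically

class ToyHead:
    @configurable
    def __init__(self, num_classes, score_thresh=0.05):
        self.num_classes = num_classes
        self.score_thresh = score_thresh

    @classmethod
    def from_config(cls, cfg):
        # translate CfgNode keys into __init__ kwargs; 'cfg' must be the first argument
        return {
            "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            "score_thresh": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
        }

head_a = ToyHead(num_classes=3)          # regular construction
head_b = ToyHead(cfg, score_thresh=0.3)  # construct from cfg, with an explicit overwrite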
# Copyright (c) Facebook, Inc. and its affiliates. """ Backward compatibility of configs. Instructions to bump version: + It's not needed to bump version if new keys are added. It's only needed when backward-incompatible changes happen (i.e., some existing keys disappear, or the meaning of a key changes) + To bump version, do the following: 1. Increment _C.VERSION in defaults.py 2. Add a converter in this file. Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, and a function "downgrade" which in-place downgrades config from X to X-1 In each function, VERSION is left unchanged. Each converter assumes that its input has the relevant keys (i.e., the input is not a partial config). 3. Run the tests (test_config.py) to make sure the upgrade & downgrade functions are consistent. """ import logging from typing import List, Optional, Tuple from .config import CfgNode as CN from .defaults import _C __all__ = ["upgrade_config", "downgrade_config"] def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: """ Upgrade a config from its current version to a newer version. Args: cfg (CfgNode): to_version (int): defaults to the latest version. """ cfg = cfg.clone() if to_version is None: to_version = _C.VERSION assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( cfg.VERSION, to_version ) for k in range(cfg.VERSION, to_version): converter = globals()["ConverterV" + str(k + 1)] converter.upgrade(cfg) cfg.VERSION = k + 1 return cfg def downgrade_config(cfg: CN, to_version: int) -> CN: """ Downgrade a config from its current version to an older version. Args: cfg (CfgNode): to_version (int): Note: A general downgrade of arbitrary configs is not always possible due to the different functionalities in different versions. The purpose of downgrade is only to recover the defaults in old versions, allowing it to load an old partial yaml config. Therefore, the implementation only needs to fill in the default values in the old version when a general downgrade is not possible. """ cfg = cfg.clone() assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( cfg.VERSION, to_version ) for k in range(cfg.VERSION, to_version, -1): converter = globals()["ConverterV" + str(k)] converter.downgrade(cfg) cfg.VERSION = k - 1 return cfg def guess_version(cfg: CN, filename: str) -> int: """ Guess the version of a partial config where the VERSION field is not specified. Returns the version, or the latest if cannot make a guess. This makes it easier for users to migrate. """ logger = logging.getLogger(__name__) def _has(name: str) -> bool: cur = cfg for n in name.split("."): if n not in cur: return False cur = cur[n] return True # Most users' partial configs have "MODEL.WEIGHT", so guess on it ret = None if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): ret = 1 if ret is not None: logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) else: ret = _C.VERSION logger.warning( "Config '{}' has no VERSION. 
Assuming it to be compatible with latest v{}.".format( filename, ret ) ) return ret def _rename(cfg: CN, old: str, new: str) -> None: old_keys = old.split(".") new_keys = new.split(".") def _set(key_seq: List[str], val: str) -> None: cur = cfg for k in key_seq[:-1]: if k not in cur: cur[k] = CN() cur = cur[k] cur[key_seq[-1]] = val def _get(key_seq: List[str]) -> CN: cur = cfg for k in key_seq: cur = cur[k] return cur def _del(key_seq: List[str]) -> None: cur = cfg for k in key_seq[:-1]: cur = cur[k] del cur[key_seq[-1]] if len(cur) == 0 and len(key_seq) > 1: _del(key_seq[:-1]) _set(new_keys, _get(old_keys)) _del(old_keys) class _RenameConverter: """ A converter that handles simple rename. """ RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) @classmethod def upgrade(cls, cfg: CN) -> None: for old, new in cls.RENAME: _rename(cfg, old, new) @classmethod def downgrade(cls, cfg: CN) -> None: for old, new in cls.RENAME[::-1]: _rename(cfg, new, old) class ConverterV1(_RenameConverter): RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] class ConverterV2(_RenameConverter): """ A large bulk of rename, before public release. """ RENAME = [ ("MODEL.WEIGHT", "MODEL.WEIGHTS"), ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), ( "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", ), ( "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", ), ( "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", ), ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), ("TEST.AUG_ON", "TEST.AUG.ENABLED"), ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), ] @classmethod def upgrade(cls, cfg: CN) -> None: super().upgrade(cfg) if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": _rename( cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" ) _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] else: _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] @classmethod def downgrade(cls, cfg: CN) -> None: super().downgrade(cfg) _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is 
not used anywhere in any version
banmo-main
third_party/detectron2_old/detectron2/config/compat.py
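An illustrative round trip through the converters above: downgrade the current defaults to v1, set a key under its old name, and upgrade back. The weight path is a placeholder value.

from detectron2.config import downgrade_config, get_cfg, upgrade_config

old_cfg = downgrade_config(get_cfg(), to_version=1)  # recover v1 defaults (ConverterV2.downgrade)
old_cfg.MODEL.WEIGHT = "model_final.pkl"             # v1 spelling of MODEL.WEIGHTS

new_cfg = upgrade_config(old_cfg)                    # ConverterV2.upgrade applies the rename table
print(new_cfg.VERSION, new_cfg.MODEL.WEIGHTS)        # 2 model_final.pkl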
# Copyright (c) Facebook, Inc. and its affiliates. from .compat import downgrade_config, upgrade_config from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable from .instantiate import instantiate from .lazy import LazyCall, LazyConfig __all__ = [ "CfgNode", "get_cfg", "global_cfg", "set_global_cfg", "downgrade_config", "upgrade_config", "configurable", "instantiate", "LazyCall", "LazyConfig", ] from detectron2.utils.env import fixup_module_metadata fixup_module_metadata(__name__, globals(), __all__) del fixup_module_metadata
banmo-main
third_party/detectron2_old/detectron2/config/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do
banmo-main
third_party/detectron2_old/detectron2/config/instantiate.py
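A minimal sketch of the "_target_" mechanism above, using a plain dict and torch callables as convenient targets; LazyCall (exported from the package __init__) builds such dicts, but here one is written by hand.

from detectron2.config import instantiate

linear_cfg = {"_target_": "torch.nn.Linear", "in_features": 16, "out_features": 4}
linear = instantiate(linear_cfg)   # locates "torch.nn.Linear" and calls it with the remaining kwargs
print(linear)                      # Linear(in_features=16, out_features=4, bias=True)

# Values are instantiated recursively, so nested "_target_" dicts or lists also work.
opt_cfg = {"_target_": "torch.optim.SGD", "params": linear.parameters(), "lr": 0.01}
optimizer = instantiate(opt_cfg)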
# Copyright (c) Facebook, Inc. and its affiliates. from .config import CfgNode as CN # ----------------------------------------------------------------------------- # Convention about Training / Test specific parameters # ----------------------------------------------------------------------------- # Whenever an argument can be either used for training or for testing, the # corresponding name will be post-fixed by a _TRAIN for a training parameter, # or _TEST for a test-specific parameter. # For example, the number of images during training will be # IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be # IMAGES_PER_BATCH_TEST # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() # The version number, to upgrade from old configs to new ones if any # changes happen. It's recommended to keep a VERSION in your config file. _C.VERSION = 2 _C.MODEL = CN() _C.MODEL.LOAD_PROPOSALS = False _C.MODEL.MASK_ON = False _C.MODEL.KEYPOINT_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" # Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file # to be loaded to the model. You can find available models in the model zoo. _C.MODEL.WEIGHTS = "" # Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR). # To train on images of different number of channels, just set different mean & std. # Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] # When using pre-trained models in Detectron1 or any MSRA models, # std has been absorbed into its conv1 weights, so the std needs to be set 1. # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] # ----------------------------------------------------------------------------- # INPUT # ----------------------------------------------------------------------------- _C.INPUT = CN() # Size of the smallest side of the image during training _C.INPUT.MIN_SIZE_TRAIN = (800,) # Sample size of smallest side by choice or random selection from range give by # INPUT.MIN_SIZE_TRAIN _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" # Maximum size of the side of the image during training _C.INPUT.MAX_SIZE_TRAIN = 1333 # Size of the smallest side of the image during testing. Set to zero to disable resize in testing. _C.INPUT.MIN_SIZE_TEST = 800 # Maximum size of the side of the image during testing _C.INPUT.MAX_SIZE_TEST = 1333 # Mode for flipping images used in data augmentation during training # choose one of ["horizontal, "vertical", "none"] _C.INPUT.RANDOM_FLIP = "horizontal" # `True` if cropping is used for data augmentation during training _C.INPUT.CROP = CN({"ENABLED": False}) # Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation. _C.INPUT.CROP.TYPE = "relative_range" # Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of # pixels if CROP.TYPE is "absolute" _C.INPUT.CROP.SIZE = [0.9, 0.9] # Whether the model needs RGB, YUV, HSV etc. # Should be one of the modes defined here, as we use PIL to read the image: # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes # with BGR being the one exception. 
One can set image format to BGR, we will # internally use RGB for conversion and flip the channels over _C.INPUT.FORMAT = "BGR" # The ground truth mask format that the model will use. # Mask R-CNN supports either "polygon" or "bitmask" as ground truth. _C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask" # ----------------------------------------------------------------------------- # Dataset # ----------------------------------------------------------------------------- _C.DATASETS = CN() # List of the dataset names for training. Must be registered in DatasetCatalog # Samples from these datasets will be merged and used as one dataset. _C.DATASETS.TRAIN = () # List of the pre-computed proposal files for training, which must be consistent # with datasets listed in DATASETS.TRAIN. _C.DATASETS.PROPOSAL_FILES_TRAIN = () # Number of top scoring precomputed proposals to keep for training _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 # List of the dataset names for testing. Must be registered in DatasetCatalog _C.DATASETS.TEST = () # List of the pre-computed proposal files for test, which must be consistent # with datasets listed in DATASETS.TEST. _C.DATASETS.PROPOSAL_FILES_TEST = () # Number of top scoring precomputed proposals to keep for test _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 # ----------------------------------------------------------------------------- # DataLoader # ----------------------------------------------------------------------------- _C.DATALOADER = CN() # Number of data loading threads _C.DATALOADER.NUM_WORKERS = 4 # If True, each batch should contain only images for which the aspect ratio # is compatible. This groups portrait images together, and landscape images # are not batched with portrait images. _C.DATALOADER.ASPECT_RATIO_GROUPING = True # Options: TrainingSampler, RepeatFactorTrainingSampler _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" # Repeat threshold for RepeatFactorTrainingSampler _C.DATALOADER.REPEAT_THRESHOLD = 0.0 # Tf True, when working on datasets that have instance annotations, the # training dataloader will filter out images without associated annotations _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True # ---------------------------------------------------------------------------- # # Backbone options # ---------------------------------------------------------------------------- # _C.MODEL.BACKBONE = CN() _C.MODEL.BACKBONE.NAME = "build_resnet_backbone" # Freeze the first several stages so they are not trained. # There are 5 stages in ResNet. The first is a convolution, and the following # stages are each group of residual blocks. _C.MODEL.BACKBONE.FREEZE_AT = 2 # ---------------------------------------------------------------------------- # # FPN options # ---------------------------------------------------------------------------- # _C.MODEL.FPN = CN() # Names of the input feature maps to be used by FPN # They must have contiguous power of 2 strides # e.g., ["res2", "res3", "res4", "res5"] _C.MODEL.FPN.IN_FEATURES = [] _C.MODEL.FPN.OUT_CHANNELS = 256 # Options: "" (no norm), "GN" _C.MODEL.FPN.NORM = "" # Types for fusing the FPN top-down and lateral features. 
Can be either "sum" or "avg" _C.MODEL.FPN.FUSE_TYPE = "sum" # ---------------------------------------------------------------------------- # # Proposal generator options # ---------------------------------------------------------------------------- # _C.MODEL.PROPOSAL_GENERATOR = CN() # Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals" _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" # Proposal height and width both need to be greater than MIN_SIZE # (a the scale used during training or inference) _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 # ---------------------------------------------------------------------------- # # Anchor generator options # ---------------------------------------------------------------------------- # _C.MODEL.ANCHOR_GENERATOR = CN() # The generator can be any name in the ANCHOR_GENERATOR registry _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input. # Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for # IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1. # When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES. _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] # Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect # ratios are generated by an anchor generator. # Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W) # to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, # or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used # for all IN_FEATURES. _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] # Anchor angles. # list[list[float]], the angle in degrees, for each input feature map. # ANGLES[i] specifies the list of angles for IN_FEATURES[i]. _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] # Relative offset between the center of the first anchor and the top-left corner of the image # Value has to be in [0, 1). Recommend to use 0.5, which means half stride. # The value is not expected to affect model accuracy. _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 # ---------------------------------------------------------------------------- # # RPN options # ---------------------------------------------------------------------------- # _C.MODEL.RPN = CN() _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY # Names of the input feature maps to be used by RPN # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN _C.MODEL.RPN.IN_FEATURES = ["res4"] # Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels # Set to -1 or a large value, e.g. 
100000, to disable pruning anchors _C.MODEL.RPN.BOUNDARY_THRESH = -1 # IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] # Minimum overlap required between an anchor and ground-truth box for the # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD # ==> positive RPN example: 1) # Maximum overlap allowed between an anchor and ground-truth box for the # (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD # ==> negative RPN example: 0) # Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) # are ignored (-1) _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] _C.MODEL.RPN.IOU_LABELS = [0, -1, 1] # Number of regions per image used to train RPN _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 # Target fraction of foreground (positive) examples per RPN minibatch _C.MODEL.RPN.POSITIVE_FRACTION = 0.5 # Options are: "smooth_l1", "giou" _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 # Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 _C.MODEL.RPN.LOSS_WEIGHT = 1.0 # Number of top scoring RPN proposals to keep before applying NMS # When FPN is used, this is *per FPN level* (not total) _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 # Number of top scoring RPN proposals to keep after applying NMS # When FPN is used, this limit is applied per level and then again to the union # of proposals from all levels # NOTE: When FPN is used, the meaning of this config is different from Detectron1. # It means per-batch topk in Detectron1, but per-image topk here. # See the "find_top_rpn_proposals" function for details. _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 # NMS threshold used on RPN proposals _C.MODEL.RPN.NMS_THRESH = 0.7 # Set this to -1 to use the same number of output channels as input channels. _C.MODEL.RPN.CONV_DIMS = [-1] # ---------------------------------------------------------------------------- # # ROI HEADS options # ---------------------------------------------------------------------------- # _C.MODEL.ROI_HEADS = CN() _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" # Number of foreground classes _C.MODEL.ROI_HEADS.NUM_CLASSES = 80 # Names of the input feature maps to be used by ROI heads # Currently all heads (box, mask, ...) use the same input feature map list # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] # IOU overlap ratios [IOU_THRESHOLD] # Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) # Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] # RoI minibatch size *per image* (number of regions of interest [ROIs]) # Total number of RoIs per training minibatch = # ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH # E.g., a common configuration is: 512 * 16 = 8192 _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # Target fraction of RoI minibatch that is labeled foreground (i.e. 
class > 0) _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 # Only used on test mode # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to # balance obtaining high recall with not having too many low precision # detections that will slow down inference post processing steps (like NMS) # A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down # inference. _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 # Overlap threshold used for non-maximum suppression (suppress boxes with # IoU >= this threshold) _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 # If True, augment proposals with ground-truth boxes before sampling proposals to # train ROI heads. _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True # ---------------------------------------------------------------------------- # # Box Head # ---------------------------------------------------------------------------- # _C.MODEL.ROI_BOX_HEAD = CN() # C4 don't use head name option # Options for non-C4 models: FastRCNNConvFCHead, _C.MODEL.ROI_BOX_HEAD.NAME = "" # Options are: "smooth_l1", "giou" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" # The final scaling coefficient on the box regression loss, used to balance the magnitude of its # gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`. _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets # These are empirically chosen to approximately lead to unit variance targets _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 # Type of pooling operation applied to the incoming feature map for each RoI _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 # Hidden layer dimension for FC layers in the RoI box head _C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 # Channel dimension for Conv layers in the RoI box head _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 # Normalization method for the convolution layers. # Options: "" (no norm), "GN", "SyncBN". _C.MODEL.ROI_BOX_HEAD.NORM = "" # Whether to use class agnostic for bbox regression _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False # If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes. _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False # ---------------------------------------------------------------------------- # # Cascaded Box Head # ---------------------------------------------------------------------------- # _C.MODEL.ROI_BOX_CASCADE_HEAD = CN() # The number of cascade stages is implicitly defined by the length of the following two configs. 
_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( (10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0), ) _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) # ---------------------------------------------------------------------------- # # Mask Head # ---------------------------------------------------------------------------- # _C.MODEL.ROI_MASK_HEAD = CN() _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 # Normalization method for the convolution layers. # Options: "" (no norm), "GN", "SyncBN". _C.MODEL.ROI_MASK_HEAD.NORM = "" # Whether to use class agnostic for mask prediction _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False # Type of pooling operation applied to the incoming feature map for each RoI _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" # ---------------------------------------------------------------------------- # # Keypoint Head # ---------------------------------------------------------------------------- # _C.MODEL.ROI_KEYPOINT_HEAD = CN() _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO. # Images with too few (or no) keypoints are excluded from training. _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 # Normalize by the total number of visible keypoints in the minibatch if True. # Otherwise, normalize by the total number of keypoints that could ever exist # in the minibatch. # The keypoint softmax loss is only calculated on visible keypoints. # Since the number of visible keypoints can vary significantly between # minibatches, this has the effect of up-weighting the importance of # minibatches with few visible keypoints. (Imagine the extreme case of # only one visible keypoint versus N: in the case of N, each one # contributes 1/N to the gradient compared to the single keypoint # determining the gradient direction). Instead, we can normalize the # loss by the total number of keypoints, if it were the case that all # keypoints were visible in a full minibatch. (Returning to the example, # this means that the one visible keypoint contributes as much as each # of the N keypoints.) _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True # Multi-task loss weight to use for keypoints # Recommended values: # - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True # - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 # Type of pooling operation applied to the incoming feature map for each RoI _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" # ---------------------------------------------------------------------------- # # Semantic Segmentation Head # ---------------------------------------------------------------------------- # _C.MODEL.SEM_SEG_HEAD = CN() _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for # the correposnding pixel. 
_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 # Number of classes in the semantic segmentation head _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 # Number of channels in the 3x3 convs inside semantic-FPN heads. _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride. _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 # Normalization method for the convolution layers. Options: "" (no norm), "GN". _C.MODEL.SEM_SEG_HEAD.NORM = "GN" _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN = CN() # Scaling of all losses from instance detection / segmentation head. _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 # options when combining instance & semantic segmentation outputs _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 # ---------------------------------------------------------------------------- # # RetinaNet Head # ---------------------------------------------------------------------------- # _C.MODEL.RETINANET = CN() # This is the number of foreground classes. _C.MODEL.RETINANET.NUM_CLASSES = 80 _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] # Convolutions to use in the cls and bbox tower # NOTE: this doesn't include the last conv for logits _C.MODEL.RETINANET.NUM_CONVS = 4 # IoU overlap ratio [bg, fg] for labeling anchors. # Anchors with < bg are labeled negative (0) # Anchors with >= bg and < fg are ignored (-1) # Anchors with >= fg are labeled positive (1) _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] # Prior prob for rare case (i.e. foreground) at the beginning of training. # This is used to set the bias for the logits layer of the classifier subnet. # This improves training stability in the case of heavy class imbalance. _C.MODEL.RETINANET.PRIOR_PROB = 0.01 # Inference cls score threshold, only anchors with score > INFERENCE_TH are # considered for inference (to improve speed) _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 # Select topk candidates before NMS _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 # Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) # Loss parameters _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 # Options are: "smooth_l1", "giou" _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" # One of BN, SyncBN, FrozenBN, GN # Only supports GN until unshared norm is implemented _C.MODEL.RETINANET.NORM = "" # ---------------------------------------------------------------------------- # # ResNe[X]t options (ResNets = {ResNet, ResNeXt} # Note that parts of a resnet may be used for both the backbone and the head # These options apply to both # ---------------------------------------------------------------------------- # _C.MODEL.RESNETS = CN() _C.MODEL.RESNETS.DEPTH = 50 _C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt _C.MODEL.RESNETS.NUM_GROUPS = 1 # Options: FrozenBN, GN, "SyncBN", "BN" _C.MODEL.RESNETS.NORM = "FrozenBN" # Baseline width of each group. # Scaling this parameters will scale the width of all bottleneck layers. 
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 # Place the stride 2 conv on the 1x1 filter # Use True only for the original MSRA ResNet; use False for C2 and Torch models _C.MODEL.RESNETS.STRIDE_IN_1X1 = True # Apply dilation in stage "res5" _C.MODEL.RESNETS.RES5_DILATION = 1 # Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet # For R18 and R34, this needs to be set to 64 _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 # Apply Deformable Convolution in stages # Specify if apply deform_conv on Res2, Res3, Res4, Res5 _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] # Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); # Use False for DeformableV1. _C.MODEL.RESNETS.DEFORM_MODULATED = False # Number of groups in deformable conv. _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 # ---------------------------------------------------------------------------- # # Solver # ---------------------------------------------------------------------------- # _C.SOLVER = CN() # See detectron2/solver/build.py for LR scheduler options _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" _C.SOLVER.MAX_ITER = 40000 _C.SOLVER.BASE_LR = 0.001 _C.SOLVER.MOMENTUM = 0.9 _C.SOLVER.NESTEROV = False _C.SOLVER.WEIGHT_DECAY = 0.0001 # The weight decay that's applied to parameters of normalization layers # (typically the affine transformation) _C.SOLVER.WEIGHT_DECAY_NORM = 0.0 _C.SOLVER.GAMMA = 0.1 # The iteration number to decrease learning rate by GAMMA. _C.SOLVER.STEPS = (30000,) _C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 _C.SOLVER.WARMUP_ITERS = 1000 _C.SOLVER.WARMUP_METHOD = "linear" # Save a checkpoint after every this number of iterations _C.SOLVER.CHECKPOINT_PERIOD = 5000 # Number of images per batch across all machines. This is also the number # of training images per step (i.e. per iteration). If we use 16 GPUs # and IMS_PER_BATCH = 32, each GPU will see 2 images per batch. # May be adjusted automatically if REFERENCE_WORLD_SIZE is set. _C.SOLVER.IMS_PER_BATCH = 16 # The reference number of workers (GPUs) this config is meant to train with. # It takes no effect when set to 0. # With a non-zero value, it will be used by DefaultTrainer to compute a desired # per-worker batch size, and then scale the other related configs (total batch size, # learning rate, etc) to match the per-worker batch size. # See documentation of `DefaultTrainer.auto_scale_workers` for details: _C.SOLVER.REFERENCE_WORLD_SIZE = 0 # Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for # biases. This is not useful (at least for recent models). You should avoid # changing these and they exist only to reproduce Detectron v1 training if # desired. 
_C.SOLVER.BIAS_LR_FACTOR = 1.0 _C.SOLVER.WEIGHT_DECAY_BIAS = _C.SOLVER.WEIGHT_DECAY # Gradient clipping _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) # Type of gradient clipping, currently 2 values are supported: # - "value": the absolute values of elements of each gradients are clipped # - "norm": the norm of the gradient for each parameter is clipped thus # affecting all elements in the parameter _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" # Maximum absolute value used for clipping gradients _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 # Floating point number p for L-p norm to be used with the "norm" # gradient clipping type; for L-inf, please specify .inf _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 # Enable automatic mixed precision for training # Note that this does not change model's inference behavior. # To use AMP in inference, run inference under autocast() _C.SOLVER.AMP = CN({"ENABLED": False}) # ---------------------------------------------------------------------------- # # Specific test options # ---------------------------------------------------------------------------- # _C.TEST = CN() # For end-to-end tests to verify the expected accuracy. # Each item is [task, metric, value, tolerance] # e.g.: [['bbox', 'AP', 38.5, 0.2]] _C.TEST.EXPECTED_RESULTS = [] # The period (in terms of steps) to evaluate the model during training. # Set to 0 to disable. _C.TEST.EVAL_PERIOD = 0 # The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval # When empty, it will use the defaults in COCO. # Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. _C.TEST.KEYPOINT_OKS_SIGMAS = [] # Maximum number of detections to return per image during inference (100 is # based on the limit established for the COCO dataset). _C.TEST.DETECTIONS_PER_IMAGE = 100 _C.TEST.AUG = CN({"ENABLED": False}) _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) _C.TEST.AUG.MAX_SIZE = 4000 _C.TEST.AUG.FLIP = True _C.TEST.PRECISE_BN = CN({"ENABLED": False}) _C.TEST.PRECISE_BN.NUM_ITER = 200 # ---------------------------------------------------------------------------- # # Misc options # ---------------------------------------------------------------------------- # # Directory where output files are written _C.OUTPUT_DIR = "./output" # Set seed to negative to fully randomize everything. # Set seed to positive to use a fixed seed. Note that a fixed seed increases # reproducibility but does not guarantee fully deterministic behavior. # Disabling all parallelism further increases reproducibility. _C.SEED = -1 # Benchmark different cudnn algorithms. # If input images have very different sizes, this option will have large overhead # for about 10k iterations. It usually hurts total time, but can benefit for certain models. # If input images have the same or similar sizes, benchmark is often helpful. _C.CUDNN_BENCHMARK = False # The period (in terms of steps) for minibatch visualization at train time. # Set to 0 to disable. _C.VIS_PERIOD = 0 # global config is for quick hack purposes. # You can set them in command line or config files, # and access it with: # # from detectron2.config import global_cfg # print(global_cfg.HACK) # # Do not commit any configs into it. _C.GLOBAL = CN() _C.GLOBAL.HACK = 1.0
banmo-main
third_party/detectron2_old/detectron2/config/defaults.py
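The defaults above are consumed by cloning them and overriding a handful of keys per experiment. A minimal sketch, assuming the standard detectron2 get_cfg() helper (which returns a clone of the _C defaults) is available as in upstream detectron2:

from detectron2.config import get_cfg

cfg = get_cfg()                              # a clone of the _C defaults; safe to mutate
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3          # e.g. a hypothetical 3-class dataset
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # trade recall for faster post-processing
cfg.SOLVER.IMS_PER_BATCH = 8
cfg.SOLVER.BASE_LR = 0.00025
# Dotted key/value pairs, as typically passed on the command line, go through merge_from_list:
cfg.merge_from_list(["SOLVER.MAX_ITER", 1000, "TEST.EVAL_PERIOD", 500])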
# Copyright (c) Facebook, Inc. and its affiliates. import ast import builtins import importlib import inspect import logging import os import uuid from collections import abc from contextlib import contextmanager from copy import deepcopy from typing import List, Tuple, Union import cloudpickle import yaml from omegaconf import DictConfig, ListConfig, OmegaConf from detectron2.utils.file_io import PathManager from detectron2.utils.registry import _convert_target_to_string __all__ = ["LazyCall", "LazyConfig"] class LazyCall: """ Wrap a callable so that when it's called, the call will not be executed, but returns a dict that describes the call. LazyCall object has to be called with only keyword arguments. Positional arguments are not yet supported. Examples: :: from detectron2.config import instantiate, LazyCall layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32) layer_cfg.out_channels = 64 # can edit it afterwards layer = instantiate(layer_cfg) """ def __init__(self, target): if not (callable(target) or isinstance(target, (str, abc.Mapping))): raise TypeError( "target of LazyCall must be a callable or defines a callable! Got {target}" ) self._target = target def __call__(self, **kwargs): kwargs["_target_"] = self._target return DictConfig(content=kwargs, flags={"allow_objects": True}) def _visit_dict_config(cfg, func): """ Apply func recursively to all DictConfig in cfg. """ if isinstance(cfg, DictConfig): func(cfg) for v in cfg.values(): _visit_dict_config(v, func) elif isinstance(cfg, ListConfig): for v in cfg: _visit_dict_config(v, func) def _validate_py_syntax(filename): # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py with PathManager.open(filename, "r") as f: content = f.read() try: ast.parse(content) except SyntaxError as e: raise SyntaxError(f"Config file {filename} has syntax error!") from e def _cast_to_config(obj): # if given a dict, return DictConfig instead if isinstance(obj, dict): return DictConfig(obj, flags={"allow_objects": True}) return obj _CFG_PACKAGE_NAME = "detectron2._cfg_loader" """ A namespace to put all imported config into. """ def _random_package_name(filename): # generate a random package name when loading config files return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename) @contextmanager def _patch_import(): """ Enhance relative import statements in config files, so that they: 1. locate files purely based on relative location, regardless of packages. e.g. you can import file without having __init__ 2. do not cache modules globally; modifications of module states has no side effect 3. support other storage system through PathManager 4. imported dict are turned into omegaconf.DictConfig automatically """ old_import = builtins.__import__ def find_relative_file(original_file, relative_import_path, level): cur_file = os.path.dirname(original_file) for _ in range(level - 1): cur_file = os.path.dirname(cur_file) cur_name = relative_import_path.lstrip(".") for part in cur_name.split("."): cur_file = os.path.join(cur_file, part) # NOTE: directory import is not handled. Because then it's unclear # if such import should produce python module or DictConfig. This can # be discussed further if needed. if not cur_file.endswith(".py"): cur_file += ".py" if not PathManager.isfile(cur_file): raise ImportError( f"Cannot import name {relative_import_path} from " f"{original_file}: {cur_file} has to exist." 
) return cur_file def new_import(name, globals=None, locals=None, fromlist=(), level=0): if ( # Only deal with relative imports inside config files level != 0 and globals is not None and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME) ): cur_file = find_relative_file(globals["__file__"], name, level) _validate_py_syntax(cur_file) spec = importlib.machinery.ModuleSpec( _random_package_name(cur_file), None, origin=cur_file ) module = importlib.util.module_from_spec(spec) module.__file__ = cur_file with PathManager.open(cur_file) as f: content = f.read() exec(compile(content, cur_file, "exec"), module.__dict__) for name in fromlist: # turn imported dict into DictConfig automatically val = _cast_to_config(module.__dict__[name]) module.__dict__[name] = val return module return old_import(name, globals, locals, fromlist=fromlist, level=level) builtins.__import__ = new_import yield new_import builtins.__import__ = old_import class LazyConfig: """ Provid methods to save, load, and overrides an omegaconf config object which may contain definition of lazily-constructed objects. """ @staticmethod def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None): """ Similar to :meth:`load()`, but load path relative to the caller's source file. This has the same functionality as a relative import, except that this method accepts filename as a string, so more characters are allowed in the filename. """ caller_frame = inspect.stack()[1] caller_fname = caller_frame[0].f_code.co_filename assert caller_fname != "<string>", "load_rel Unable to find caller" caller_dir = os.path.dirname(caller_fname) filename = os.path.join(caller_dir, filename) return LazyConfig.load(filename, keys) @staticmethod def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None): """ Load a config file. Args: filename: absolute path or relative path w.r.t. the current working directory keys: keys to load and return. If not given, return all keys (whose values are config objects) in a dict. """ has_keys = keys is not None filename = filename.replace("/./", "/") # redundant if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]: raise ValueError(f"Config file {filename} has to be a python or yaml file.") if filename.endswith(".py"): _validate_py_syntax(filename) with _patch_import(): # Record the filename module_namespace = { "__file__": filename, "__package__": _random_package_name(filename), } with PathManager.open(filename) as f: content = f.read() # Compile first with filename to: # 1. make filename appears in stacktrace # 2. 
make load_rel able to find its parent's (possibly remote) location exec(compile(content, filename, "exec"), module_namespace) ret = module_namespace else: with PathManager.open(filename) as f: obj = yaml.unsafe_load(f) ret = OmegaConf.create(obj, flags={"allow_objects": True}) if has_keys: if isinstance(keys, str): return _cast_to_config(ret[keys]) else: return tuple(_cast_to_config(ret[a]) for a in keys) else: if filename.endswith(".py"): # when not specified, only load those that are config objects ret = DictConfig( { name: _cast_to_config(value) for name, value in ret.items() if isinstance(value, (DictConfig, ListConfig, dict)) and not name.startswith("_") }, flags={"allow_objects": True}, ) return ret @staticmethod def save(cfg, filename: str): """ Args: cfg: an omegaconf config object filename: yaml file name to save the config file """ logger = logging.getLogger(__name__) try: cfg = deepcopy(cfg) except Exception: pass else: # if it's deep-copyable, then... def _replace_type_by_name(x): if "_target_" in x and callable(x._target_): try: x._target_ = _convert_target_to_string(x._target_) except AttributeError: pass # not necessary, but makes yaml looks nicer _visit_dict_config(cfg, _replace_type_by_name) try: with PathManager.open(filename, "w") as f: dict = OmegaConf.to_container(cfg, resolve=False) dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999) f.write(dumped) except Exception: logger.exception("Unable to serialize the config to yaml. Error:") new_filename = filename + ".pkl" try: # retry by pickle with PathManager.open(new_filename, "wb") as f: cloudpickle.dump(cfg, f) logger.warning(f"Config saved using cloudpickle at {new_filename} ...") except Exception: pass @staticmethod def apply_overrides(cfg, overrides: List[str]): """ In-place override contents of cfg. Args: cfg: an omegaconf config object overrides: list of strings in the format of "a=b" to override configs. See https://hydra.cc/docs/next/advanced/override_grammar/basic/ for syntax. Returns: the cfg object """ def safe_update(cfg, key, value): parts = key.split(".") for idx in range(1, len(parts)): prefix = ".".join(parts[:idx]) v = OmegaConf.select(cfg, prefix, default=None) if v is None: break if not OmegaConf.is_config(v): raise KeyError( f"Trying to update key {key}, but {prefix} " f"is not a config, but has type {type(v)}." ) OmegaConf.update(cfg, key, value, merge=True) from hydra.core.override_parser.overrides_parser import OverridesParser parser = OverridesParser.create() overrides = parser.parse_overrides(overrides) for o in overrides: key = o.key_or_group value = o.value() if o.is_delete(): # TODO support this raise NotImplementedError("deletion is not yet a supported override") safe_update(cfg, key, value) return cfg @staticmethod def to_py(cfg, prefix: str = "cfg."): """ Convert a config object into its equivalent Python code. 
Args: cfg: an omegaconf config object prefix: root name for the resulting code (default: "cfg.") Returns: str of formatted Python code """ import black cfg = OmegaConf.to_container(cfg, resolve=True) def _to_str(obj, prefix=None, inside_call=False): if prefix is None: prefix = [] if isinstance(obj, abc.Mapping) and "_target_" in obj: # Dict representing a function call target = _convert_target_to_string(obj.pop("_target_")) args = [] for k, v in sorted(obj.items()): args.append(f"{k}={_to_str(v, inside_call=True)}") args = ", ".join(args) call = f"{target}({args})" return "".join(prefix) + call elif isinstance(obj, abc.Mapping) and not inside_call: # Dict that is not inside a call is a list of top-level config objects that we # render as one object per line with dot separated prefixes key_list = [] for k, v in sorted(obj.items()): if isinstance(v, abc.Mapping) and "_target_" not in v: key_list.append(_to_str(v, prefix=prefix + [k + "."])) else: key = "".join(prefix) + k key_list.append(f"{key}={_to_str(v)}") return "\n".join(key_list) elif isinstance(obj, abc.Mapping): # Dict that is inside a call is rendered as a regular dict return ( "{" + ",".join( f"{repr(k)}: {_to_str(v, inside_call=inside_call)}" for k, v in sorted(obj.items()) ) + "}" ) elif isinstance(obj, list): return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]" else: return repr(obj) py_str = _to_str(cfg, prefix=[prefix]) try: return black.format_str(py_str, mode=black.Mode()) except black.InvalidInput: return py_str
banmo-main
third_party/detectron2_old/detectron2/config/lazy.py
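A minimal sketch of the lazy-config workflow implemented above, assuming LazyCall, LazyConfig and instantiate are exported from detectron2.config as in upstream detectron2; the config file path and override key are hypothetical:

import torch.nn as nn
from detectron2.config import LazyCall as L, LazyConfig, instantiate

conv = L(nn.Conv2d)(in_channels=3, out_channels=16, kernel_size=3)  # records the call, nothing runs yet
conv.out_channels = 32                                              # edit the recorded arguments
layer = instantiate(conv)                                           # only now is nn.Conv2d constructed

# Loading and overriding a python config file (hypothetical path; overrides need hydra, to_py needs black):
# cfg = LazyConfig.load("configs/my_lazy_config.py")
# cfg = LazyConfig.apply_overrides(cfg, ["model.backbone.depth=101"])
# print(LazyConfig.to_py(cfg))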
# Copyright (c) Facebook, Inc. and its affiliates. import importlib.abc import importlib.util from pathlib import Path _PROJECTS = { "point_rend": "PointRend", "deeplab": "DeepLab", "panoptic_deeplab": "Panoptic-DeepLab", } _PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent / "projects" if _PROJECT_ROOT.is_dir(): # This is true only for in-place installation (pip install -e, setup.py develop), # where setup(package_dir=) does not work: https://github.com/pypa/setuptools/issues/230 class _D2ProjectsFinder(importlib.abc.MetaPathFinder): def find_spec(self, name, path, target=None): if not name.startswith("detectron2.projects."): return project_name = name.split(".")[-1] project_dir = _PROJECTS.get(project_name) if not project_dir: return target_file = _PROJECT_ROOT / f"{project_dir}/{project_name}/__init__.py" if not target_file.is_file(): return return importlib.util.spec_from_file_location(name, target_file) import sys sys.meta_path.append(_D2ProjectsFinder())
banmo-main
third_party/detectron2_old/detectron2/projects/__init__.py
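A small sketch of what the finder above enables; it only applies to an in-place (editable) install where the projects/ directory sits next to the package, so the import below is illustrative rather than guaranteed to succeed:

# With `pip install -e .`, project packages resolve through the meta path finder above:
from detectron2.projects import point_rend  # loads projects/PointRend/point_rend/__init__.py
# Unknown project names fall through to the normal import machinery and raise ImportError.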
# Copyright (c) Facebook, Inc. and its affiliates. import cloudpickle class PicklableWrapper(object): """ Wrap an object to make it more picklable. Note that it uses heavyweight serialization libraries (cloudpickle) that are slower than pickle. It's best to use it only on closures (which are usually not picklable). This is a simplified version of https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py """ def __init__(self, obj): self._obj = obj def __reduce__(self): s = cloudpickle.dumps(self._obj) return cloudpickle.loads, (s,) def __call__(self, *args, **kwargs): return self._obj(*args, **kwargs) def __getattr__(self, attr): # Ensure that the wrapped object can be used seamlessly as the previous object. if attr not in ["_obj"]: return getattr(self._obj, attr) return getattr(self, attr)
banmo-main
third_party/detectron2_old/detectron2/utils/serialize.py
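A minimal usage sketch for PicklableWrapper: a closure becomes picklable because __reduce__ round-trips it through cloudpickle (unpickling returns the original callable, not the wrapper):

import pickle
from detectron2.utils.serialize import PicklableWrapper

scale = 2.5
fn = PicklableWrapper(lambda x: x * scale)   # a bare lambda/closure is not picklable on its own

blob = pickle.dumps(fn)
fn2 = pickle.loads(blob)                     # the original callable, reconstructed via cloudpickle
assert fn2(4) == 10.0
assert fn(4) == 10.0                         # the wrapper itself stays callable and proxies attributes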
# Copyright (c) Facebook, Inc. and its affiliates. """ An awesome colormap for really neat visualizations. Copied from Detectron, and removed gray colors. """ import numpy as np __all__ = ["colormap", "random_color"] # fmt: off # RGB: _COLORS = np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000 ] ).astype(np.float32).reshape(-1, 3) # fmt: on def colormap(rgb=False, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] """ assert maximum in [255, 1], maximum c = _COLORS * maximum if not rgb: c = c[:, ::-1] return c def random_color(rgb=False, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers """ idx = np.random.randint(0, len(_COLORS)) ret = _COLORS[idx] * maximum if not rgb: ret = ret[::-1] return ret if __name__ == "__main__": import cv2 size = 100 H, W = 10, 10 canvas = np.random.rand(H * size, W * size, 3).astype("float32") for h in range(H): for w in range(W): idx = h * W + w if idx >= len(_COLORS): break canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] cv2.imshow("a", canvas) cv2.waitKey(0)
banmo-main
third_party/detectron2_old/detectron2/utils/colormap.py
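A quick sketch of the two helpers defined above:

from detectron2.utils.colormap import colormap, random_color

colors = colormap(rgb=True, maximum=1)       # (N, 3) float32 array of RGB colors in [0, 1]
assert colors.shape[1] == 3 and colors.max() <= 1.0

bgr = random_color(rgb=False, maximum=255)   # one BGR color as a length-3 vector in [0, 255]
print(colors[:3], bgr)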
# Copyright (c) Facebook, Inc. and its affiliates. import importlib import importlib.util import logging import numpy as np import os import random import sys from datetime import datetime import torch __all__ = ["seed_all_rng"] TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) """ PyTorch version as a tuple of 2 ints. Useful for comparison. """ DOC_BUILDING = os.getenv("_DOC_BUILDING", False) # set in docs/conf.py """ Whether we're building documentation. """ def seed_all_rng(seed=None): """ Set the random seed for the RNG in torch, numpy and python. Args: seed (int): if None, will use a strong random seed. """ if seed is None: seed = ( os.getpid() + int(datetime.now().strftime("%S%f")) + int.from_bytes(os.urandom(2), "big") ) logger = logging.getLogger(__name__) logger.info("Using a generated random seed {}".format(seed)) np.random.seed(seed) torch.manual_seed(seed) random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path def _import_file(module_name, file_path, make_importable=False): spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) if make_importable: sys.modules[module_name] = module return module def _configure_libraries(): """ Configurations for some libraries. """ # An environment option to disable `import cv2` globally, # in case it leads to negative performance impact disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) if disable_cv2: sys.modules["cv2"] = None else: # Disable opencl in opencv since its interaction with cuda often has negative effects # This envvar is supported after OpenCV 3.4.0 os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" try: import cv2 if int(cv2.__version__.split(".")[0]) >= 3: cv2.ocl.setUseOpenCL(False) except ModuleNotFoundError: # Other types of ImportError, if happened, should not be ignored. # Because a failed opencv import could mess up address space # https://github.com/skvark/opencv-python/issues/381 pass def get_version(module, digit=2): return tuple(map(int, module.__version__.split(".")[:digit])) # fmt: off assert get_version(torch) >= (1, 4), "Requires torch>=1.4" import fvcore assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2" import yaml assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" # fmt: on _ENV_SETUP_DONE = False def setup_environment(): """Perform environment setup work. The default setup is a no-op, but this function allows the user to specify a Python source file or a module in the $DETECTRON2_ENV_MODULE environment variable, that performs custom setup work that may be necessary to their computing environment. """ global _ENV_SETUP_DONE if _ENV_SETUP_DONE: return _ENV_SETUP_DONE = True _configure_libraries() custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") if custom_module_path: setup_custom_environment(custom_module_path) else: # The default setup is a no-op pass def setup_custom_environment(custom_module): """ Load custom environment setup by importing a Python source file or a module, and run the setup function. 
""" if custom_module.endswith(".py"): module = _import_file("detectron2.utils.env.custom_module", custom_module) else: module = importlib.import_module(custom_module) assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( "Custom environment module defined in {} does not have the " "required callable attribute 'setup_environment'." ).format(custom_module) module.setup_environment() def fixup_module_metadata(module_name, namespace, keys=None): """ Fix the __qualname__ of module members to be their exported api name, so when they are referenced in docs, sphinx can find them. Reference: https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241 """ if not DOC_BUILDING: return seen_ids = set() def fix_one(qualname, name, obj): # avoid infinite recursion (relevant when using # typing.Generic, for example) if id(obj) in seen_ids: return seen_ids.add(id(obj)) mod = getattr(obj, "__module__", None) if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")): obj.__module__ = module_name # Modules, unlike everything else in Python, put fully-qualitied # names into their __name__ attribute. We check for "." to avoid # rewriting these. if hasattr(obj, "__name__") and "." not in obj.__name__: obj.__name__ = name obj.__qualname__ = qualname if isinstance(obj, type): for attr_name, attr_value in obj.__dict__.items(): fix_one(objname + "." + attr_name, attr_name, attr_value) if keys is None: keys = namespace.keys() for objname in keys: if not objname.startswith("_"): obj = namespace[objname] fix_one(objname, objname, obj)
banmo-main
third_party/detectron2_old/detectron2/utils/env.py
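A short sketch of the seeding helper and the optional custom environment hook defined above (the module path in the comment is hypothetical):

from detectron2.utils.env import seed_all_rng

seed_all_rng(42)   # seeds torch, numpy and python's `random`, and sets PYTHONHASHSEED
seed_all_rng()     # no argument: derive a strong seed from pid, time and os.urandom

# Site-specific setup can be hooked in by pointing DETECTRON2_ENV_MODULE at a .py file
# or an importable module that defines a callable `setup_environment()`:
#   export DETECTRON2_ENV_MODULE=/path/to/my_env_setup.py   (hypothetical path)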
# Copyright (c) Facebook, Inc. and its affiliates. # -*- coding: utf-8 -*- import typing import fvcore from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table from torch import nn from detectron2.export import TracingAdapter __all__ = [ "activation_count_operators", "flop_count_operators", "parameter_count_table", "parameter_count", ] FLOPS_MODE = "flops" ACTIVATIONS_MODE = "activations" # Some extra ops to ignore from counting, including elementwise and reduction ops _IGNORED_OPS = { "aten::add", "aten::add_", "aten::argmax", "aten::argsort", "aten::batch_norm", "aten::constant_pad_nd", "aten::div", "aten::div_", "aten::exp", "aten::log2", "aten::max_pool2d", "aten::meshgrid", "aten::mul", "aten::mul_", "aten::neg", "aten::nonzero_numpy", "aten::reciprocal", "aten::rsub", "aten::sigmoid", "aten::sigmoid_", "aten::softmax", "aten::sort", "aten::sqrt", "aten::sub", "torchvision::nms", # TODO estimate flop for nms } class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis): """ Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models. """ def __init__(self, model, inputs): """ Args: model (nn.Module): inputs (Any): inputs of the given model. Does not have to be tuple of tensors. """ wrapper = TracingAdapter(model, inputs, allow_non_tensor=True) super().__init__(wrapper, wrapper.flattened_inputs) self.set_op_handle(**{k: None for k in _IGNORED_OPS}) def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]: """ Implement operator-level flops counting using jit. This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard detection models in detectron2. Please use :class:`FlopCountAnalysis` for more advanced functionalities. Note: The function runs the input through the model to compute flops. The flops of a detection model is often input-dependent, for example, the flops of box & mask head depends on the number of proposals & the number of detected objects. Therefore, the flops counting using a single input may not accurately reflect the computation cost of a model. It's recommended to average across a number of inputs. Args: model: a detectron2 model that takes `list[dict]` as input. inputs (list[dict]): inputs to model, in detectron2's standard format. Only "image" key will be used. supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` Returns: Counter: Gflop count per operator """ old_train = model.training model.eval() ret = FlopCountAnalysis(model, inputs).by_operator() model.train(old_train) return {k: v / 1e9 for k, v in ret.items()} def activation_count_operators( model: nn.Module, inputs: list, **kwargs ) -> typing.DefaultDict[str, float]: """ Implement operator-level activations counting using jit. This is a wrapper of fvcore.nn.activation_count, that supports standard detection models in detectron2. Note: The function runs the input through the model to compute activations. The activations of a detection model is often input-dependent, for example, the activations of box & mask head depends on the number of proposals & the number of detected objects. Args: model: a detectron2 model that takes `list[dict]` as input. inputs (list[dict]): inputs to model, in detectron2's standard format. Only "image" key will be used. 
Returns: Counter: activation count per operator """ return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs) def _wrapper_count_operators( model: nn.Module, inputs: list, mode: str, **kwargs ) -> typing.DefaultDict[str, float]: # ignore some ops supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS} supported_ops.update(kwargs.pop("supported_ops", {})) kwargs["supported_ops"] = supported_ops assert len(inputs) == 1, "Please use batch size=1" tensor_input = inputs[0]["image"] inputs = [{"image": tensor_input}] # remove other keys, in case there are any old_train = model.training if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): model = model.module wrapper = TracingAdapter(model, inputs) wrapper.eval() if mode == FLOPS_MODE: ret = flop_count(wrapper, (tensor_input,), **kwargs) elif mode == ACTIVATIONS_MODE: ret = activation_count(wrapper, (tensor_input,), **kwargs) else: raise NotImplementedError("Count for mode {} is not supported yet.".format(mode)) # compatible with change in fvcore if isinstance(ret, tuple): ret = ret[0] model.train(old_train) return ret
banmo-main
third_party/detectron2_old/detectron2/utils/analysis.py
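A rough sketch of the flop-counting utilities above. Here `model` is assumed to be a detectron2 model built elsewhere (e.g. via build_model(cfg) plus a loaded checkpoint), and the input tensor shape is arbitrary:

import torch
from detectron2.utils.analysis import FlopCountAnalysis, flop_count_operators

inputs = [{"image": torch.rand(3, 800, 800)}]   # detectron2's standard list[dict] format, batch size 1
print(flop_count_operators(model, inputs))      # per-operator GFlops, measured on this single input

flops = FlopCountAnalysis(model, inputs)        # fvcore analysis plus detectron2 input flattening
print(flops.total(), flops.by_operator())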
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains primitives for multi-gpu communication. This is useful when doing distributed training. """ import functools import logging import numpy as np import pickle import torch import torch.distributed as dist _LOCAL_PROCESS_GROUP = None """ A torch process group which only includes processes that on the same machine as the current process. This variable is set when processes are spawned by `launch()` in "engine/launch.py". """ def get_world_size() -> int: if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def get_rank() -> int: if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def get_local_rank() -> int: """ Returns: The rank of the current process within the local (per-machine) process group. """ if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 assert _LOCAL_PROCESS_GROUP is not None return dist.get_rank(group=_LOCAL_PROCESS_GROUP) def get_local_size() -> int: """ Returns: The size of the per-machine process group, i.e. the number of processes per machine. """ if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) def is_main_process() -> bool: return get_rank() == 0 def synchronize(): """ Helper function to synchronize (barrier) among all processes when using distributed training """ if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return dist.barrier() @functools.lru_cache() def _get_global_gloo_group(): """ Return a process group based on gloo backend, containing all the ranks The result is cached. """ if dist.get_backend() == "nccl": return dist.new_group(backend="gloo") else: return dist.group.WORLD def _serialize_to_tensor(data, group): backend = dist.get_backend(group) assert backend in ["gloo", "nccl"] device = torch.device("cpu" if backend == "gloo" else "cuda") buffer = pickle.dumps(data) if len(buffer) > 1024 ** 3: logger = logging.getLogger(__name__) logger.warning( "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( get_rank(), len(buffer) / (1024 ** 3), device ) ) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to(device=device) return tensor def _pad_to_largest_tensor(tensor, group): """ Returns: list[int]: size of the tensor, on each rank Tensor: padded tensor that has the max size """ world_size = dist.get_world_size(group=group) assert ( world_size >= 1 ), "comm.gather/all_gather must be called from ranks within the given group!" local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) size_list = [ torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) ] dist.all_gather(size_list, local_size, group=group) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes if local_size != max_size: padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device) tensor = torch.cat((tensor, padding), dim=0) return size_list, tensor def all_gather(data, group=None): """ Run all_gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object group: a torch process group. By default, will use a group which contains all ranks on gloo backend. 
Returns: list[data]: list of data gathered from each rank """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() if dist.get_world_size(group) == 1: return [data] tensor = _serialize_to_tensor(data, group) size_list, tensor = _pad_to_largest_tensor(tensor, group) max_size = max(size_list) # receiving Tensor from all ranks tensor_list = [ torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list ] dist.all_gather(tensor_list, tensor, group=group) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def gather(data, dst=0, group=None): """ Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list. """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() if dist.get_world_size(group=group) == 1: return [data] rank = dist.get_rank(group=group) tensor = _serialize_to_tensor(data, group) size_list, tensor = _pad_to_largest_tensor(tensor, group) # receiving Tensor from all ranks if rank == dst: max_size = max(size_list) tensor_list = [ torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list ] dist.gather(tensor, tensor_list, dst=dst, group=group) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list else: dist.gather(tensor, [], dst=dst, group=group) return [] def shared_random_seed(): """ Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. """ ints = np.random.randint(2 ** 31) all_ints = all_gather(ints) return all_ints[0] def reduce_dict(input_dict, average=True): """ Reduce the values in the dictionary from all processes so that process with rank 0 has the reduced results. Args: input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. average (bool): whether to do average or sum Returns: a dict with the same keys as input_dict, after reduction. """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.reduce(values, dst=0) if dist.get_rank() == 0 and average: # only main process gets accumulated, so only divide by # world_size in this case values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict
banmo-main
third_party/detectron2_old/detectron2/utils/comm.py
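A sketch of how these communication helpers are typically used inside a training loop; every call degrades gracefully to a no-op when torch.distributed is not initialized, so the snippet also runs single-process:

import torch
from detectron2.utils import comm

if comm.is_main_process():
    print("world size:", comm.get_world_size(), "rank:", comm.get_rank())

stats = {"rank": comm.get_rank(), "n_samples": 128}
all_stats = comm.all_gather(stats)        # list with one picklable object per rank

losses = {"loss_cls": torch.tensor(0.7), "loss_box_reg": torch.tensor(0.3)}
reduced = comm.reduce_dict(losses)        # rank 0 receives the averaged values
comm.synchronize()                        # barrier across all ranks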
# Copyright (c) Facebook, Inc. and its affiliates. import logging from contextlib import contextmanager from functools import wraps import torch __all__ = ["retry_if_cuda_oom"] @contextmanager def _ignore_torch_cuda_oom(): """ A context which ignores CUDA OOM exception from pytorch. """ try: yield except RuntimeError as e: # NOTE: the string may change? if "CUDA out of memory. " in str(e): pass else: raise def retry_if_cuda_oom(func): """ Makes a function retry itself after encountering pytorch's CUDA OOM error. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will then retry by trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. The return values may become CPU tensors as well and it's user's responsibility to convert it back to CUDA tensor if needed. Args: func: a stateless callable that takes tensor-like objects as arguments Returns: a callable which retries `func` if OOM is encountered. Examples: :: output = retry_if_cuda_oom(some_torch_function)(input1, input2) # output may be on CPU even if inputs are on GPU Note: 1. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 2. Since the function might be called more than once, it has to be stateless. """ def maybe_to_cpu(x): try: like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") except AttributeError: like_gpu_tensor = False if like_gpu_tensor: return x.to(device="cpu") else: return x @wraps(func) def wrapped(*args, **kwargs): with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # Clear cache and retry torch.cuda.empty_cache() with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # Try on CPU. This slows down the code significantly, therefore print a notice. logger = logging.getLogger(__name__) logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) new_args = (maybe_to_cpu(x) for x in args) new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} return func(*new_args, **new_kwargs) return wrapped
banmo-main
third_party/detectron2_old/detectron2/utils/memory.py
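A small sketch of retry_if_cuda_oom around a function whose intermediate result can be large; on CPU tensors it simply runs once:

import torch
from detectron2.utils.memory import retry_if_cuda_oom

def pairwise_distances(a, b):
    # may allocate a large (N, M) intermediate when a and b live on the GPU
    return torch.cdist(a, b)

a = torch.rand(1000, 128)
b = torch.rand(2000, 128)
# On CUDA OOM: retried after torch.cuda.empty_cache(), then retried with CPU inputs.
dists = retry_if_cuda_oom(pairwise_distances)(a, b)
print(dists.shape)   # torch.Size([1000, 2000])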
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np import pycocotools.mask as mask_util from detectron2.utils.visualizer import ( ColorMode, Visualizer, _create_text_labels, _PanopticPrediction, ) from .colormap import random_color class _DetectedInstance: """ Used to store data about detected objects in video frame, in order to transfer color to objects in the future frames. Attributes: label (int): bbox (tuple[float]): mask_rle (dict): color (tuple[float]): RGB colors in range (0, 1) ttl (int): time-to-live for the instance. For example, if ttl=2, the instance color can be transferred to objects in the next two frames. """ __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"] def __init__(self, label, bbox, mask_rle, color, ttl): self.label = label self.bbox = bbox self.mask_rle = mask_rle self.color = color self.ttl = ttl class VideoVisualizer: def __init__(self, metadata, instance_mode=ColorMode.IMAGE): """ Args: metadata (MetadataCatalog): image metadata. """ self.metadata = metadata self._old_instances = [] assert instance_mode in [ ColorMode.IMAGE, ColorMode.IMAGE_BW, ], "Other mode not supported yet." self._instance_mode = instance_mode def draw_instance_predictions(self, frame, predictions): """ Draw instance-level prediction results on an image. Args: frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255]. predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ frame_visualizer = Visualizer(frame, self.metadata) num_instances = len(predictions) if num_instances == 0: return frame_visualizer.output boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = predictions.pred_masks # mask IOU is not yet enabled # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F")) # assert len(masks_rles) == num_instances else: masks = None detected = [ _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=None, ttl=8) for i in range(num_instances) ] colors = self._assign_colors(detected) labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) if self._instance_mode == ColorMode.IMAGE_BW: # any() returns uint8 tensor frame_visualizer.output.img = frame_visualizer._create_grayscale_image( (masks.any(dim=0) > 0).numpy() if masks is not None else None ) alpha = 0.3 else: alpha = 0.5 frame_visualizer.overlay_instances( boxes=None if masks is not None else boxes, # boxes are a bit distracting masks=masks, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return frame_visualizer.output def draw_sem_seg(self, frame, sem_seg, area_threshold=None): """ Args: sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W), each value is the integer label. 
area_threshold (Optional[int]): only draw segmentations larger than the threshold """ # don't need to do anything special frame_visualizer = Visualizer(frame, self.metadata) frame_visualizer.draw_sem_seg(sem_seg, area_threshold=None) return frame_visualizer.output def draw_panoptic_seg_predictions( self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5 ): frame_visualizer = Visualizer(frame, self.metadata) pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: frame_visualizer.output.img = frame_visualizer._create_grayscale_image( pred.non_empty_mask() ) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None frame_visualizer.draw_binary_mask( mask, color=mask_color, text=self.metadata.stuff_classes[category_idx], alpha=alpha, area_threshold=area_threshold, ) all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return frame_visualizer.output # draw mask for all instances second masks, sinfo = list(zip(*all_instances)) num_instances = len(masks) masks_rles = mask_util.encode( np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F") ) assert len(masks_rles) == num_instances category_ids = [x["category_id"] for x in sinfo] detected = [ _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8) for i in range(num_instances) ] colors = self._assign_colors(detected) labels = [self.metadata.thing_classes[k] for k in category_ids] frame_visualizer.overlay_instances( boxes=None, masks=masks, labels=labels, keypoints=None, assigned_colors=colors, alpha=alpha, ) return frame_visualizer.output def _assign_colors(self, instances): """ Naive tracking heuristics to assign same color to the same instance, will update the internal state of tracked instances. Returns: list[tuple[float]]: list of colors. 
""" # Compute iou with either boxes or masks: is_crowd = np.zeros((len(instances),), dtype=np.bool) if instances[0].bbox is None: assert instances[0].mask_rle is not None # use mask iou only when box iou is None # because box seems good enough rles_old = [x.mask_rle for x in self._old_instances] rles_new = [x.mask_rle for x in instances] ious = mask_util.iou(rles_old, rles_new, is_crowd) threshold = 0.5 else: boxes_old = [x.bbox for x in self._old_instances] boxes_new = [x.bbox for x in instances] ious = mask_util.iou(boxes_old, boxes_new, is_crowd) threshold = 0.6 if len(ious) == 0: ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32") # Only allow matching instances of the same label: for old_idx, old in enumerate(self._old_instances): for new_idx, new in enumerate(instances): if old.label != new.label: ious[old_idx, new_idx] = 0 matched_new_per_old = np.asarray(ious).argmax(axis=1) max_iou_per_old = np.asarray(ious).max(axis=1) # Try to find match for each old instance: extra_instances = [] for idx, inst in enumerate(self._old_instances): if max_iou_per_old[idx] > threshold: newidx = matched_new_per_old[idx] if instances[newidx].color is None: instances[newidx].color = inst.color continue # If an old instance does not match any new instances, # keep it for the next frame in case it is just missed by the detector inst.ttl -= 1 if inst.ttl > 0: extra_instances.append(inst) # Assign random color to newly-detected instances: for inst in instances: if inst.color is None: inst.color = random_color(rgb=True, maximum=1) self._old_instances = instances[:] + extra_instances return [d.color for d in instances]
banmo-main
third_party/detectron2_old/detectron2/utils/video_visualizer.py
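A rough usage sketch for VideoVisualizer. `predictor` and `frames` are placeholders (e.g. a DefaultPredictor and frames read with cv2.VideoCapture); the point is that instance colors persist across frames via the tracking heuristic above:

from detectron2.data import MetadataCatalog
from detectron2.utils.video_visualizer import VideoVisualizer

metadata = MetadataCatalog.get("coco_2017_val")
video_vis = VideoVisualizer(metadata)

for frame_bgr in frames:                                      # placeholder iterable of BGR frames
    frame_rgb = frame_bgr[:, :, ::-1]
    instances = predictor(frame_bgr)["instances"].to("cpu")   # placeholder detectron2 predictor
    out = video_vis.draw_instance_predictions(frame_rgb, instances)
    # out.get_image() is an RGB ndarray ready to be written to the output video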
# Copyright (c) Facebook, Inc. and its affiliates. from typing import Any import pydoc from fvcore.common.registry import Registry # for backward compatibility. """ ``Registry`` and `locate` provide ways to map a string (typically found in config files) to callable objects. """ __all__ = ["Registry", "locate"] def _convert_target_to_string(t: Any) -> str: """ Inverse of ``locate()``. Args: t: any object with ``__module__`` and ``__qualname__`` """ module, qualname = t.__module__, t.__qualname__ # Compress the path to this object, e.g. ``module.submodule._impl.class`` # may become ``module.submodule.class``, if the later also resolves to the same # object. This simplifies the string, and also is less affected by moving the # class implementation. module_parts = module.split(".") for k in range(1, len(module_parts)): prefix = ".".join(module_parts[:k]) candidate = f"{prefix}.{qualname}" try: if locate(candidate) is t: return candidate except ImportError: pass return f"{module}.{qualname}" def locate(name: str) -> Any: """ Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, such as "module.submodule.class_name". Raise Exception if it cannot be found. """ obj = pydoc.locate(name) # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly # by pydoc.locate. Try a private function from hydra. if obj is None: try: # from hydra.utils import get_method - will print many errors from hydra.utils import _locate except ImportError as e: raise ImportError(f"Cannot dynamically locate object {name}!") from e else: obj = _locate(name) # it raises if fails return obj
banmo-main
third_party/detectron2_old/detectron2/utils/registry.py
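A minimal sketch of the two utilities above; the registry name and the registered class are made up for illustration:

import collections
from detectron2.utils.registry import Registry, locate

DEMO_BACKBONE_REGISTRY = Registry("DEMO_BACKBONE")   # hypothetical registry

@DEMO_BACKBONE_REGISTRY.register()
class TinyBackbone:
    pass

assert DEMO_BACKBONE_REGISTRY.get("TinyBackbone") is TinyBackbone  # string-to-class lookup, as configs do

# `locate` resolves a dotted "module.qualname" string to the live object:
assert locate("collections.OrderedDict") is collections.OrderedDict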
# Copyright (c) Facebook, Inc. and its affiliates. import datetime import json import logging import os import time from collections import defaultdict from contextlib import contextmanager from typing import Optional import torch from fvcore.common.history_buffer import HistoryBuffer from detectron2.utils.file_io import PathManager __all__ = [ "get_event_storage", "JSONWriter", "TensorboardXWriter", "CommonMetricPrinter", "EventStorage", ] _CURRENT_STORAGE_STACK = [] def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] class EventWriter: """ Base class for writers that obtain events from :class:`EventStorage` and process them. """ def write(self): raise NotImplementedError def close(self): pass class JSONWriter(EventWriter): """ Write scalars to a json file. It saves scalars as one json per line (instead of a big json) for easy parsing. Examples parsing such a json file: :: $ cat metrics.json | jq -s '.[0:2]' [ { "data_time": 0.008433341979980469, "iteration": 19, "loss": 1.9228371381759644, "loss_box_reg": 0.050025828182697296, "loss_classifier": 0.5316952466964722, "loss_mask": 0.7236229181289673, "loss_rpn_box": 0.0856662318110466, "loss_rpn_cls": 0.48198649287223816, "lr": 0.007173333333333333, "time": 0.25401854515075684 }, { "data_time": 0.007216215133666992, "iteration": 39, "loss": 1.282649278640747, "loss_box_reg": 0.06222952902317047, "loss_classifier": 0.30682939291000366, "loss_mask": 0.6970193982124329, "loss_rpn_box": 0.038663312792778015, "loss_rpn_cls": 0.1471673548221588, "lr": 0.007706666666666667, "time": 0.2490077018737793 } ] $ cat metrics.json | jq '.loss_mask' 0.7126231789588928 0.689423680305481 0.6776131987571716 ... """ def __init__(self, json_file, window_size=20): """ Args: json_file (str): path to the json file. New data will be appended if the file exists. window_size (int): the window size of median smoothing for the scalars whose `smoothing_hint` are True. """ self._file_handle = PathManager.open(json_file, "a") self._window_size = window_size self._last_write = -1 def write(self): storage = get_event_storage() to_save = defaultdict(dict) for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): # keep scalars that have not been written if iter <= self._last_write: continue to_save[iter][k] = v if len(to_save): all_iters = sorted(to_save.keys()) self._last_write = max(all_iters) for itr, scalars_per_iter in to_save.items(): scalars_per_iter["iteration"] = itr self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") self._file_handle.flush() try: os.fsync(self._file_handle.fileno()) except AttributeError: pass def close(self): self._file_handle.close() class TensorboardXWriter(EventWriter): """ Write all scalars to a tensorboard file. 
""" def __init__(self, log_dir: str, window_size: int = 20, **kwargs): """ Args: log_dir (str): the directory to save the output events window_size (int): the scalars will be median-smoothed by this window size kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` """ self._window_size = window_size from torch.utils.tensorboard import SummaryWriter self._writer = SummaryWriter(log_dir, **kwargs) self._last_write = -1 def write(self): storage = get_event_storage() new_last_write = self._last_write for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): if iter > self._last_write: self._writer.add_scalar(k, v, iter) new_last_write = max(new_last_write, iter) self._last_write = new_last_write # storage.put_{image,histogram} is only meant to be used by # tensorboard writer. So we access its internal fields directly from here. if len(storage._vis_data) >= 1: for img_name, img, step_num in storage._vis_data: self._writer.add_image(img_name, img, step_num) # Storage stores all image data and rely on this writer to clear them. # As a result it assumes only one writer will use its image data. # An alternative design is to let storage store limited recent # data (e.g. only the most recent image) that all writers can access. # In that case a writer may not see all image data if its period is long. storage.clear_images() if len(storage._histograms) >= 1: for params in storage._histograms: self._writer.add_histogram_raw(**params) storage.clear_histograms() def close(self): if hasattr(self, "_writer"): # doesn't exist when the code fails at import self._writer.close() class CommonMetricPrinter(EventWriter): """ Print **common** metrics to the terminal, including iteration time, ETA, memory, all losses, and the learning rate. It also applies smoothing using a window of 20 elements. It's meant to print common metrics in common ways. To print something in more customized ways, please implement a similar printer by yourself. """ def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): """ Args: max_iter: the maximum number of iterations to train. Used to compute ETA. If not given, ETA will not be printed. window_size (int): the losses will be median-smoothed by this window size """ self.logger = logging.getLogger(__name__) self._max_iter = max_iter self._window_size = window_size self._last_write = None # (step, time) of last call to write(). Used to compute ETA def _get_eta(self, storage) -> Optional[str]: if self._max_iter is None: return "" iteration = storage.iter try: eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1) storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) return str(datetime.timedelta(seconds=int(eta_seconds))) except KeyError: # estimate eta on our own - more noisy eta_string = None if self._last_write is not None: estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( iteration - self._last_write[0] ) eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) self._last_write = (iteration, time.perf_counter()) return eta_string def write(self): storage = get_event_storage() iteration = storage.iter if iteration == self._max_iter: # This hook only reports training progress (loss, ETA, etc) but not other data, # therefore do not write anything after training succeeds, even if this method # is called. 
return try: data_time = storage.history("data_time").avg(20) except KeyError: # they may not exist in the first few iterations (due to warmup) # or when SimpleTrainer is not used data_time = None try: iter_time = storage.history("time").global_avg() except KeyError: iter_time = None try: lr = "{:.5g}".format(storage.history("lr").latest()) except KeyError: lr = "N/A" eta_string = self._get_eta(storage) if torch.cuda.is_available(): max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 else: max_mem_mb = None # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" self.logger.info( " {eta}iter: {iter} {losses} {time}{data_time}lr: {lr} {memory}".format( eta=f"eta: {eta_string} " if eta_string else "", iter=iteration, losses=" ".join( [ "{}: {:.4g}".format(k, v.median(self._window_size)) for k, v in storage.histories().items() if "loss" in k ] ), time="time: {:.4f} ".format(iter_time) if iter_time is not None else "", data_time="data_time: {:.4f} ".format(data_time) if data_time is not None else "", lr=lr, memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", ) ) class EventStorage: """ The user-facing class that provides metric storage functionalities. In the future we may add support for storing / logging other types of data if needed. """ def __init__(self, start_iter=0): """ Args: start_iter (int): the iteration number to start with """ self._history = defaultdict(HistoryBuffer) self._smoothing_hints = {} self._latest_scalars = {} self._iter = start_iter self._current_prefix = "" self._vis_data = [] self._histograms = [] def put_image(self, img_name, img_tensor): """ Add an `img_tensor` associated with `img_name`, to be shown on tensorboard. Args: img_name (str): The name of the image to put into tensorboard. img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` Tensor of shape `[channel, height, width]` where `channel` is 3. The image format should be RGB. The elements in img_tensor can either have values in [0, 1] (float32) or [0, 255] (uint8). The `img_tensor` will be visualized in tensorboard. """ self._vis_data.append((img_name, img_tensor, self._iter)) def put_scalar(self, name, value, smoothing_hint=True): """ Add a scalar `value` to the `HistoryBuffer` associated with `name`. Args: smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be smoothed when logged. The hint will be accessible through :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint and apply custom smoothing rule. It defaults to True because most scalars we save need to be smoothed to provide any useful signal. """ name = self._current_prefix + name history = self._history[name] value = float(value) history.update(value, self._iter) self._latest_scalars[name] = (value, self._iter) existing_hint = self._smoothing_hints.get(name) if existing_hint is not None: assert ( existing_hint == smoothing_hint ), "Scalar {} was put with a different smoothing_hint!".format(name) else: self._smoothing_hints[name] = smoothing_hint def put_scalars(self, *, smoothing_hint=True, **kwargs): """ Put multiple scalars from keyword arguments. Examples: storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) """ for k, v in kwargs.items(): self.put_scalar(k, v, smoothing_hint=smoothing_hint) def put_histogram(self, hist_name, hist_tensor, bins=1000): """ Create a histogram from a tensor. Args: hist_name (str): The name of the histogram to put into tensorboard. 
hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted into a histogram. bins (int): Number of histogram bins. """ ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() # Create a histogram with PyTorch hist_counts = torch.histc(hist_tensor, bins=bins) hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) # Parameter for the add_histogram_raw function of SummaryWriter hist_params = dict( tag=hist_name, min=ht_min, max=ht_max, num=len(hist_tensor), sum=float(hist_tensor.sum()), sum_squares=float(torch.sum(hist_tensor ** 2)), bucket_limits=hist_edges[1:].tolist(), bucket_counts=hist_counts.tolist(), global_step=self._iter, ) self._histograms.append(hist_params) def history(self, name): """ Returns: HistoryBuffer: the scalar history for name """ ret = self._history.get(name, None) if ret is None: raise KeyError("No history metric available for {}!".format(name)) return ret def histories(self): """ Returns: dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars """ return self._history def latest(self): """ Returns: dict[str -> (float, int)]: mapping from the name of each scalar to the most recent value and the iteration number its added. """ return self._latest_scalars def latest_with_smoothing_hint(self, window_size=20): """ Similar to :meth:`latest`, but the returned values are either the un-smoothed original latest value, or a median of the given window_size, depend on whether the smoothing_hint is True. This provides a default behavior that other writers can use. """ result = {} for k, (v, itr) in self._latest_scalars.items(): result[k] = ( self._history[k].median(window_size) if self._smoothing_hints[k] else v, itr, ) return result def smoothing_hints(self): """ Returns: dict[name -> bool]: the user-provided hint on whether the scalar is noisy and needs smoothing. """ return self._smoothing_hints def step(self): """ User should either: (1) Call this function to increment storage.iter when needed. Or (2) Set `storage.iter` to the correct iteration number before each iteration. The storage will then be able to associate the new data with an iteration number. """ self._iter += 1 @property def iter(self): """ Returns: int: The current iteration number. When used together with a trainer, this is ensured to be the same as trainer.iter. """ return self._iter @iter.setter def iter(self, val): self._iter = int(val) @property def iteration(self): # for backward compatibility return self._iter def __enter__(self): _CURRENT_STORAGE_STACK.append(self) return self def __exit__(self, exc_type, exc_val, exc_tb): assert _CURRENT_STORAGE_STACK[-1] == self _CURRENT_STORAGE_STACK.pop() @contextmanager def name_scope(self, name): """ Yields: A context within which all the events added to this storage will be prefixed by the name scope. """ old_prefix = self._current_prefix self._current_prefix = name.rstrip("/") + "/" yield self._current_prefix = old_prefix def clear_images(self): """ Delete all the stored images for visualization. This should be called after images are written to tensorboard. """ self._vis_data = [] def clear_histograms(self): """ Delete all the stored histograms for visualization. This should be called after histograms are written to tensorboard. """ self._histograms = []
banmo-main
third_party/detectron2_old/detectron2/utils/events.py
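The events.py record above centers on `EventStorage` plus writers that consume it. A small, self-contained sketch of logging scalars inside the storage context and flushing them with `JSONWriter` (the temporary output path is arbitrary):

import os
import tempfile

from detectron2.utils.events import EventStorage, JSONWriter, get_event_storage

json_path = os.path.join(tempfile.mkdtemp(), "metrics.json")
writer = JSONWriter(json_path, window_size=20)

with EventStorage(start_iter=0) as storage:
    for it in range(3):
        storage.iter = it
        # Any code running inside the context can reach the same storage object.
        get_event_storage().put_scalar("loss", 1.0 / (it + 1))
        with storage.name_scope("val"):
            storage.put_scalar("accuracy", 0.5 + 0.1 * it)  # stored as "val/accuracy"
        writer.write()  # appends one JSON line with the scalars new at this iteration
writer.close()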
# Copyright (c) Facebook, Inc. and its affiliates. import importlib import numpy as np import os import re import subprocess import sys from collections import defaultdict import PIL import torch import torchvision from tabulate import tabulate __all__ = ["collect_env_info"] def collect_torch_env(): try: import torch.__config__ return torch.__config__.show() except ImportError: # compatible with older versions of pytorch from torch.utils.collect_env import get_pretty_env_info return get_pretty_env_info() def get_env_module(): var_name = "DETECTRON2_ENV_MODULE" return var_name, os.environ.get(var_name, "<not set>") def detect_compute_compatibility(CUDA_HOME, so_file): try: cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") if os.path.isfile(cuobjdump): output = subprocess.check_output( "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True ) output = output.decode("utf-8").strip().split("\n") arch = [] for line in output: line = re.findall(r"\.sm_([0-9]*)\.", line)[0] arch.append(".".join(line)) arch = sorted(set(arch)) return ", ".join(arch) else: return so_file + "; cannot find cuobjdump" except Exception: # unhandled failure return so_file def collect_env_info(): has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM torch_version = torch.__version__ # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME has_rocm = False if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): has_rocm = True has_cuda = has_gpu and (not has_rocm) data = [] data.append(("sys.platform", sys.platform)) # check-template.yml depends on it data.append(("Python", sys.version.replace("\n", ""))) data.append(("numpy", np.__version__)) try: import detectron2 # noqa data.append( ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) ) except ImportError: data.append(("detectron2", "failed to import")) except AttributeError: data.append(("detectron2", "imported a wrong installation")) try: import detectron2._C as _C except ImportError as e: data.append(("detectron2._C", f"not built correctly: {e}")) # print system compilers when extension fails to build if sys.platform != "win32": # don't know what to do for windows try: # this is how torch/utils/cpp_extensions.py choose compiler cxx = os.environ.get("CXX", "c++") cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) cxx = cxx.decode("utf-8").strip().split("\n")[0] except subprocess.SubprocessError: cxx = "Not found" data.append(("Compiler ($CXX)", cxx)) if has_cuda and CUDA_HOME is not None: try: nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] except subprocess.SubprocessError: nvcc = "Not found" data.append(("CUDA compiler", nvcc)) if has_cuda and sys.platform != "win32": try: so_file = importlib.util.find_spec("detectron2._C").origin except (ImportError, AttributeError): pass else: data.append( ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) ) else: # print compilers that are used to build extension data.append(("Compiler", _C.get_compiler_version())) data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip if has_cuda and getattr(_C, "has_cuda", lambda: True)(): data.append( ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) ) data.append(get_env_module()) data.append(("PyTorch", torch_version + " 
@" + os.path.dirname(torch.__file__))) data.append(("PyTorch debug build", torch.version.debug)) data.append(("GPU available", has_gpu)) if has_gpu: devices = defaultdict(list) for k in range(torch.cuda.device_count()): cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) name = torch.cuda.get_device_name(k) + f" (arch={cap})" devices[name].append(str(k)) for name, devids in devices.items(): data.append(("GPU " + ",".join(devids), name)) if has_rocm: msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) else: msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) if cuda_arch_list: data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) data.append(("Pillow", PIL.__version__)) try: data.append( ( "torchvision", str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), ) ) if has_cuda: try: torchvision_C = importlib.util.find_spec("torchvision._C").origin msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) data.append(("torchvision arch flags", msg)) except (ImportError, AttributeError): data.append(("torchvision._C", "Not found")) except AttributeError: data.append(("torchvision", "unknown")) try: import fvcore data.append(("fvcore", fvcore.__version__)) except (ImportError, AttributeError): pass try: import iopath data.append(("iopath", iopath.__version__)) except (ImportError, AttributeError): pass try: import cv2 data.append(("cv2", cv2.__version__)) except (ImportError, AttributeError): data.append(("cv2", "Not found")) env_str = tabulate(data) + "\n" env_str += collect_torch_env() return env_str if __name__ == "__main__": try: from detectron2.utils.collect_env import collect_env_info as f print(f()) except ImportError: print(collect_env_info()) if torch.cuda.is_available(): for k in range(torch.cuda.device_count()): device = f"cuda:{k}" try: x = torch.tensor([1, 2.0], dtype=torch.float32) x = x.to(device) except Exception as e: print( f"Unable to copy tensor to device={device}: {e}. " "Your CUDA environment is broken." )
banmo-main
third_party/detectron2_old/detectron2/utils/collect_env.py
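collect_env.py is a diagnostic helper; its only public entry point is `collect_env_info()`, which returns a tabulated report as a string. A trivial sketch, assuming detectron2 and its dependencies are installed:

from detectron2.utils.collect_env import collect_env_info

# Prints Python/PyTorch/CUDA versions, GPU names, compiler info, etc.
print(collect_env_info())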
# Copyright (c) Facebook, Inc. and its affiliates.
banmo-main
third_party/detectron2_old/detectron2/utils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import atexit import functools import logging import os import sys import time from collections import Counter import torch from tabulate import tabulate from termcolor import colored from detectron2.utils.file_io import PathManager __all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] class _ColorfulFormatter(logging.Formatter): def __init__(self, *args, **kwargs): self._root_name = kwargs.pop("root_name") + "." self._abbrev_name = kwargs.pop("abbrev_name", "") if len(self._abbrev_name): self._abbrev_name = self._abbrev_name + "." super(_ColorfulFormatter, self).__init__(*args, **kwargs) def formatMessage(self, record): record.name = record.name.replace(self._root_name, self._abbrev_name) log = super(_ColorfulFormatter, self).formatMessage(record) if record.levelno == logging.WARNING: prefix = colored("WARNING", "red", attrs=["blink"]) elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: prefix = colored("ERROR", "red", attrs=["blink", "underline"]) else: return log return prefix + " " + log @functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers def setup_logger( output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None ): """ Initialize the detectron2 logger and set its verbosity level to "DEBUG". Args: output (str): a file name or a directory to save log. If None, will not save log file. If ends with ".txt" or ".log", assumed to be a file name. Otherwise, logs will be saved to `output/log.txt`. name (str): the root module name of this logger abbrev_name (str): an abbreviation of the module, to avoid long names in logs. Set to "" to not log the root module in logs. By default, will abbreviate "detectron2" to "d2" and leave other modules unchanged. Returns: logging.Logger: a logger """ logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) logger.propagate = False if abbrev_name is None: abbrev_name = "d2" if name == "detectron2" else name plain_formatter = logging.Formatter( "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" ) # stdout logging: master only if distributed_rank == 0: ch = logging.StreamHandler(stream=sys.stdout) ch.setLevel(logging.DEBUG) if color: formatter = _ColorfulFormatter( colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", datefmt="%m/%d %H:%M:%S", root_name=name, abbrev_name=str(abbrev_name), ) else: formatter = plain_formatter ch.setFormatter(formatter) logger.addHandler(ch) # file logging: all workers if output is not None: if output.endswith(".txt") or output.endswith(".log"): filename = output else: filename = os.path.join(output, "log.txt") if distributed_rank > 0: filename = filename + ".rank{}".format(distributed_rank) PathManager.mkdirs(os.path.dirname(filename)) fh = logging.StreamHandler(_cached_log_stream(filename)) fh.setLevel(logging.DEBUG) fh.setFormatter(plain_formatter) logger.addHandler(fh) return logger # cache the opened file object, so that different calls to `setup_logger` # with the same file name can safely write to the same file. @functools.lru_cache(maxsize=None) def _cached_log_stream(filename): # use 1K buffer if writing to cloud storage io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1) atexit.register(io.close) return io """ Below are some other convenient logging methods. 
They are mainly adopted from https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py """ def _find_caller(): """ Returns: str: module name of the caller tuple: a hashable key to be used to identify different callers """ frame = sys._getframe(2) while frame: code = frame.f_code if os.path.join("utils", "logger.") not in code.co_filename: mod_name = frame.f_globals["__name__"] if mod_name == "__main__": mod_name = "detectron2" return mod_name, (code.co_filename, frame.f_lineno, code.co_name) frame = frame.f_back _LOG_COUNTER = Counter() _LOG_TIMER = {} def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): """ Log only for the first n times. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. key (str or tuple[str]): the string(s) can be one of "caller" or "message", which defines how to identify duplicated logs. For example, if called with `n=1, key="caller"`, this function will only log the first call from the same caller, regardless of the message content. If called with `n=1, key="message"`, this function will log the same content only once, even if they are called from different places. If called with `n=1, key=("caller", "message")`, this function will not log only if the same caller has logged the same message before. """ if isinstance(key, str): key = (key,) assert len(key) > 0 caller_module, caller_key = _find_caller() hash_key = () if "caller" in key: hash_key = hash_key + caller_key if "message" in key: hash_key = hash_key + (msg,) _LOG_COUNTER[hash_key] += 1 if _LOG_COUNTER[hash_key] <= n: logging.getLogger(name or caller_module).log(lvl, msg) def log_every_n(lvl, msg, n=1, *, name=None): """ Log once per n times. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. """ caller_module, key = _find_caller() _LOG_COUNTER[key] += 1 if n == 1 or _LOG_COUNTER[key] % n == 1: logging.getLogger(name or caller_module).log(lvl, msg) def log_every_n_seconds(lvl, msg, n=1, *, name=None): """ Log no more than once per n seconds. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. """ caller_module, key = _find_caller() last_logged = _LOG_TIMER.get(key, None) current_time = time.time() if last_logged is None or current_time - last_logged >= n: logging.getLogger(name or caller_module).log(lvl, msg) _LOG_TIMER[key] = current_time def create_small_table(small_dict): """ Create a small table using the keys of small_dict as headers. This is only suitable for small dictionaries. Args: small_dict (dict): a result dictionary of only a few items. Returns: str: the table as a string. """ keys, values = tuple(zip(*small_dict.items())) table = tabulate( [values], headers=keys, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center", ) return table def _log_api_usage(identifier: str): """ Internal function used to log the usage of different detectron2 components inside facebook's infra. """ torch._C._log_api_usage_once("detectron2." + identifier)
banmo-main
third_party/detectron2_old/detectron2/utils/logger.py
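A short sketch of the logger.py helpers above: setting up the detectron2 logger, using the rate-limited logging utilities, and formatting a small result table (console-only output; the metric names are made up for illustration):

import logging

from detectron2.utils.logger import (
    create_small_table,
    log_every_n_seconds,
    log_first_n,
    setup_logger,
)

logger = setup_logger(output=None, distributed_rank=0, name="detectron2")
logger.info("logger is ready")

for step in range(100):
    # Logged only once per calling site, regardless of how often the loop runs.
    log_first_n(logging.WARNING, "this warning is rate-limited", n=1)
    # Logged at most once every 5 seconds.
    log_every_n_seconds(logging.INFO, "still running, step={}".format(step), n=5)

print(create_small_table({"AP": 41.2, "AP50": 60.1}))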
# Copyright (c) Facebook, Inc. and its affiliates.
from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler
from iopath.common.file_io import PathManager as PathManagerBase

__all__ = ["PathManager", "PathHandler"]


PathManager = PathManagerBase()
"""
This is a detectron2 project-specific PathManager.
We try to stay away from global PathManager in fvcore as it
introduces potential conflicts among other libraries.
"""


class Detectron2Handler(PathHandler):
    """
    Resolve anything that's hosted under detectron2's namespace.
    """

    PREFIX = "detectron2://"
    S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"

    def _get_supported_prefixes(self):
        return [self.PREFIX]

    def _get_local_path(self, path, **kwargs):
        name = path[len(self.PREFIX) :]
        return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs)

    def _open(self, path, mode="r", **kwargs):
        return PathManager.open(self._get_local_path(path), mode, **kwargs)


PathManager.register_handler(HTTPURLHandler())
PathManager.register_handler(OneDrivePathHandler())
PathManager.register_handler(Detectron2Handler())
banmo-main
third_party/detectron2_old/detectron2/utils/file_io.py
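A minimal sketch of the project-specific `PathManager` above: local paths fall through to the default handler, while `detectron2://` URIs are rewritten to the public download bucket. The commented call requires network access and the placeholder path is illustrative only:

import os
import tempfile

from detectron2.utils.file_io import PathManager

local_path = os.path.join(tempfile.mkdtemp(), "example.txt")
with PathManager.open(local_path, "w") as f:
    f.write("hello")
print(PathManager.exists(local_path))  # True

# Downloads from https://dl.fbaipublicfiles.com/detectron2/ and caches locally:
# PathManager.get_local_path("detectron2://<some/checkpoint.pkl>")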
# Copyright (c) Facebook, Inc. and its affiliates. import io import numpy as np import torch from detectron2 import model_zoo from detectron2.data import DatasetCatalog from detectron2.data.detection_utils import read_image from detectron2.modeling import build_model from detectron2.structures import Boxes, Instances, ROIMasks from detectron2.utils.file_io import PathManager """ Internal utilities for tests. Don't use except for writing tests. """ def get_model_no_weights(config_path): """ Like model_zoo.get, but do not load any weights (even pretrained) """ cfg = model_zoo.get_config(config_path) if not torch.cuda.is_available(): cfg.MODEL.DEVICE = "cpu" return build_model(cfg) def random_boxes(num_boxes, max_coord=100, device="cpu"): """ Create a random Nx4 boxes tensor, with coordinates < max_coord. """ boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5) boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression # Note: the implementation of this function in torchvision is: # boxes[:, 2:] += torch.rand(N, 2) * 100 # but it does not guarantee non-negative widths/heights constraints: # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: boxes[:, 2:] += boxes[:, :2] return boxes def get_sample_coco_image(tensor=True): """ Args: tensor (bool): if True, returns 3xHxW tensor. else, returns a HxWx3 numpy array. Returns: an image, in BGR color. """ try: file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"] if not PathManager.exists(file_name): raise FileNotFoundError() except IOError: # for public CI to run file_name = "http://images.cocodataset.org/train2017/000000000009.jpg" ret = read_image(file_name, format="BGR") if tensor: ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1))) return ret def convert_scripted_instances(instances): """ Convert a scripted Instances object to a regular :class:`Instances` object """ ret = Instances(instances.image_size) for name in instances._field_names: val = getattr(instances, "_" + name, None) if val is not None: ret.set(name, val) return ret def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False): """ Args: input, other (Instances): size_as_tensor: compare image_size of the Instances as tensors (instead of tuples). Useful for comparing outputs of tracing. """ if not isinstance(input, Instances): input = convert_scripted_instances(input) if not isinstance(other, Instances): other = convert_scripted_instances(other) if not msg: msg = "Two Instances are different! " else: msg = msg.rstrip() + " " size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!" if size_as_tensor: assert torch.equal( torch.tensor(input.image_size), torch.tensor(other.image_size) ), size_error_msg else: assert input.image_size == other.image_size, size_error_msg fields = sorted(input.get_fields().keys()) fields_other = sorted(other.get_fields().keys()) assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!" for f in fields: val1, val2 = input.get(f), other.get(f) if isinstance(val1, (Boxes, ROIMasks)): # boxes in the range of O(100) and can have a larger tolerance assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), ( msg + f"Field {f} differs too much!" ) elif isinstance(val1, torch.Tensor): if val1.dtype.is_floating_point: mag = torch.abs(val1).max().cpu().item() assert torch.allclose(val1, val2, atol=mag * rtol), ( msg + f"Field {f} differs too much!" 
) else: assert torch.equal(val1, val2), msg + f"Field {f} is different!" else: raise ValueError(f"Don't know how to compare type {type(val1)}") def reload_script_model(module): """ Save a jit module and load it back. Similar to the `getExportImportCopy` function in torch/testing/ """ buffer = io.BytesIO() torch.jit.save(module, buffer) buffer.seek(0) return torch.jit.load(buffer)
banmo-main
third_party/detectron2_old/detectron2/utils/testing.py
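A quick sketch using the test utilities above: `random_boxes` builds a valid Nx4 box tensor and `assert_instances_allclose` compares two `Instances` objects field by field (here trivially comparing an object with itself):

import torch

from detectron2.structures import Boxes, Instances
from detectron2.utils.testing import assert_instances_allclose, random_boxes

boxes = random_boxes(5, max_coord=100)  # Nx4, with x1 >= x0 and y1 >= y0
inst = Instances((128, 128))
inst.pred_boxes = Boxes(boxes)
inst.scores = torch.rand(5)

# Identical inputs must always compare equal.
assert_instances_allclose(inst, inst, msg="self-comparison")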
# Copyright (c) Facebook, Inc. and its affiliates. import colorsys import logging import math import numpy as np from enum import Enum, unique import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import pycocotools.mask as mask_util import torch from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from detectron2.data import MetadataCatalog from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes from detectron2.utils.file_io import PathManager from .colormap import random_color logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == (height, width), m.shape self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. 
mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(np.bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
continue yield (self._seg == sid).numpy().astype(np.bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(np.bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3). scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") # Need to imshow this first so that other patches can be drawn on top ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") self.fig = fig self.ax = ax def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. 
when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.img = self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. 
alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.img = self._create_grayscale_image(pred.non_empty_mask()) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentaions in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) from panopticapi.utils import rgb2id pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5 ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > _KEYPOINT_THRESHOLD: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0 ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn. 
""" if not font_size: font_size = self._default_font_size # since the text background is dark, we don't want the text to be dark color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="sans-serif", bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, verticalalignment="top", horizontalalignment=horizontal_alignment, color=color, zorder=10, rotation=rotation, ) return self.output def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): """ Args: box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 are the coordinates of the image's top left corner. x1 and y1 are the coordinates of the image's bottom right corner. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. Returns: output (VisImage): image object with box drawn. """ x0, y0, x1, y1 = box_coord width = x1 - x0 height = y1 - y0 linewidth = max(self._default_font_size / 4, 1) self.output.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=edge_color, linewidth=linewidth * self.output.scale, alpha=alpha, linestyle=line_style, ) ) return self.output def draw_rotated_box_with_label( self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None ): """ Draw a rotated box with label on its top-left corner. Args: rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), where cnt_x and cnt_y are the center coordinates of the box. w and h are the width and height of the box. angle represents how many degrees the box is rotated CCW with regard to the 0-degree box. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. label (string): label for rotated box. It will not be rendered when set to None. Returns: output (VisImage): image object with box drawn. 
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0 ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn in the object's center of mass. alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component small than this will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) if area < (area_threshold or 0): continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None and has_valid_segment: # TODO sometimes drawn on wrong objects. the heuristics here can improve. lighter_color = self._change_color_brightness(color, brightness_factor=0.7) _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) largest_component_id = np.argmax(stats[1:, -1]) + 1 # draw text on the largest component, as well as other very large components. for cid in range(1, _num_cc): if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: # median is more stable than centroid # center = centroids[largest_component_id] center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] self.draw_text(text, center, color=lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. 
""" color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """ if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): return boxes.tensor.numpy() else: return np.asarray(boxes) def _convert_masks(self, masks_or_polygons): """ Convert different format of masks or polygons to a tuple of masks and polygons. Returns: list[GenericMask]: """ m = masks_or_polygons if isinstance(m, PolygonMasks): m = m.polygons if isinstance(m, BitMasks): m = m.tensor.numpy() if isinstance(m, torch.Tensor): m = m.numpy() ret = [] for x in m: if isinstance(x, GenericMask): ret.append(x) else: ret.append(GenericMask(x, self.output.height, self.output.width)) return ret def _convert_keypoints(self, keypoints): if isinstance(keypoints, Keypoints): keypoints = keypoints.tensor keypoints = np.asarray(keypoints) return keypoints def get_output(self): """ Returns: output (VisImage): the image output containing the visualizations added to the image. """ return self.output
banmo-main
third_party/detectron2_old/detectron2/utils/visualizer.py
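The `_change_color_brightness` helper in the visualizer code above adjusts a color by scaling its HLS lightness. A minimal, dependency-light sketch of that idea follows, assuming only `colorsys` and `matplotlib.colors`; the function name `shift_lightness` is hypothetical and not part of the code above.

import colorsys

import matplotlib.colors as mplc


def shift_lightness(color, brightness_factor):
    # Scale the HLS lightness by (1 + brightness_factor), clamped to [0, 1],
    # mirroring the logic of Visualizer._change_color_brightness above.
    assert -1.0 <= brightness_factor <= 1.0
    r, g, b = mplc.to_rgb(color)  # accepts color names, hex strings, RGB tuples
    h, lightness, s = colorsys.rgb_to_hls(r, g, b)
    lightness = min(max(lightness + brightness_factor * lightness, 0.0), 1.0)
    return colorsys.hls_to_rgb(h, lightness, s)


print(shift_lightness("g", 0.7))   # a lighter green, e.g. for label text
print(shift_lightness("g", -0.7))  # a darker green, e.g. for polygon edges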
# Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import logging from enum import Enum from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union import torch from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler from detectron2.config import CfgNode from .lr_scheduler import LRMultiplier, WarmupParamScheduler _GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] _GradientClipper = Callable[[_GradientClipperInput], None] class GradientClipType(Enum): VALUE = "value" NORM = "norm" def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: """ Creates gradient clipping closure to clip by value or by norm, according to the provided config. """ cfg = copy.deepcopy(cfg) def clip_grad_norm(p: _GradientClipperInput): torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) def clip_grad_value(p: _GradientClipperInput): torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) _GRADIENT_CLIP_TYPE_TO_CLIPPER = { GradientClipType.VALUE: clip_grad_value, GradientClipType.NORM: clip_grad_norm, } return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] def _generate_optimizer_class_with_gradient_clipping( optimizer: Type[torch.optim.Optimizer], *, per_param_clipper: Optional[_GradientClipper] = None, global_clipper: Optional[_GradientClipper] = None, ) -> Type[torch.optim.Optimizer]: """ Dynamically creates a new type that inherits the type of a given instance and overrides the `step` method to add gradient clipping """ assert ( per_param_clipper is None or global_clipper is None ), "Not allowed to use both per-parameter clipping and global clipping" def optimizer_wgc_step(self, closure=None): if per_param_clipper is not None: for group in self.param_groups: for p in group["params"]: per_param_clipper(p) else: # global clipper for future use with detr # (https://github.com/facebookresearch/detr/pull/287) all_params = itertools.chain(*[g["params"] for g in self.param_groups]) global_clipper(all_params) super(type(self), self).step(closure) OptimizerWithGradientClip = type( optimizer.__name__ + "WithGradientClip", (optimizer,), {"step": optimizer_wgc_step}, ) return OptimizerWithGradientClip def maybe_add_gradient_clipping( cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] ) -> Type[torch.optim.Optimizer]: """ If gradient clipping is enabled through config options, wraps the existing optimizer type to become a new dynamically created class OptimizerWithGradientClip that inherits the given optimizer and overrides the `step` method to include gradient clipping. Args: cfg: CfgNode, configuration options optimizer: type. A subclass of torch.optim.Optimizer Return: type: either the input `optimizer` (if gradient clipping is disabled), or a subclass of it with gradient clipping included in the `step` method. 
""" if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: return optimizer if isinstance(optimizer, torch.optim.Optimizer): optimizer_type = type(optimizer) else: assert issubclass(optimizer, torch.optim.Optimizer), optimizer optimizer_type = optimizer grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( optimizer_type, per_param_clipper=grad_clipper ) if isinstance(optimizer, torch.optim.Optimizer): optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended return optimizer else: return OptimizerWithGradientClip def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: """ Build an optimizer from config. """ params = get_default_optimizer_params( model, base_lr=cfg.SOLVER.BASE_LR, weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, ) return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( params, lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV, weight_decay=cfg.SOLVER.WEIGHT_DECAY, ) def get_default_optimizer_params( model: torch.nn.Module, base_lr: Optional[float] = None, weight_decay: Optional[float] = None, weight_decay_norm: Optional[float] = None, bias_lr_factor: Optional[float] = 1.0, weight_decay_bias: Optional[float] = None, overrides: Optional[Dict[str, Dict[str, float]]] = None, ): """ Get default param list for optimizer, with support for a few types of overrides. If no overrides needed, this is equivalent to `model.parameters()`. Args: base_lr: lr for every group by default. Can be omitted to use the one in optimizer. weight_decay: weight decay for every group by default. Can be omitted to use the one in optimizer. weight_decay_norm: override weight decay for params in normalization layers bias_lr_factor: multiplier of lr for bias parameters. weight_decay_bias: override weight decay for bias parameters overrides: if not `None`, provides values for optimizer hyperparameters (LR, weight decay) for module parameters with a given name; e.g. ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and weight decay values for all module parameters named `embedding`. For common detection models, ``weight_decay_norm`` is the only option needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings from Detectron1 that are not found useful. Example: :: torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), lr=0.01, weight_decay=1e-4, momentum=0.9) """ if overrides is None: overrides = {} defaults = {} if base_lr is not None: defaults["lr"] = base_lr if weight_decay is not None: defaults["weight_decay"] = weight_decay bias_overrides = {} if bias_lr_factor is not None and bias_lr_factor != 1.0: # NOTE: unlike Detectron v1, we now by default make bias hyperparameters # exactly the same as regular weights. 
if base_lr is None: raise ValueError("bias_lr_factor requires base_lr") bias_overrides["lr"] = base_lr * bias_lr_factor if weight_decay_bias is not None: bias_overrides["weight_decay"] = weight_decay_bias if len(bias_overrides): if "bias" in overrides: raise ValueError("Conflicting overrides for 'bias'") overrides["bias"] = bias_overrides norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module in model.modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if isinstance(module, norm_module_types) and weight_decay_norm is not None: hyperparams["weight_decay"] = weight_decay_norm hyperparams.update(overrides.get(module_param_name, {})) params.append({"params": [value], **hyperparams}) return params def build_lr_scheduler( cfg: CfgNode, optimizer: torch.optim.Optimizer ) -> torch.optim.lr_scheduler._LRScheduler: """ Build a LR scheduler from config. """ name = cfg.SOLVER.LR_SCHEDULER_NAME if name == "WarmupMultiStepLR": steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] if len(steps) != len(cfg.SOLVER.STEPS): logger = logging.getLogger(__name__) logger.warning( "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " "These values will be ignored." ) sched = MultiStepParamScheduler( values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)], milestones=steps, num_updates=cfg.SOLVER.MAX_ITER, ) elif name == "WarmupCosineLR": sched = CosineParamScheduler(1, 0) else: raise ValueError("Unknown LR scheduler: {}".format(name)) sched = WarmupParamScheduler( sched, cfg.SOLVER.WARMUP_FACTOR, min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), cfg.SOLVER.WARMUP_METHOD, ) return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
banmo-main
third_party/detectron2_old/detectron2/solver/build.py
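The gradient-clipping wrapper in the solver code above works by dynamically creating a subclass of the optimizer whose `step` clips gradients first. A self-contained sketch of that pattern, using plain PyTorch; `with_grad_value_clipping` and the clip value of 1.0 are illustrative and do not come from any config.

import torch


def with_grad_value_clipping(optimizer_cls, clip_value):
    # Build a new optimizer type whose step() clips gradients by value first,
    # following the dynamic-subclass pattern used above.
    def step(self, closure=None):
        for group in self.param_groups:
            torch.nn.utils.clip_grad_value_(group["params"], clip_value)
        super(type(self), self).step(closure)

    return type(optimizer_cls.__name__ + "WithGradientClip", (optimizer_cls,), {"step": step})


model = torch.nn.Linear(4, 2)
ClippedSGD = with_grad_value_clipping(torch.optim.SGD, clip_value=1.0)
opt = ClippedSGD(model.parameters(), lr=0.1)
model(torch.randn(8, 4)).pow(2).mean().backward()
opt.step()  # gradients are clipped to [-1.0, 1.0] before the SGD update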
# Copyright (c) Facebook, Inc. and its affiliates. import logging import math from bisect import bisect_right from typing import List import torch from fvcore.common.param_scheduler import ( CompositeParamScheduler, ConstantParamScheduler, LinearParamScheduler, ParamScheduler, ) logger = logging.getLogger(__name__) class WarmupParamScheduler(CompositeParamScheduler): """ Add an initial warmup stage to another scheduler. """ def __init__( self, scheduler: ParamScheduler, warmup_factor: float, warmup_length: float, warmup_method: str = "linear", ): """ Args: scheduler: warmup will be added at the beginning of this scheduler warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001 warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire training, e.g. 0.01 warmup_method: one of "linear" or "constant" """ end_value = scheduler(warmup_length) # the value to reach when warmup ends start_value = warmup_factor * scheduler(0.0) if warmup_method == "constant": warmup = ConstantParamScheduler(start_value) elif warmup_method == "linear": warmup = LinearParamScheduler(start_value, end_value) else: raise ValueError("Unknown warmup method: {}".format(warmup_method)) super().__init__( [warmup, scheduler], interval_scaling=["rescaled", "fixed"], lengths=[warmup_length, 1 - warmup_length], ) class LRMultiplier(torch.optim.lr_scheduler._LRScheduler): """ A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the learning rate of each param in the optimizer. Every step, the learning rate of each parameter becomes its initial value multiplied by the output of the given :class:`ParamScheduler`. The absolute learning rate value of each parameter can be different. This scheduler can be used as long as the relative scale among them do not change during training. Examples: :: LRMultiplier( opt, WarmupParamScheduler( MultiStepParamScheduler( [1, 0.1, 0.01], milestones=[60000, 80000], num_updates=90000, ), 0.001, 100 / 90000 ), max_iter=90000 ) """ # NOTES: in the most general case, every LR can use its own scheduler. # Supporting this requires interaction with the optimizer when its parameter # group is initialized. For example, classyvision implements its own optimizer # that allows different schedulers for every parameter group. # To avoid this complexity, we use this class to support the most common cases # where the relative scale among all LRs stay unchanged during training. In this # case we only need a total of one scheduler that defines the relative LR multiplier. def __init__( self, optimizer: torch.optim.Optimizer, multiplier: ParamScheduler, max_iter: int, last_iter: int = -1, ): """ Args: optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``. ``last_iter`` is the same as ``last_epoch``. multiplier: a fvcore ParamScheduler that defines the multiplier on every LR of the optimizer max_iter: the total number of training iterations """ if not isinstance(multiplier, ParamScheduler): raise ValueError( "_LRMultiplier(multiplier=) must be an instance of fvcore " f"ParamScheduler. Got {multiplier} instead." ) self._multiplier = multiplier self._max_iter = max_iter super().__init__(optimizer, last_epoch=last_iter) def state_dict(self): # fvcore schedulers are stateless. 
Only keep pytorch scheduler states return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch} def get_lr(self) -> List[float]: multiplier = self._multiplier(self.last_epoch / self._max_iter) return [base_lr * multiplier for base_lr in self.base_lrs] """ Content below is no longer needed! """ # NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes # only on epoch boundaries. We typically use iteration based schedules instead. # As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean # "iteration" instead. # FIXME: ideally this would be achieved with a CombinedLRScheduler, separating # MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): def __init__( self, optimizer: torch.optim.Optimizer, milestones: List[int], gamma: float = 0.1, warmup_factor: float = 0.001, warmup_iters: int = 1000, warmup_method: str = "linear", last_epoch: int = -1, ): logger.warning( "WarmupMultiStepLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" ) if not list(milestones) == sorted(milestones): raise ValueError( "Milestones should be a list of" " increasing integers. Got {}", milestones ) self.milestones = milestones self.gamma = gamma self.warmup_factor = warmup_factor self.warmup_iters = warmup_iters self.warmup_method = warmup_method super().__init__(optimizer, last_epoch) def get_lr(self) -> List[float]: warmup_factor = _get_warmup_factor_at_iter( self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor ) return [ base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) for base_lr in self.base_lrs ] def _compute_values(self) -> List[float]: # The new interface return self.get_lr() class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): def __init__( self, optimizer: torch.optim.Optimizer, max_iters: int, warmup_factor: float = 0.001, warmup_iters: int = 1000, warmup_method: str = "linear", last_epoch: int = -1, ): logger.warning( "WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" ) self.max_iters = max_iters self.warmup_factor = warmup_factor self.warmup_iters = warmup_iters self.warmup_method = warmup_method super().__init__(optimizer, last_epoch) def get_lr(self) -> List[float]: warmup_factor = _get_warmup_factor_at_iter( self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor ) # Different definitions of half-cosine with warmup are possible. For # simplicity we multiply the standard half-cosine schedule by the warmup # factor. An alternative is to start the period of the cosine at warmup_iters # instead of at 0. In the case that warmup_iters << max_iters the two are # very close to each other. return [ base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) for base_lr in self.base_lrs ] def _compute_values(self) -> List[float]: # The new interface return self.get_lr() def _get_warmup_factor_at_iter( method: str, iter: int, warmup_iters: int, warmup_factor: float ) -> float: """ Return the learning rate warmup factor at a specific iteration. See :paper:`ImageNet in 1h` for more details. Args: method (str): warmup method; either "constant" or "linear". iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). 
Returns: float: the effective warmup factor at the given iteration. """ if iter >= warmup_iters: return 1.0 if method == "constant": return warmup_factor elif method == "linear": alpha = iter / warmup_iters return warmup_factor * (1 - alpha) + alpha else: raise ValueError("Unknown warmup method: {}".format(method))
banmo-main
third_party/detectron2_old/detectron2/solver/lr_scheduler.py
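A dependency-free sketch of the composition that `WarmupParamScheduler` above builds: linear warmup over the first `warmup_length` fraction of training, then the wrapped schedule evaluated on the global progress ("fixed" interval scaling). The milestones mirror the `LRMultiplier` docstring example; `warmup_multiplier` and `step_decay` are illustrative names.

def step_decay(where):
    # gamma=0.1 decay at 60k/90k and 80k/90k of training, as in the docstring example
    return 0.1 ** sum(where >= m for m in (60000 / 90000, 80000 / 90000))


def warmup_multiplier(where, scheduler, warmup_factor=0.001, warmup_length=0.01):
    # `where` is training progress in [0, 1]: linear warmup first, then the
    # wrapped scheduler evaluated at the global progress.
    if where < warmup_length:
        start = warmup_factor * scheduler(0.0)
        end = scheduler(warmup_length)  # value reached when warmup ends
        return start + (where / warmup_length) * (end - start)
    return scheduler(where)


for it in (0, 450, 900, 60000, 80000):
    print(it, round(warmup_multiplier(it / 90000, step_decay, warmup_length=900 / 90000), 6))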
# Copyright (c) Facebook, Inc. and its affiliates. from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, LRMultiplier, WarmupParamScheduler __all__ = [k for k in globals().keys() if not k.startswith("_")]
banmo-main
third_party/detectron2_old/detectron2/solver/__init__.py
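A hedged end-to-end usage sketch of the solver API exported above, assuming detectron2 is installed; the config values and the toy model are illustrative only, not recommended settings.

import torch

from detectron2.config import get_cfg
from detectron2.solver import build_lr_scheduler, build_optimizer

cfg = get_cfg()
cfg.SOLVER.BASE_LR = 0.02
cfg.SOLVER.MAX_ITER = 1000
cfg.SOLVER.STEPS = (600, 800)   # keep decay milestones within MAX_ITER
cfg.SOLVER.WARMUP_ITERS = 100

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
optimizer = build_optimizer(cfg, model)         # SGD with per-parameter hyperparameters
scheduler = build_lr_scheduler(cfg, optimizer)  # warmup + multi-step decay

for _ in range(10):
    optimizer.step()    # a real loop would run forward/backward before stepping
    scheduler.step()
print(optimizer.param_groups[0]["lr"])  # warmed-up learning rate after 10 iterations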
# Copyright (c) Facebook, Inc. and its affiliates. import os from typing import Optional import pkg_resources import torch from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate from detectron2.modeling import build_model class _ModelZooUrls(object): """ Mapping from names to officially released Detectron2 pre-trained models. """ S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl CONFIG_PATH_TO_URL_SUFFIX = { # COCO Detection with Faster R-CNN "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl", "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl", "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl", "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl", "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl", "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl", "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl", "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl", "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl", "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl", # COCO Detection with RetinaNet "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl", "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl", "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl", # COCO Detection with RPN and Fast R-CNN "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl", "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl", "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl", # COCO Instance Segmentation Baselines with Mask R-CNN "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl", "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl", "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa # COCO Person Keypoint Detection Baselines with Keypoint R-CNN "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl", "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl", "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl", "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl", # COCO Panoptic Segmentation Baselines with Panoptic FPN "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl", "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl", "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl", # LVIS 
Instance Segmentation Baselines with Mask R-CNN "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa # Cityscapes & Pascal VOC Baselines "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl", "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl", # Other Settings "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl", "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl", "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl", "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl", "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl", "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl", "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl", "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl", "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl", "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl", "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa # D1 Comparisons "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl", } @staticmethod def query(config_path: str) -> Optional[str]: """ Args: config_path: relative config filename """ name = config_path.replace(".yaml", "").replace(".py", "") if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] return _ModelZooUrls.S3_PREFIX + name + "/" + suffix return None def get_checkpoint_url(config_path): """ Returns the URL to the model trained using the given config Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" Returns: str: a URL to the model """ url = _ModelZooUrls.query(config_path) if url is None: raise RuntimeError("Pretrained model for {} is not available!".format(config_path)) return url def get_config_file(config_path): """ Returns path to a builtin config file. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" Returns: str: the real path to the config file. """ cfg_file = pkg_resources.resource_filename( "detectron2.model_zoo", os.path.join("configs", config_path) ) if not os.path.exists(cfg_file): raise RuntimeError("{} not available in Model Zoo!".format(config_path)) return cfg_file def get_config(config_path, trained: bool = False): """ Returns a config object for a model in model zoo. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. 
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used instead; this will typically (though not always) initialize a subset of weights using an ImageNet pre-trained model, while randomly initializing the other weights. Returns: CfgNode or omegaconf.DictConfig: a config object """ cfg_file = get_config_file(config_path) if cfg_file.endswith(".yaml"): cfg = get_cfg() cfg.merge_from_file(cfg_file) if trained: cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) return cfg elif cfg_file.endswith(".py"): cfg = LazyConfig.load(cfg_file) if trained: url = get_checkpoint_url(config_path) if "train" in cfg and "init_checkpoint" in cfg.train: cfg.train.init_checkpoint = url else: raise NotImplementedError return cfg def get(config_path, trained: bool = False, device: Optional[str] = None): """ Get a model specified by relative path under Detectron2's official ``configs/`` directory. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" trained (bool): see :func:`get_config`. device (str or None): overwrite the device in config, if given. Returns: nn.Module: a detectron2 model. Will be in training mode. Example: :: from detectron2 import model_zoo model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) """ cfg = get_config(config_path, trained) if device is None and not torch.cuda.is_available(): device = "cpu" if device is not None and isinstance(cfg, CfgNode): cfg.MODEL.DEVICE = device if isinstance(cfg, CfgNode): model = build_model(cfg) DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) else: model = instantiate(cfg.model) if device is not None: model = model.to(device) if "train" in cfg and "init_checkpoint" in cfg.train: DetectionCheckpointer(model).load(cfg.train.init_checkpoint) return model
banmo-main
third_party/detectron2_old/detectron2/model_zoo/model_zoo.py
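A hedged usage sketch of the model-zoo helpers above, assuming detectron2 is installed and the pretrained weights are reachable over the network.

from detectron2 import model_zoo

cfg_path = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
print(model_zoo.get_config_file(cfg_path))     # local path to the bundled yaml
print(model_zoo.get_checkpoint_url(cfg_path))  # https://dl.fbaipublicfiles.com/... URL

cfg = model_zoo.get_config(cfg_path, trained=True)
print(cfg.MODEL.WEIGHTS)                       # the same URL, baked into the config

model = model_zoo.get(cfg_path, trained=True, device="cpu")  # downloads and loads weights
model.eval()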
# Copyright (c) Facebook, Inc. and its affiliates. """ Model Zoo API for Detectron2: a collection of functions to create common model architectures listed in `MODEL_ZOO.md <https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md>`_, and optionally load their pre-trained weights. """ from .model_zoo import get, get_config_file, get_checkpoint_url, get_config __all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"]
banmo-main
third_party/detectron2_old/detectron2/model_zoo/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import copy import numpy as np from contextlib import contextmanager from itertools import count from typing import List import torch from fvcore.transforms import HFlipTransform, NoOpTransform from torch import nn from torch.nn.parallel import DistributedDataParallel from detectron2.config import configurable from detectron2.data.detection_utils import read_image from detectron2.data.transforms import ( RandomFlip, ResizeShortestEdge, ResizeTransform, apply_augmentations, ) from detectron2.structures import Boxes, Instances from .meta_arch import GeneralizedRCNN from .postprocessing import detector_postprocess from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image __all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] class DatasetMapperTTA: """ Implement test-time augmentation for detection data. It is a callable which takes a dataset dict from a detection dataset, and returns a list of dataset dicts where the images are augmented from the input image by the transformations defined in the config. This is used for test-time augmentation. """ @configurable def __init__(self, min_sizes: List[int], max_size: int, flip: bool): """ Args: min_sizes: list of short-edge size to resize the image to max_size: maximum height or width of resized images flip: whether to apply flipping augmentation """ self.min_sizes = min_sizes self.max_size = max_size self.flip = flip @classmethod def from_config(cls, cfg): return { "min_sizes": cfg.TEST.AUG.MIN_SIZES, "max_size": cfg.TEST.AUG.MAX_SIZE, "flip": cfg.TEST.AUG.FLIP, } def __call__(self, dataset_dict): """ Args: dict: a dict in standard model input format. See tutorials for details. Returns: list[dict]: a list of dicts, which contain augmented version of the input image. The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. Each dict has field "transforms" which is a TransformList, containing the transforms that are used to generate this image. """ numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy() shape = numpy_image.shape orig_shape = (dataset_dict["height"], dataset_dict["width"]) if shape[:2] != orig_shape: # It transforms the "original" image in the dataset to the input image pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1]) else: pre_tfm = NoOpTransform() # Create all combinations of augmentations to use aug_candidates = [] # each element is a list[Augmentation] for min_size in self.min_sizes: resize = ResizeShortestEdge(min_size, self.max_size) aug_candidates.append([resize]) # resize only if self.flip: flip = RandomFlip(prob=1.0) aug_candidates.append([resize, flip]) # resize + flip # Apply all the augmentations ret = [] for aug in aug_candidates: new_image, tfms = apply_augmentations(aug, np.copy(numpy_image)) torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1))) dic = copy.deepcopy(dataset_dict) dic["transforms"] = pre_tfm + tfms dic["image"] = torch_image ret.append(dic) return ret class GeneralizedRCNNWithTTA(nn.Module): """ A GeneralizedRCNN with test-time augmentation enabled. Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. """ def __init__(self, cfg, model, tta_mapper=None, batch_size=3): """ Args: cfg (CfgNode): model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. tta_mapper (callable): takes a dataset dict and returns a list of augmented versions of the dataset dict. Defaults to `DatasetMapperTTA(cfg)`. 
batch_size (int): batch the augmented images into this batch size for inference. """ super().__init__() if isinstance(model, DistributedDataParallel): model = model.module assert isinstance( model, GeneralizedRCNN ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) self.cfg = cfg.clone() assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" assert ( not self.cfg.MODEL.LOAD_PROPOSALS ), "TTA for pre-computed proposals is not supported yet" self.model = model if tta_mapper is None: tta_mapper = DatasetMapperTTA(cfg) self.tta_mapper = tta_mapper self.batch_size = batch_size @contextmanager def _turn_off_roi_heads(self, attrs): """ Open a context where some heads in `model.roi_heads` are temporarily turned off. Args: attr (list[str]): the attribute in `model.roi_heads` which can be used to turn off a specific head, e.g., "mask_on", "keypoint_on". """ roi_heads = self.model.roi_heads old = {} for attr in attrs: try: old[attr] = getattr(roi_heads, attr) except AttributeError: # The head may not be implemented in certain ROIHeads pass if len(old.keys()) == 0: yield else: for attr in old.keys(): setattr(roi_heads, attr, False) yield for attr in old.keys(): setattr(roi_heads, attr, old[attr]) def _batch_inference(self, batched_inputs, detected_instances=None): """ Execute inference on a list of inputs, using batch size = self.batch_size, instead of the length of the list. Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` """ if detected_instances is None: detected_instances = [None] * len(batched_inputs) outputs = [] inputs, instances = [], [] for idx, input, instance in zip(count(), batched_inputs, detected_instances): inputs.append(input) instances.append(instance) if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: outputs.extend( self.model.inference( inputs, instances if instances[0] is not None else None, do_postprocess=False, ) ) inputs, instances = [], [] return outputs def __call__(self, batched_inputs): """ Same input/output format as :meth:`GeneralizedRCNN.forward` """ def _maybe_read_image(dataset_dict): ret = copy.copy(dataset_dict) if "image" not in ret: image = read_image(ret.pop("file_name"), self.model.input_format) image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW ret["image"] = image if "height" not in ret and "width" not in ret: ret["height"] = image.shape[1] ret["width"] = image.shape[2] return ret return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs] def _inference_one_image(self, input): """ Args: input (dict): one dataset dict with "image" field being a CHW tensor Returns: dict: one output dict """ orig_shape = (input["height"], input["width"]) augmented_inputs, tfms = self._get_augmented_inputs(input) # Detect boxes from all augmented versions with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): # temporarily disable roi heads all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms) # merge all detected boxes to obtain final predictions for boxes merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape) if self.cfg.MODEL.MASK_ON: # Use the detected boxes to obtain masks augmented_instances = self._rescale_detected_boxes( augmented_inputs, merged_instances, tfms ) # run forward on the detected boxes outputs = self._batch_inference(augmented_inputs, augmented_instances) # Delete now useless variables to avoid being out of memory del augmented_inputs, 
augmented_instances # average the predictions merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms) merged_instances = detector_postprocess(merged_instances, *orig_shape) return {"instances": merged_instances} else: return {"instances": merged_instances} def _get_augmented_inputs(self, input): augmented_inputs = self.tta_mapper(input) tfms = [x.pop("transforms") for x in augmented_inputs] return augmented_inputs, tfms def _get_augmented_boxes(self, augmented_inputs, tfms): # 1: forward with all augmented images outputs = self._batch_inference(augmented_inputs) # 2: union the results all_boxes = [] all_scores = [] all_classes = [] for output, tfm in zip(outputs, tfms): # Need to inverse the transforms on boxes, to obtain results on original image pred_boxes = output.pred_boxes.tensor original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy()) all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device)) all_scores.extend(output.scores) all_classes.extend(output.pred_classes) all_boxes = torch.cat(all_boxes, dim=0) return all_boxes, all_scores, all_classes def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw): # select from the union of all results num_boxes = len(all_boxes) num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES # +1 because fast_rcnn_inference expects background scores as well all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device) for idx, cls, score in zip(count(), all_classes, all_scores): all_scores_2d[idx, cls] = score merged_instances, _ = fast_rcnn_inference_single_image( all_boxes, all_scores_2d, shape_hw, 1e-8, self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, self.cfg.TEST.DETECTIONS_PER_IMAGE, ) return merged_instances def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms): augmented_instances = [] for input, tfm in zip(augmented_inputs, tfms): # Transform the target box to the augmented image's coordinate space pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy() pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes)) aug_instances = Instances( image_size=input["image"].shape[1:3], pred_boxes=Boxes(pred_boxes), pred_classes=merged_instances.pred_classes, scores=merged_instances.scores, ) augmented_instances.append(aug_instances) return augmented_instances def _reduce_pred_masks(self, outputs, tfms): # Should apply inverse transforms on masks. # We assume only resize & flip are used. pred_masks is a scale-invariant # representation, so we handle flip specially for output, tfm in zip(outputs, tfms): if any(isinstance(t, HFlipTransform) for t in tfm.transforms): output.pred_masks = output.pred_masks.flip(dims=[3]) all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) avg_pred_masks = torch.mean(all_pred_masks, dim=0) return avg_pred_masks
banmo-main
third_party/detectron2_old/detectron2/modeling/test_time_augmentation.py
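A minimal sketch of the "map predictions back to the original image" step that `_get_augmented_boxes` above performs for a horizontal flip, using only fvcore transforms; the image width and box coordinates are made up for illustration.

import numpy as np
from fvcore.transforms import HFlipTransform

img_width = 640
tfm = HFlipTransform(width=img_width)  # the augmentation that produced the flipped image
pred_on_flipped = np.array([[100.0, 50.0, 200.0, 150.0]])  # XYXY box predicted on the flip
restored = tfm.inverse().apply_box(pred_on_flipped)
print(restored)  # [[440.  50. 540. 150.]] -- coordinates back in the original image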
# Copyright (c) Facebook, Inc. and its affiliates. import math from typing import List import torch from torch import nn from torchvision.ops import RoIPool from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple from detectron2.structures import Boxes """ To export ROIPooler to torchscript, in this file, variables that should be annotated with `Union[List[Boxes], List[RotatedBoxes]]` are only annotated with `List[Boxes]`. TODO: Correct these annotations when torchscript support `Union`. https://github.com/pytorch/pytorch/issues/41412 """ __all__ = ["ROIPooler"] def assign_boxes_to_levels( box_lists: List[Boxes], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, ): """ Map each box in `box_lists` to a feature map level index and return the assignment vector. Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. min_level (int): Smallest feature map level index. The input is considered index 0, the output of stage 1 is index 1, and so. max_level (int): Largest feature map level index. canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). canonical_level (int): The feature map level index on which a canonically-sized box should be placed. Returns: A tensor of length M, where M is the total number of boxes aggregated over all N batch images. The memory layout corresponds to the concatenation of boxes from all images. Each element is the feature map index, as an offset from `self.min_level`, for the corresponding box (so value i means the box is at `self.min_level + i`). """ box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists])) # Eqn.(1) in FPN paper level_assignments = torch.floor( canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8) ) # clamp level to (min, max), in case the box size is too large or too small # for the available feature maps level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) return level_assignments.to(torch.int64) - min_level def _fmt_box_list(box_tensor, batch_index: int): repeated_index = torch.full_like( box_tensor[:, :1], batch_index, dtype=box_tensor.dtype, device=box_tensor.device ) return cat((repeated_index, box_tensor), dim=1) def convert_boxes_to_pooler_format(box_lists: List[Boxes]): """ Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops (see description under Returns). Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. Returns: When input is list[Boxes]: A tensor of shape (M, 5), where M is the total number of boxes aggregated over all N batch images. The 5 columns are (batch index, x0, y0, x1, y1), where batch index is the index in [0, N) identifying which batch image the box with corners at (x0, y0, x1, y1) comes from. When input is list[RotatedBoxes]: A tensor of shape (M, 6), where M is the total number of boxes aggregated over all N batch images. The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), where batch index is the index in [0, N) identifying which batch image the rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from. """ pooler_fmt_boxes = cat( [_fmt_box_list(box_list.tensor, i) for i, box_list in enumerate(box_lists)], dim=0 ) return pooler_fmt_boxes class ROIPooler(nn.Module): """ Region of interest feature map pooler that supports pooling from one or more feature maps. 
""" def __init__( self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4, ): """ Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, "ROIPool" or "ROIAlignV2". canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default is heuristically defined as 224 pixels in the FPN paper (based on ImageNet pre-training). canonical_level (int): The feature map level index from which a canonically-sized box should be placed. The default is defined as level 4 (stride=16) in the FPN paper, i.e., a box of size 224x224 will be placed on the feature with stride=16. The box placement for all boxes will be determined from their sizes w.r.t canonical_box_size. For example, a box whose area is 4x that of a canonical box should be used to pool features from feature level ``canonical_level+1``. Note that the actual input feature maps given to this module may not have sufficiently many levels for the input boxes. If the boxes are too large or too small for the input feature maps, the closest level will be used. """ super().__init__() if isinstance(output_size, int): output_size = (output_size, output_size) assert len(output_size) == 2 assert isinstance(output_size[0], int) and isinstance(output_size[1], int) self.output_size = output_size if pooler_type == "ROIAlign": self.level_poolers = nn.ModuleList( ROIAlign( output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False ) for scale in scales ) elif pooler_type == "ROIAlignV2": self.level_poolers = nn.ModuleList( ROIAlign( output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True ) for scale in scales ) elif pooler_type == "ROIPool": self.level_poolers = nn.ModuleList( RoIPool(output_size, spatial_scale=scale) for scale in scales ) elif pooler_type == "ROIAlignRotated": self.level_poolers = nn.ModuleList( ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales ) else: raise ValueError("Unknown pooler type: {}".format(pooler_type)) # Map scale (defined as 1 / stride) to its feature map level under the # assumption that stride is a power of 2. min_level = -(math.log2(scales[0])) max_level = -(math.log2(scales[-1])) assert math.isclose(min_level, int(min_level)) and math.isclose( max_level, int(max_level) ), "Featuremap stride is not power of 2!" self.min_level = int(min_level) self.max_level = int(max_level) assert ( len(scales) == self.max_level - self.min_level + 1 ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!" assert 0 <= self.min_level and self.min_level <= self.max_level self.canonical_level = canonical_level assert canonical_box_size > 0 self.canonical_box_size = canonical_box_size def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): """ Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. 
box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`. """ num_level_assignments = len(self.level_poolers) assert isinstance(x, list) and isinstance( box_lists, list ), "Arguments to pooler must be lists" assert ( len(x) == num_level_assignments ), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format( num_level_assignments, len(x) ) assert len(box_lists) == x[0].size( 0 ), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format( x[0].size(0), len(box_lists) ) if len(box_lists) == 0: return torch.zeros( (0, x[0].shape[1]) + self.output_size, device=x[0].device, dtype=x[0].dtype ) pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) if num_level_assignments == 1: return self.level_poolers[0](x[0], pooler_fmt_boxes) level_assignments = assign_boxes_to_levels( box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level ) num_boxes = pooler_fmt_boxes.size(0) num_channels = x[0].shape[1] output_size = self.output_size[0] dtype, device = x[0].dtype, x[0].device output = torch.zeros( (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device ) for level, pooler in enumerate(self.level_poolers): inds = nonzero_tuple(level_assignments == level)[0] pooler_fmt_boxes_level = pooler_fmt_boxes[inds] # Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852 output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level)) return output
banmo-main
third_party/detectron2_old/detectron2/modeling/poolers.py
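A dependency-light sketch of the FPN level-assignment rule (Eqn. 1 of the FPN paper) implemented by `assign_boxes_to_levels` above; the level range 2..5 and the box areas are illustrative assumptions, not values taken from the module.

import math


def fpn_level(box_area, min_level=2, max_level=5, canonical_box_size=224, canonical_level=4):
    # level = floor(k0 + log2(sqrt(area) / 224)), clamped to the available feature levels
    size = math.sqrt(box_area)
    level = math.floor(canonical_level + math.log2(size / canonical_box_size + 1e-8))
    return min(max(level, min_level), max_level)


for side in (32, 112, 224, 448, 1000):
    print("%4d x %4d -> p%d" % (side, side, fpn_level(side * side)))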
# Copyright (c) Facebook, Inc. and its affiliates. from typing import List import torch from detectron2.layers import nonzero_tuple class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__( self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False ): """ Args: thresholds (list): a list of thresholds used to stratify predictions into levels. labels (list): a list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. allow_low_quality_matches (bool): if True, produce additional matches for predictions with maximum match quality lower than high_threshold. See set_low_quality_matches_ for more details. For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ # Add -inf and +inf to first and last position in thresholds thresholds = thresholds[:] assert thresholds[0] > 0 thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) # Currently torchscript does not support all + generator assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])]) assert all([l in [-1, 0, 1] for l in labels]) assert len(labels) == len(thresholds) - 1 self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). 
Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates whether a prediction is a true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full( (match_quality_matrix.size(1),), 0, dtype=torch.int64 ) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of :paper:`Faster R-CNN`. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. # Note that the matches qualities must be positive due to the use of # `torch.nonzero`. _, pred_inds_with_highest_quality = nonzero_tuple( match_quality_matrix == highest_quality_foreach_gt[:, None] ) # If an anchor was labeled positive only due to a low-quality match # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B. # This follows the implementation in Detectron, and is found to have no significant impact. match_labels[pred_inds_with_highest_quality] = 1
banmo-main
third_party/detectron2_old/detectron2/modeling/matcher.py
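A hedged usage sketch of the `Matcher` above on a tiny hand-made IoU matrix, assuming detectron2 is importable; the thresholds and labels follow the docstring example.

import torch

from detectron2.modeling.matcher import Matcher

matcher = Matcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=False)
# 2 ground-truth boxes (rows) x 4 predictions (columns) of pairwise IoU
iou = torch.tensor([[0.10, 0.40, 0.80, 0.05],
                    [0.20, 0.10, 0.30, 0.60]])
matches, match_labels = matcher(iou)
print(matches)       # tensor([1, 0, 0, 1]): best ground-truth index per prediction
print(match_labels)  # tensor([ 0, -1,  1,  1]): negative / ignored / positive / positive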
# Copyright (c) Facebook, Inc. and its affiliates. from detectron2.layers import ShapeSpec from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY from .backbone import ( BACKBONE_REGISTRY, FPN, Backbone, ResNet, ResNetBlockBase, build_backbone, build_resnet_backbone, make_stage, ) from .meta_arch import ( META_ARCH_REGISTRY, SEM_SEG_HEADS_REGISTRY, GeneralizedRCNN, PanopticFPN, ProposalNetwork, RetinaNet, SemanticSegmentor, build_model, build_sem_seg_head, ) from .postprocessing import detector_postprocess from .proposal_generator import ( PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator, RPN_HEAD_REGISTRY, build_rpn_head, ) from .roi_heads import ( ROI_BOX_HEAD_REGISTRY, ROI_HEADS_REGISTRY, ROI_KEYPOINT_HEAD_REGISTRY, ROI_MASK_HEAD_REGISTRY, ROIHeads, StandardROIHeads, BaseMaskRCNNHead, BaseKeypointRCNNHead, FastRCNNOutputLayers, build_box_head, build_keypoint_head, build_mask_head, build_roi_heads, ) from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA from .mmdet_wrapper import MMDetBackbone, MMDetDetector _EXCLUDE = {"ShapeSpec"} __all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] from detectron2.utils.env import fixup_module_metadata fixup_module_metadata(__name__, globals(), __all__) del fixup_module_metadata
banmo-main
third_party/detectron2_old/detectron2/modeling/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import math from typing import List, Tuple import torch from fvcore.nn import giou_loss, smooth_l1_loss from detectron2.layers import cat from detectron2.structures import Boxes # Value for clamping large dw and dh predictions. The heuristic is that we clamp # such that dw and dh are no larger than what would transform a 16px box into a # 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). _DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16) __all__ = ["Box2BoxTransform", "Box2BoxTransformRotated"] @torch.jit.script class Box2BoxTransform(object): """ The box-to-box transform defined in R-CNN. The transformation is parameterized by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). """ def __init__( self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP ): """ Args: weights (4-element tuple): Scaling factors that are applied to the (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set such that the deltas have unit variance; now they are treated as hyperparameters of the system. scale_clamp (float): When predicting deltas, the predicted box scaling factors (dw and dh) are clamped such that they are <= scale_clamp. """ self.weights = weights self.scale_clamp = scale_clamp def get_deltas(self, src_boxes, target_boxes): """ Get box regression transformation deltas (dx, dy, dw, dh) that can be used to transform the `src_boxes` into the `target_boxes`. That is, the relation ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless any delta is too large and is clamped). Args: src_boxes (Tensor): source boxes, e.g., object proposals target_boxes (Tensor): target of the transformation, e.g., ground-truth boxes. """ assert isinstance(src_boxes, torch.Tensor), type(src_boxes) assert isinstance(target_boxes, torch.Tensor), type(target_boxes) src_widths = src_boxes[:, 2] - src_boxes[:, 0] src_heights = src_boxes[:, 3] - src_boxes[:, 1] src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights target_widths = target_boxes[:, 2] - target_boxes[:, 0] target_heights = target_boxes[:, 3] - target_boxes[:, 1] target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights wx, wy, ww, wh = self.weights dx = wx * (target_ctr_x - src_ctr_x) / src_widths dy = wy * (target_ctr_y - src_ctr_y) / src_heights dw = ww * torch.log(target_widths / src_widths) dh = wh * torch.log(target_heights / src_heights) deltas = torch.stack((dx, dy, dw, dh), dim=1) assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" return deltas def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. deltas[i] represents k potentially different class-specific box transformations for the single box boxes[i]. 
boxes (Tensor): boxes to transform, of shape (N, 4) """ deltas = deltas.float() # ensure fp32 for decoding precision boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] x1 = pred_ctr_x - 0.5 * pred_w y1 = pred_ctr_y - 0.5 * pred_h x2 = pred_ctr_x + 0.5 * pred_w y2 = pred_ctr_y + 0.5 * pred_h pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1) return pred_boxes.reshape(deltas.shape) @torch.jit.script class Box2BoxTransformRotated(object): """ The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height), and rotate a box's angle by da (radians). Note: angles of deltas are in radians while angles of boxes are in degrees. """ def __init__( self, weights: Tuple[float, float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP, ): """ Args: weights (5-element tuple): Scaling factors that are applied to the (dx, dy, dw, dh, da) deltas. These are treated as hyperparameters of the system. scale_clamp (float): When predicting deltas, the predicted box scaling factors (dw and dh) are clamped such that they are <= scale_clamp. """ self.weights = weights self.scale_clamp = scale_clamp def get_deltas(self, src_boxes, target_boxes): """ Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used to transform the `src_boxes` into the `target_boxes`. That is, the relation ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless any delta is too large and is clamped). Args: src_boxes (Tensor): Nx5 source boxes, e.g., object proposals target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth boxes. """ assert isinstance(src_boxes, torch.Tensor), type(src_boxes) assert isinstance(target_boxes, torch.Tensor), type(target_boxes) src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1) target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind( target_boxes, dim=1 ) wx, wy, ww, wh, wa = self.weights dx = wx * (target_ctr_x - src_ctr_x) / src_widths dy = wy * (target_ctr_y - src_ctr_y) / src_heights dw = ww * torch.log(target_widths / src_widths) dh = wh * torch.log(target_heights / src_heights) # Angles of deltas are in radians while angles of boxes are in degrees. # the conversion to radians serve as a way to normalize the values da = target_angles - src_angles da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180) da *= wa * math.pi / 180.0 deltas = torch.stack((dx, dy, dw, dh, da), dim=1) assert ( (src_widths > 0).all().item() ), "Input boxes to Box2BoxTransformRotated are not valid!" return deltas def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*5). 
deltas[i] represents box transformation for the single box boxes[i]. boxes (Tensor): boxes to transform, of shape (N, 5) """ assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5 boxes = boxes.to(deltas.dtype).unsqueeze(2) ctr_x = boxes[:, 0] ctr_y = boxes[:, 1] widths = boxes[:, 2] heights = boxes[:, 3] angles = boxes[:, 4] wx, wy, ww, wh, wa = self.weights dx = deltas[:, 0::5] / wx dy = deltas[:, 1::5] / wy dw = deltas[:, 2::5] / ww dh = deltas[:, 3::5] / wh da = deltas[:, 4::5] / wa # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_boxes = torch.zeros_like(deltas) pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr pred_boxes[:, 2::5] = torch.exp(dw) * widths # width pred_boxes[:, 3::5] = torch.exp(dh) * heights # height # Following original RRPN implementation, # angles of deltas are in radians while angles of boxes are in degrees. pred_angle = da * 180.0 / math.pi + angles pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180) pred_boxes[:, 4::5] = pred_angle return pred_boxes def _dense_box_regression_loss( anchors: List[Boxes], box2box_transform: Box2BoxTransform, pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor], fg_mask: torch.Tensor, box_reg_loss_type="smooth_l1", smooth_l1_beta=0.0, ): """ Compute loss for dense multi-level box regression. Loss is accumulated over ``fg_mask``. Args: anchors: #lvl anchor boxes, each is (HixWixA, 4) pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) fg_mask: the foreground boolean mask of shape (N, R) to compute loss on box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1" """ anchors = type(anchors[0]).cat(anchors).tensor # (R, 4) if box_reg_loss_type == "smooth_l1": gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes] gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4) loss_box_reg = smooth_l1_loss( cat(pred_anchor_deltas, dim=1)[fg_mask], gt_anchor_deltas[fg_mask], beta=smooth_l1_beta, reduction="sum", ) elif box_reg_loss_type == "giou": pred_boxes = [ box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) ] loss_box_reg = giou_loss( torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" ) else: raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'") return loss_box_reg
banmo-main
third_party/detectron2_old/detectron2/modeling/box_regression.py
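A minimal round-trip sketch of the delta encoding described in box_regression.py above; the weights and box coordinates are illustrative values, not defaults taken from the file.

import torch
from detectron2.modeling.box_regression import Box2BoxTransform

transform = Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0))   # example weights
src = torch.tensor([[10.0, 10.0, 50.0, 60.0]])                 # proposal box (x1, y1, x2, y2)
target = torch.tensor([[12.0, 8.0, 55.0, 66.0]])               # ground-truth box

deltas = transform.get_deltas(src, target)      # (1, 4) tensor of (dx, dy, dw, dh)
decoded = transform.apply_deltas(deltas, src)   # decodes back to approximately `target`
assert torch.allclose(decoded, target, atol=1e-4)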
# -*- coding: utf-8 -*- import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone logger = logging.getLogger(__name__) def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg) class MMDetBackbone(Backbone): """ Wrapper of mmdetection backbones to use in detectron2. mmdet backbones produce list/tuple of tensors, while detectron2 backbones produce a dict of tensors. This class wraps the given backbone to produce output in detectron2's convention, so it can be used in place of detectron2 backbones. """ def __init__( self, backbone: Union[nn.Module, Mapping], neck: Union[nn.Module, Mapping, None] = None, *, pretrained_backbone: Optional[str] = None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]] = None, ): """ Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ... """ super().__init__() if isinstance(backbone, Mapping): from mmdet.models import build_backbone backbone = build_backbone(_to_container(backbone)) self.backbone = backbone if isinstance(neck, Mapping): from mmdet.models import build_neck neck = build_neck(_to_container(neck)) self.neck = neck # It's confusing that backbone weights are given as a separate argument, # but "neck" weights, if any, are part of neck itself. This is the interface # of mmdet so we follow it. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") self.backbone.init_weights(pretrained_backbone) # train() in mmdet modules is non-trivial, and has to be explicitly # called. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py self.backbone.train() if self.neck is not None: logger.info("Initializing mmdet neck weights ...") if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.neck.train() self._output_shapes = output_shapes if not output_names: output_names = [f"out{i}" for i in range(len(output_shapes))] self._output_names = output_names def forward(self, x) -> Dict[str, Tensor]: outs = self.backbone(x) if self.neck is not None: outs = self.neck(outs) assert isinstance( outs, (list, tuple) ), "mmdet backbone should return a list/tuple of tensors!" 
if len(outs) != len(self._output_shapes): raise ValueError( "Length of output_shapes does not match outputs from the mmdet backbone: " f"{len(outs)} != {len(self._output_shapes)}" ) return {k: v for k, v in zip(self._output_names, outs)} def output_shape(self) -> Dict[str, ShapeSpec]: return {k: v for k, v in zip(self._output_names, self._output_shapes)} class MMDetDetector(nn.Module): """ Wrapper of a mmdetection detector model, for detection and instance segmentation. Input/output formats of this class follow detectron2's convention, so a mmdetection model can be trained and evaluated in detectron2. """ def __init__( self, detector: Union[nn.Module, Mapping], *, # Default is 32 regardless of model: # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image """ super().__init__() if isinstance(detector, Mapping): from mmdet.models import build_detector detector = build_detector(_to_container(detector)) self.detector = detector self.size_divisibility = size_divisibility self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor metas = [] rescale = {"height" in x for x in batched_inputs} if len(rescale) != 1: raise ValueError("Some inputs have original height/width, but some don't!") rescale = list(rescale)[0] output_shapes = [] for input in batched_inputs: meta = {} c, h, w = input["image"].shape meta["img_shape"] = meta["ori_shape"] = (h, w, c) if rescale: scale_factor = np.array( [w / input["width"], h / input["height"]] * 2, dtype="float32" ) ori_shape = (input["height"], input["width"]) output_shapes.append(ori_shape) meta["ori_shape"] = ori_shape + (c,) else: scale_factor = 1.0 output_shapes.append((h, w)) meta["scale_factor"] = scale_factor meta["flip"] = False padh, padw = images.shape[-2:] meta["pad_shape"] = (padh, padw, c) metas.append(meta) if self.training: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] if gt_instances[0].has("gt_masks"): from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks def convert_mask(m, shape): # mmdet mask format if isinstance(m, BitMasks): return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) else: return mm_PolygonMasks(m.polygons, shape[0], shape[1]) gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], gt_masks=gt_masks, ) else: losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], ) return _parse_losses(losses_and_metrics) else: results = self.detector.simple_test(images, metas, 
rescale=rescale) results = [ {"instances": _convert_mmdet_result(r, shape)} for r, shape in zip(results, output_shapes) ] return results @property def device(self): return self.pixel_mean.device # Reference: show_result() in # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, dim=0) inst.pred_masks = segm_result return inst # reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
banmo-main
third_party/detectron2_old/detectron2/modeling/mmdet_wrapper.py
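A hedged construction sketch for MMDetBackbone, assuming a compatible mmdet/mmcv installation; the ResNet-50 + FPN config values, output strides, and feature names below are illustrative and not taken from the file above.

import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.mmdet_wrapper import MMDetBackbone

backbone = MMDetBackbone(
    backbone=dict(type="ResNet", depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
    neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)],
    output_names=["p2", "p3", "p4", "p5", "p6"],    # detectron2-style feature names
)
features = backbone(torch.zeros(1, 3, 128, 128))    # dict: {"p2": Tensor, ..., "p6": Tensor}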
# Copyright (c) Facebook, Inc. and its affiliates. import collections import math from typing import List import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec from detectron2.structures import Boxes, RotatedBoxes from detectron2.utils.registry import Registry ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") ANCHOR_GENERATOR_REGISTRY.__doc__ = """ Registry for modules that creates object detection anchors for feature maps. The registered object will be called with `obj(cfg, input_shape)`. """ class BufferList(nn.Module): """ Similar to nn.ParameterList, but for buffers """ def __init__(self, buffers): super().__init__() for i, buffer in enumerate(buffers): # Use non-persistent buffer so the values are not saved in checkpoint self.register_buffer(str(i), buffer, persistent=False) def __len__(self): return len(self._buffers) def __iter__(self): return iter(self._buffers.values()) def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device): grid_height, grid_width = size shifts_x = torch.arange( offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device ) shifts_y = torch.arange( offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) return shift_x, shift_y def _broadcast_params(params, num_features, name): """ If one size (or aspect ratio) is specified and there are multiple feature maps, we "broadcast" anchors of that single size (or aspect ratio) over all feature maps. If params is list[float], or list[list[float]] with len(params) == 1, repeat it num_features time. Returns: list[list[float]]: param for each feature """ assert isinstance( params, collections.abc.Sequence ), f"{name} in anchor generator has to be a list! Got {params}." assert len(params), f"{name} in anchor generator cannot be empty!" if not isinstance(params[0], collections.abc.Sequence): # params is list[float] return [params] * num_features if len(params) == 1: return list(params) * num_features assert len(params) == num_features, ( f"Got {name} of length {len(params)} in anchor generator, " f"but the number of input features is {num_features}!" ) return params @ANCHOR_GENERATOR_REGISTRY.register() class DefaultAnchorGenerator(nn.Module): """ Compute anchors in the standard ways described in "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks". """ box_dim: torch.jit.Final[int] = 4 """ the dimension of each anchor box. """ @configurable def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5): """ This interface is experimental. Args: sizes (list[list[float]] or list[float]): If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes (i.e. sqrt of anchor area) to use for the i-th feature map. If ``sizes`` is list[float], ``sizes`` is used for all feature maps. Anchor sizes are given in absolute lengths in units of the input image; they do not dynamically scale if the input image size changes. aspect_ratios (list[list[float]] or list[float]): list of aspect ratios (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. strides (list[int]): stride of each input feature. offset (float): Relative offset between the center of the first anchor and the top-left corner of the image. Value has to be in [0, 1). Recommend to use 0.5, which means half stride. 
""" super().__init__() self.strides = strides self.num_features = len(self.strides) sizes = _broadcast_params(sizes, self.num_features, "sizes") aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) self.offset = offset assert 0.0 <= self.offset < 1.0, self.offset @classmethod def from_config(cls, cfg, input_shape: List[ShapeSpec]): return { "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, "strides": [x.stride for x in input_shape], "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, } def _calculate_anchors(self, sizes, aspect_ratios): cell_anchors = [ self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) ] return BufferList(cell_anchors) @property @torch.jit.unused def num_cell_anchors(self): """ Alias of `num_anchors`. """ return self.num_anchors @property @torch.jit.unused def num_anchors(self): """ Returns: list[int]: Each int is the number of anchors at every pixel location, on that feature map. For example, if at every pixel we use anchors of 3 aspect ratios and 5 sizes, the number of anchors is 15. (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) In standard RPN models, `num_anchors` on every feature map is the same. """ return [len(cell_anchors) for cell_anchors in self.cell_anchors] def _grid_anchors(self, grid_sizes: List[List[int]]): """ Returns: list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4 """ anchors = [] # buffers() not supported by torchscript. use named_buffers() instead buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()] for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers): shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) return anchors def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): """ Generate a tensor storing canonical anchor boxes, which are all anchor boxes of different sizes and aspect_ratios centered at (0, 0). We can later build the set of anchors for a full feature map by shifting and tiling these tensors (see `meth:_grid_anchors`). Args: sizes (tuple[float]): aspect_ratios (tuple[float]]): Returns: Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes in XYXY format. """ # This is different from the anchor generator defined in the original Faster R-CNN # code or Detectron. They yield the same AP, however the old version defines cell # anchors in a less natural way with a shift relative to the feature grid and # quantization that results in slightly different sizes for different aspect ratios. # See also https://github.com/facebookresearch/Detectron/issues/227 anchors = [] for size in sizes: area = size ** 2.0 for aspect_ratio in aspect_ratios: # s * s = w * h # a = h / w # ... some algebra ... # w = sqrt(s * s / a) # h = a * w w = math.sqrt(area / aspect_ratio) h = aspect_ratio * w x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 anchors.append([x0, y0, x1, y1]) return torch.tensor(anchors) def forward(self, features: List[torch.Tensor]): """ Args: features (list[Tensor]): list of backbone feature maps on which to generate anchors. Returns: list[Boxes]: a list of Boxes containing all the anchors for each feature map (i.e. 
the cell anchors repeated over all locations in the feature map). The number of anchors of each feature map is Hi x Wi x num_cell_anchors, where Hi, Wi are resolution of the feature map divided by anchor stride. """ grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) return [Boxes(x) for x in anchors_over_all_feature_maps] @ANCHOR_GENERATOR_REGISTRY.register() class RotatedAnchorGenerator(nn.Module): """ Compute rotated anchors used by Rotated RPN (RRPN), described in "Arbitrary-Oriented Scene Text Detection via Rotation Proposals". """ box_dim: int = 5 """ the dimension of each anchor box. """ @configurable def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5): """ This interface is experimental. Args: sizes (list[list[float]] or list[float]): If sizes is list[list[float]], sizes[i] is the list of anchor sizes (i.e. sqrt of anchor area) to use for the i-th feature map. If sizes is list[float], the sizes are used for all feature maps. Anchor sizes are given in absolute lengths in units of the input image; they do not dynamically scale if the input image size changes. aspect_ratios (list[list[float]] or list[float]): list of aspect ratios (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. strides (list[int]): stride of each input feature. angles (list[list[float]] or list[float]): list of angles (in degrees CCW) to use for anchors. Same "broadcast" rule for `sizes` applies. offset (float): Relative offset between the center of the first anchor and the top-left corner of the image. Value has to be in [0, 1). Recommend to use 0.5, which means half stride. """ super().__init__() self.strides = strides self.num_features = len(self.strides) sizes = _broadcast_params(sizes, self.num_features, "sizes") aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") angles = _broadcast_params(angles, self.num_features, "angles") self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles) self.offset = offset assert 0.0 <= self.offset < 1.0, self.offset @classmethod def from_config(cls, cfg, input_shape: List[ShapeSpec]): return { "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, "strides": [x.stride for x in input_shape], "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES, } def _calculate_anchors(self, sizes, aspect_ratios, angles): cell_anchors = [ self.generate_cell_anchors(size, aspect_ratio, angle).float() for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) ] return BufferList(cell_anchors) @property def num_cell_anchors(self): """ Alias of `num_anchors`. """ return self.num_anchors @property def num_anchors(self): """ Returns: list[int]: Each int is the number of anchors at every pixel location, on that feature map. For example, if at every pixel we use anchors of 3 aspect ratios, 2 sizes and 5 angles, the number of anchors is 30. (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS and ANCHOR_GENERATOR.ANGLES in config) In standard RRPN models, `num_anchors` on every feature map is the same. 
""" return [len(cell_anchors) for cell_anchors in self.cell_anchors] def _grid_anchors(self, grid_sizes): anchors = [] for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) zeros = torch.zeros_like(shift_x) shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) return anchors def generate_cell_anchors( self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2), angles=(-90, -60, -30, 0, 30, 60, 90), ): """ Generate a tensor storing canonical anchor boxes, which are all anchor boxes of different sizes, aspect_ratios, angles centered at (0, 0). We can later build the set of anchors for a full feature map by shifting and tiling these tensors (see `meth:_grid_anchors`). Args: sizes (tuple[float]): aspect_ratios (tuple[float]]): angles (tuple[float]]): Returns: Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. """ anchors = [] for size in sizes: area = size ** 2.0 for aspect_ratio in aspect_ratios: # s * s = w * h # a = h / w # ... some algebra ... # w = sqrt(s * s / a) # h = a * w w = math.sqrt(area / aspect_ratio) h = aspect_ratio * w anchors.extend([0, 0, w, h, a] for a in angles) return torch.tensor(anchors) def forward(self, features): """ Args: features (list[Tensor]): list of backbone feature maps on which to generate anchors. Returns: list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map (i.e. the cell anchors repeated over all locations in the feature map). The number of anchors of each feature map is Hi x Wi x num_cell_anchors, where Hi, Wi are resolution of the feature map divided by anchor stride. """ grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) return [RotatedBoxes(x) for x in anchors_over_all_feature_maps] def build_anchor_generator(cfg, input_shape): """ Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. """ anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)
banmo-main
third_party/detectron2_old/detectron2/modeling/anchor_generator.py
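A small sketch of the cell-anchor math documented in anchor_generator.py above; the sizes, aspect ratios, and stride are arbitrary example values rather than config defaults.

import math
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator

gen = DefaultAnchorGenerator(sizes=[[32, 64]], aspect_ratios=[[0.5, 1.0, 2.0]], strides=[16])
print(gen.num_anchors)   # [6] -> 2 sizes x 3 aspect ratios at every feature-map location

# For size s and aspect ratio a = h / w, the cell anchor uses w = sqrt(s*s / a), h = a * w,
# centered at (0, 0), exactly as in generate_cell_anchors above.
s, a = 32.0, 2.0
w = math.sqrt(s * s / a)
h = a * w
print(-w / 2, -h / 2, w / 2, h / 2)   # one XYXY cell anchor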
# Copyright (c) Facebook, Inc. and its affiliates. import torch from detectron2.layers import nonzero_tuple __all__ = ["subsample_labels"] def subsample_labels( labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int ): """ Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of positives & negatives. It will try to return as many positives as possible without exceeding `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives. Args: labels (Tensor): (N, ) label vector with values: * -1: ignore * bg_label: background ("negative") class * otherwise: one or more foreground ("positive") classes num_samples (int): The total number of labels with value >= 0 to return. Values that are not sampled will be filled with -1 (ignore). positive_fraction (float): The number of subsampled labels with values > 0 is `min(num_positives, int(positive_fraction * num_samples))`. The number of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. In order words, if there are not enough positives, the sample is filled with negatives. If there are also not enough negatives, then as many elements are sampled as is possible. bg_label (int): label index of background ("negative") class. Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. """ positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] negative = nonzero_tuple(labels == bg_label)[0] num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx
banmo-main
third_party/detectron2_old/detectron2/modeling/sampling.py
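A toy call to subsample_labels illustrating the behaviour described in its docstring; the label vector and sample sizes below are arbitrary.

import torch
from detectron2.modeling.sampling import subsample_labels

labels = torch.tensor([-1, 0, 0, 0, 1, 2, 1, 0])   # -1 = ignore, 0 = background here
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
# At most 2 positives are drawn from indices {4, 5, 6}; the remaining slots are negatives.
print(pos_idx, neg_idx)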
# Copyright (c) Facebook, Inc. and its affiliates. import torch from torch.nn import functional as F from detectron2.structures import Instances, ROIMasks # perhaps should rename to "resize_instance" def detector_postprocess( results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5 ): """ Resize the output instances. The input images are often resized when entering an object detector. As a result, we often need the outputs of the detector in a different resolution from its inputs. This function will resize the raw outputs of an R-CNN detector to produce outputs according to the desired output resolution. Args: results (Instances): the raw outputs from the detector. `results.image_size` contains the input image resolution the detector sees. This object might be modified in-place. output_height, output_width: the desired output resolution. Returns: Instances: the resized output from the model, based on the output resolution """ # Change to 'if is_tracing' after PT1.7 if isinstance(output_height, torch.Tensor): # Converts integer tensors to float temporaries to ensure true # division is performed when computing scale_x and scale_y. output_width_tmp = output_width.float() output_height_tmp = output_height.float() new_size = torch.stack([output_height, output_width]) else: new_size = (output_height, output_width) output_width_tmp = output_width output_height_tmp = output_height scale_x, scale_y = ( output_width_tmp / results.image_size[1], output_height_tmp / results.image_size[0], ) results = Instances(new_size, **results.get_fields()) if results.has("pred_boxes"): output_boxes = results.pred_boxes elif results.has("proposal_boxes"): output_boxes = results.proposal_boxes else: output_boxes = None assert output_boxes is not None, "Predictions must contain boxes!" output_boxes.scale(scale_x, scale_y) output_boxes.clip(results.image_size) results = results[output_boxes.nonempty()] if results.has("pred_masks"): if isinstance(results.pred_masks, ROIMasks): roi_masks = results.pred_masks else: # pred_masks is a tensor of shape (N, 1, M, M) roi_masks = ROIMasks(results.pred_masks[:, 0, :, :]) results.pred_masks = roi_masks.to_bitmasks( results.pred_boxes, output_height, output_width, mask_threshold ).tensor # TODO return ROIMasks/BitMask object in the future if results.has("pred_keypoints"): results.pred_keypoints[:, :, 0] *= scale_x results.pred_keypoints[:, :, 1] *= scale_y return results def sem_seg_postprocess(result, img_size, output_height, output_width): """ Return semantic segmentation predictions in the original resolution. The input images are often resized when entering semantic segmentor. Moreover, in same cases, they also padded inside segmentor to be divisible by maximum network stride. As a result, we often need the predictions of the segmentor in a different resolution from its inputs. Args: result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), where C is the number of classes, and H, W are the height and width of the prediction. img_size (tuple): image size that segmentor is taking as input. output_height, output_width: the desired output resolution. Returns: semantic segmentation prediction (Tensor): A tensor of the shape (C, output_height, output_width) that contains per-pixel soft predictions. """ result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) result = F.interpolate( result, size=(output_height, output_width), mode="bilinear", align_corners=False )[0] return result
banmo-main
third_party/detectron2_old/detectron2/modeling/postprocessing.py
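A sketch of rescaling raw detections with detector_postprocess from the file above; the Instances fields are fabricated toy values.

import torch
from detectron2.structures import Boxes, Instances
from detectron2.modeling.postprocessing import detector_postprocess

results = Instances((400, 600))                     # resolution the detector saw
results.pred_boxes = Boxes(torch.tensor([[10.0, 20.0, 110.0, 220.0]]))
results.scores = torch.tensor([0.9])
results.pred_classes = torch.tensor([0])

resized = detector_postprocess(results, output_height=800, output_width=1200)
print(resized.pred_boxes.tensor)                    # boxes scaled by (2, 2) and clipped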
# Copyright (c) Facebook, Inc. and its affiliates. import torch from detectron2.utils.logger import _log_api_usage from detectron2.utils.registry import Registry META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip META_ARCH_REGISTRY.__doc__ = """ Registry for meta-architectures, i.e. the whole model. The registered object will be called with `obj(cfg)` and expected to return a `nn.Module` object. """ def build_model(cfg): """ Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. Note that it does not load any weights from ``cfg``. """ meta_arch = cfg.MODEL.META_ARCHITECTURE model = META_ARCH_REGISTRY.get(meta_arch)(cfg) model.to(torch.device(cfg.MODEL.DEVICE)) _log_api_usage("modeling.meta_arch." + meta_arch) return model
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/build.py
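A minimal sketch of build_model; it assumes the detectron2 model_zoo package and the standard Mask R-CNN baseline config are available in this checkout, and it only builds the architecture (no weights are loaded, as the docstring above notes).

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
)
cfg.MODEL.DEVICE = "cpu"   # keep the sketch GPU-free
model = build_model(cfg)   # architecture only; load weights separately (e.g. DetectionCheckpointer)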
# Copyright (c) Facebook, Inc. and its affiliates. import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.data.detection_utils import convert_image_to_rgb from detectron2.structures import ImageList, Instances from detectron2.utils.events import get_event_storage from detectron2.utils.logger import log_first_n from ..backbone import Backbone, build_backbone from ..postprocessing import detector_postprocess from ..proposal_generator import build_proposal_generator from ..roi_heads import build_roi_heads from .build import META_ARCH_REGISTRY __all__ = ["GeneralizedRCNN", "ProposalNetwork"] @META_ARCH_REGISTRY.register() class GeneralizedRCNN(nn.Module): """ Generalized R-CNN. Any models that contains the following three components: 1. Per-image feature extraction (aka backbone) 2. Region proposal generation 3. Per-region feature extraction and prediction """ @configurable def __init__( self, *, backbone: Backbone, proposal_generator: nn.Module, roi_heads: nn.Module, pixel_mean: Tuple[float], pixel_std: Tuple[float], input_format: Optional[str] = None, vis_period: int = 0, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface proposal_generator: a module that generates proposals using backbone features roi_heads: a ROI head that performs per-region computation pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image input_format: describe the meaning of channels of input. Needed by visualization vis_period: the period to run visualization. Set to 0 to disable. """ super().__init__() self.backbone = backbone self.proposal_generator = proposal_generator self.roi_heads = roi_heads self.input_format = input_format self.vis_period = vis_period if vis_period > 0: assert input_format is not None, "input_format is required for visualization!" self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) return { "backbone": backbone, "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), "roi_heads": build_roi_heads(cfg, backbone.output_shape()), "input_format": cfg.INPUT.FORMAT, "vis_period": cfg.VIS_PERIOD, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, } @property def device(self): return self.pixel_mean.device def visualize_training(self, batched_inputs, proposals): """ A function used to visualize images and proposals. It shows ground truth bounding boxes on the original image and up to 20 top-scoring predicted object proposals on the original image. Users can implement different visualization functions for different models. Args: batched_inputs (list): a list that contains input to the model. proposals (list): a list that contains predicted proposals. Both batched_inputs and proposals should have the same length. 
""" from detectron2.utils.visualizer import Visualizer storage = get_event_storage() max_vis_prop = 20 for input, prop in zip(batched_inputs, proposals): img = input["image"] img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) v_gt = Visualizer(img, None) v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes) anno_img = v_gt.get_image() box_size = min(len(prop.proposal_boxes), max_vis_prop) v_pred = Visualizer(img, None) v_pred = v_pred.overlay_instances( boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy() ) prop_img = v_pred.get_image() vis_img = np.concatenate((anno_img, prop_img), axis=1) vis_img = vis_img.transpose(2, 0, 1) vis_name = "Left: GT bounding boxes; Right: Predicted proposals" storage.put_image(vis_name, vis_img) break # only visualize one image in a batch def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper` . Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * image: Tensor, image in (C, H, W) format. * instances (optional): groundtruth :class:`Instances` * proposals (optional): :class:`Instances`, precomputed proposals. Other information that's included in the original dicts, such as: * "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. Returns: list[dict]: Each dict is the output for one input image. The dict contains one key "instances" whose value is a :class:`Instances`. The :class:`Instances` object has the following keys: "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" """ if not self.training: return self.inference(batched_inputs) images = self.preprocess_image(batched_inputs) if "instances" in batched_inputs[0]: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] else: gt_instances = None features = self.backbone(images.tensor) if self.proposal_generator is not None: proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) else: assert "proposals" in batched_inputs[0] proposals = [x["proposals"].to(self.device) for x in batched_inputs] proposal_losses = {} _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) if self.vis_period > 0: storage = get_event_storage() if storage.iter % self.vis_period == 0: self.visualize_training(batched_inputs, proposals) losses = {} losses.update(detector_losses) losses.update(proposal_losses) return losses def inference( self, batched_inputs: List[Dict[str, torch.Tensor]], detected_instances: Optional[List[Instances]] = None, do_postprocess: bool = True, ): """ Run inference on the given inputs. Args: batched_inputs (list[dict]): same as in :meth:`forward` detected_instances (None or list[Instances]): if not None, it contains an `Instances` object per image. The `Instances` object contains "pred_boxes" and "pred_classes" which are known boxes in the image. The inference will then skip the detection of bounding boxes, and only predict other per-ROI outputs. do_postprocess (bool): whether to apply post-processing on the outputs. Returns: When do_postprocess=True, same as in :meth:`forward`. Otherwise, a list[Instances] containing raw network outputs. 
""" assert not self.training images = self.preprocess_image(batched_inputs) features = self.backbone(images.tensor) if detected_instances is None: if self.proposal_generator is not None: proposals, _ = self.proposal_generator(images, features, None) else: assert "proposals" in batched_inputs[0] proposals = [x["proposals"].to(self.device) for x in batched_inputs] results, _ = self.roi_heads(images, features, proposals, None) else: detected_instances = [x.to(self.device) for x in detected_instances] results = self.roi_heads.forward_with_given_boxes(features, detected_instances) if do_postprocess: assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) else: return results def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]): """ Normalize, pad and batch the input images. """ images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.backbone.size_divisibility) return images @staticmethod def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes): """ Rescale the output instances to the target size. """ # note: private function; subject to changes processed_results = [] for results_per_image, input_per_image, image_size in zip( instances, batched_inputs, image_sizes ): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = detector_postprocess(results_per_image, height, width) processed_results.append({"instances": r}) return processed_results @META_ARCH_REGISTRY.register() class ProposalNetwork(nn.Module): """ A meta architecture that only predicts object proposals. """ @configurable def __init__( self, *, backbone: Backbone, proposal_generator: nn.Module, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface proposal_generator: a module that generates proposals using backbone features pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image """ super().__init__() self.backbone = backbone self.proposal_generator = proposal_generator self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) return { "backbone": backbone, "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, } @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): """ Args: Same as in :class:`GeneralizedRCNN.forward` Returns: list[dict]: Each dict is the output for one input image. The dict contains one key "proposals" whose value is a :class:`Instances` with keys "proposal_boxes" and "objectness_logits". 
""" images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.backbone.size_divisibility) features = self.backbone(images.tensor) if "instances" in batched_inputs[0]: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] elif "targets" in batched_inputs[0]: log_first_n( logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 ) gt_instances = [x["targets"].to(self.device) for x in batched_inputs] else: gt_instances = None proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) # In training, the proposals are not useful at all but we generate them anyway. # This makes RPN-only models about 5% slower. if self.training: return proposal_losses processed_results = [] for results_per_image, input_per_image, image_size in zip( proposals, batched_inputs, images.image_sizes ): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = detector_postprocess(results_per_image, height, width) processed_results.append({"proposals": r}) return processed_results
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/rcnn.py
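An illustrative inference call matching the batched_inputs format documented in GeneralizedRCNN.forward above; `model` is assumed to be an already-built GeneralizedRCNN (e.g. from build_model), and the zero image tensor is a placeholder.

import torch

image = torch.zeros(3, 480, 640)   # (C, H, W), channel order per cfg.INPUT.FORMAT
batched_inputs = [{"image": image, "height": 480, "width": 640}]

model.eval()
with torch.no_grad():
    outputs = model(batched_inputs)        # eval mode dispatches to inference()
print(outputs[0]["instances"].pred_boxes)  # resized to the requested height/width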
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import logging from typing import Dict, List import torch from torch import nn from detectron2.config import configurable from detectron2.structures import ImageList from ..postprocessing import detector_postprocess, sem_seg_postprocess from .build import META_ARCH_REGISTRY from .rcnn import GeneralizedRCNN from .semantic_seg import build_sem_seg_head __all__ = ["PanopticFPN"] @META_ARCH_REGISTRY.register() class PanopticFPN(GeneralizedRCNN): """ Implement the paper :paper:`PanopticFPN`. """ @configurable def __init__( self, *, sem_seg_head: nn.Module, combine_overlap_thresh: float = 0.5, combine_stuff_area_thresh: float = 4096, combine_instances_score_thresh: float = 0.5, **kwargs, ): """ NOTE: this interface is experimental. Args: sem_seg_head: a module for the semantic segmentation head. combine_overlap_thresh: combine masks into one instances if they have enough overlap combine_stuff_area_thresh: ignore stuff areas smaller than this threshold combine_instances_score_thresh: ignore instances whose score is smaller than this threshold Other arguments are the same as :class:`GeneralizedRCNN`. """ super().__init__(**kwargs) self.sem_seg_head = sem_seg_head # options when combining instance & semantic outputs self.combine_overlap_thresh = combine_overlap_thresh self.combine_stuff_area_thresh = combine_stuff_area_thresh self.combine_instances_score_thresh = combine_instances_score_thresh @classmethod def from_config(cls, cfg): ret = super().from_config(cfg) ret.update( { "combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH, "combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT, "combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa } ) ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape()) logger = logging.getLogger(__name__) if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED: logger.warning( "PANOPTIC_FPN.COMBINED.ENABLED is no longer used. " " model.inference(do_postprocess=) should be used to toggle postprocessing." ) if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0: w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT logger.warning( "PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head." ) def update_weight(x): if isinstance(x, dict): return {k: v * w for k, v in x.items()} else: return x * w roi_heads = ret["roi_heads"] roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight) roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight) return ret def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "instances": Instances * "sem_seg": semantic segmentation ground truth. * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. Returns: list[dict]: each dict has the results for one image. The dict contains the following keys: * "instances": see :meth:`GeneralizedRCNN.forward` for its format. * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. * "panoptic_seg": See the return value of :func:`combine_semantic_and_instance_outputs` for its format. 
""" if not self.training: return self.inference(batched_inputs) images = self.preprocess_image(batched_inputs) features = self.backbone(images.tensor) assert "sem_seg" in batched_inputs[0] gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] gt_sem_seg = ImageList.from_tensors( gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value ).tensor sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) gt_instances = [x["instances"].to(self.device) for x in batched_inputs] proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) detector_results, detector_losses = self.roi_heads( images, features, proposals, gt_instances ) losses = sem_seg_losses losses.update(proposal_losses) losses.update(detector_losses) return losses def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True): """ Run inference on the given inputs. Args: batched_inputs (list[dict]): same as in :meth:`forward` do_postprocess (bool): whether to apply post-processing on the outputs. Returns: When do_postprocess=True, see docs in :meth:`forward`. Otherwise, returns a (list[Instances], list[Tensor]) that contains the raw detector outputs, and raw semantic segmentation outputs. """ images = self.preprocess_image(batched_inputs) features = self.backbone(images.tensor) sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None) proposals, _ = self.proposal_generator(images, features, None) detector_results, _ = self.roi_heads(images, features, proposals, None) if do_postprocess: processed_results = [] for sem_seg_result, detector_result, input_per_image, image_size in zip( sem_seg_results, detector_results, batched_inputs, images.image_sizes ): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) detector_r = detector_postprocess(detector_result, height, width) processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) panoptic_r = combine_semantic_and_instance_outputs( detector_r, sem_seg_r.argmax(dim=0), self.combine_overlap_thresh, self.combine_stuff_area_thresh, self.combine_instances_score_thresh, ) processed_results[-1]["panoptic_seg"] = panoptic_r return processed_results else: return detector_results, sem_seg_results def combine_semantic_and_instance_outputs( instance_results, semantic_results, overlap_threshold, stuff_area_thresh, instances_score_thresh, ): """ Implement a simple combining logic following "combine_semantic_and_instance_predictions.py" in panopticapi to produce panoptic segmentation outputs. Args: instance_results: output of :func:`detector_postprocess`. semantic_results: an (H, W) tensor, each element is the contiguous semantic category id Returns: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". 
""" panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) # sort instance outputs by scores sorted_inds = torch.argsort(-instance_results.scores) current_segment_id = 0 segments_info = [] instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) # Add instances one-by-one, check for overlaps with existing ones for inst_id in sorted_inds: score = instance_results.scores[inst_id].item() if score < instances_score_thresh: break mask = instance_masks[inst_id] # H,W mask_area = mask.sum().item() if mask_area == 0: continue intersect = (mask > 0) & (panoptic_seg > 0) intersect_area = intersect.sum().item() if intersect_area * 1.0 / mask_area > overlap_threshold: continue if intersect_area > 0: mask = mask & (panoptic_seg == 0) current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_info.append( { "id": current_segment_id, "isthing": True, "score": score, "category_id": instance_results.pred_classes[inst_id].item(), "instance_id": inst_id.item(), } ) # Add semantic results to remaining empty areas semantic_labels = torch.unique(semantic_results).cpu().tolist() for semantic_label in semantic_labels: if semantic_label == 0: # 0 is a special "thing" class continue mask = (semantic_results == semantic_label) & (panoptic_seg == 0) mask_area = mask.sum().item() if mask_area < stuff_area_thresh: continue current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_info.append( { "id": current_segment_id, "isthing": False, "category_id": semantic_label, "area": mask_area, } ) return panoptic_seg, segments_info
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/panoptic_fpn.py
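A toy call to combine_semantic_and_instance_outputs showing the combination logic above on fabricated tensors; all values (mask, scores, thresholds) are illustrative.

import torch
from detectron2.structures import Instances
from detectron2.modeling.meta_arch.panoptic_fpn import combine_semantic_and_instance_outputs

H, W = 4, 6
inst = Instances((H, W))
inst.scores = torch.tensor([0.9])
inst.pred_classes = torch.tensor([3])
mask = torch.zeros(1, H, W, dtype=torch.bool)
mask[0, :2, :3] = True                              # one 2x3 "thing" mask
inst.pred_masks = mask

sem_seg = torch.ones(H, W, dtype=torch.long)        # a single stuff class everywhere
panoptic_seg, segments_info = combine_semantic_and_instance_outputs(
    inst, sem_seg, overlap_threshold=0.5, stuff_area_thresh=1, instances_score_thresh=0.5
)
print(panoptic_seg.unique().tolist(), segments_info)  # segment 1 (thing) and segment 2 (stuff)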
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. from .build import META_ARCH_REGISTRY, build_model # isort:skip from .panoptic_fpn import PanopticFPN # import all the meta_arch, so they will be registered from .rcnn import GeneralizedRCNN, ProposalNetwork from .retinanet import RetinaNet from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head __all__ = list(globals().keys())
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import logging import math import numpy as np from typing import Dict, List, Tuple import torch from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.data.detection_utils import convert_image_to_rgb from detectron2.layers import ShapeSpec, batched_nms, cat, get_norm, nonzero_tuple from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from ..postprocessing import detector_postprocess from .build import META_ARCH_REGISTRY __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) def permute_to_N_HWA_K(tensor, K: int): """ Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) """ assert tensor.dim() == 4, tensor.shape N, _, H, W = tensor.shape tensor = tensor.view(N, -1, K, H, W) tensor = tensor.permute(0, 3, 4, 1, 2) tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) return tensor @META_ARCH_REGISTRY.register() class RetinaNet(nn.Module): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). # Input parameters pixel_mean (Tuple[float]): Values to be used for image normalization (BGR order). To train on images of different number of channels, set different mean & std. 
Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] pixel_std (Tuple[float]): When using pre-trained models in Detectron1 or any MSRA models, std has been absorbed into its conv1 weights, so the std needs to be set 1. Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) vis_period (int): The period (in terms of steps) for minibatch visualization at train time. Set to 0 to disable. input_format (str): Whether the model needs RGB, YUV, HSV etc. """ super().__init__() self.backbone = backbone self.head = head self.head_in_features = head_in_features if len(self.backbone.output_shape()) != len(self.head_in_features): logger.warning("[RetinaNet] Backbone produces unused features.") # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher self.num_classes = num_classes # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) """ In Detectron1, loss is normalized by number of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a large variance and using it lead to lower performance. Here we maintain an EMA of #foreground to stabilize the normalizer. """ self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small self.loss_normalizer_momentum = 0.9 @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } @property def device(self): return self.pixel_mean.device def visualize_training(self, batched_inputs, results): """ A function used to visualize ground truth images and final network predictions. 
It shows ground truth bounding boxes on the original image and up to 20 predicted object bounding boxes on the original image. Args: batched_inputs (list): a list that contains input to the model. results (List[Instances]): a list of #images elements. """ from detectron2.utils.visualizer import Visualizer assert len(batched_inputs) == len( results ), "Cannot visualize inputs and results of different sizes" storage = get_event_storage() max_boxes = 20 image_index = 0 # only visualize a single image img = batched_inputs[image_index]["image"] img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) v_gt = Visualizer(img, None) v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes) anno_img = v_gt.get_image() processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1]) predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy() v_pred = Visualizer(img, None) v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes]) prop_img = v_pred.get_image() vis_img = np.vstack((anno_img, prop_img)) vis_img = vis_img.transpose(2, 0, 1) vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results" storage.put_image(vis_name, vis_img) def forward(self, batched_inputs: List[Dict[str, Tensor]]): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper` . Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * image: Tensor, image in (C, H, W) format. * instances: Instances Other information that's included in the original dicts, such as: * "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. Returns: In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the loss. Used during training only. In inference, the standard output format, described in :doc:`/tutorials/models`. """ images = self.preprocess_image(batched_inputs) features = self.backbone(images.tensor) features = [features[f] for f in self.head_in_features] anchors = self.anchor_generator(features) pred_logits, pred_anchor_deltas = self.head(features) # Transpose the Hi*Wi*A dimension to the middle: pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas] if self.training: assert not torch.jit.is_scripting(), "Not supported" assert "instances" in batched_inputs[0], "Instance annotations are missing in training!" 
gt_instances = [x["instances"].to(self.device) for x in batched_inputs] gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) losses = self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) if self.vis_period > 0: storage = get_event_storage() if storage.iter % self.vis_period == 0: results = self.inference( anchors, pred_logits, pred_anchor_deltas, images.image_sizes ) self.visualize_training(batched_inputs, results) return losses else: results = self.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes) if torch.jit.is_scripting(): return results processed_results = [] for results_per_image, input_per_image, image_size in zip( results, batched_inputs, images.image_sizes ): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = detector_postprocess(results_per_image, height, width) processed_results.append({"instances": r}) return processed_results def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + ( 1 - self.loss_normalizer_momentum ) * max(num_pos_anchors, 1) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / self.loss_normalizer, "loss_box_reg": loss_box_reg / self.loss_normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. 
Values are undefined for those anchors not labeled as foreground. """ anchors = Boxes.cat(anchors) # Rx4 gt_labels = [] matched_gt_boxes = [] for gt_per_image in gt_instances: match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def inference( self, anchors: List[Boxes], pred_logits: List[Tensor], pred_anchor_deltas: List[Tensor], image_sizes: List[Tuple[int, int]], ): """ Arguments: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contain anchors of this image on the specific feature level. pred_logits, pred_anchor_deltas: list[Tensor], one per level. Each has shape (N, Hi * Wi * Ai, K or 4) image_sizes (List[(h, w)]): the input image sizes Returns: results (List[Instances]): a list of #images elements. """ results: List[Instances] = [] for img_idx, image_size in enumerate(image_sizes): pred_logits_per_image = [x[img_idx] for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, pred_logits_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ boxes_all = [] scores_all = [] class_idxs_all = [] # Iterate over every feature level for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors): # (HxWxAxK,) predicted_prob = box_cls_i.flatten().sigmoid_() # Apply two filtering below to make NMS faster. # 1. Keep boxes with confidence score higher than threshold keep_idxs = predicted_prob > self.test_score_thresh predicted_prob = predicted_prob[keep_idxs] topk_idxs = nonzero_tuple(keep_idxs)[0] # 2. 
Keep top k top scoring boxes only num_topk = min(self.test_topk_candidates, topk_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) predicted_prob, idxs = predicted_prob.sort(descending=True) predicted_prob = predicted_prob[:num_topk] topk_idxs = topk_idxs[idxs[:num_topk]] anchor_idxs = topk_idxs // self.num_classes classes_idxs = topk_idxs % self.num_classes box_reg_i = box_reg_i[anchor_idxs] anchors_i = anchors_i[anchor_idxs] # predict boxes predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor) boxes_all.append(predicted_boxes) scores_all.append(predicted_prob) class_idxs_all.append(classes_idxs) boxes_all, scores_all, class_idxs_all = [ cat(x) for x in [boxes_all, scores_all, class_idxs_all] ] keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.test_nms_thresh) keep = keep[: self.max_detections_per_image] result = Instances(image_size) result.pred_boxes = Boxes(boxes_all[keep]) result.scores = scores_all[keep] result.pred_classes = class_idxs_all[keep] return result def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]): """ Normalize, pad and batch the input images. """ images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.backbone.size_divisibility) return images class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. 
prior_prob (float): Prior weight for computing bias """ super().__init__() if norm == "BN" or norm == "SyncBN": logger.warning("Shared norm does not work well for BN, SyncBN, expect poor results") cls_subnet = [] bbox_subnet = [] for in_channels, out_channels in zip( [input_shape[0].channels] + list(conv_dims), conv_dims ): cls_subnet.append( nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) ) if norm: cls_subnet.append(get_norm(norm, out_channels)) cls_subnet.append(nn.ReLU()) bbox_subnet.append( nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) ) if norm: bbox_subnet.append(get_norm(norm, out_channels)) bbox_subnet.append(nn.ReLU()) self.cls_subnet = nn.Sequential(*cls_subnet) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.cls_score = nn.Conv2d( conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1 ) self.bbox_pred = nn.Conv2d( conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1 ) # Initialization for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: for layer in modules.modules(): if isinstance(layer, nn.Conv2d): torch.nn.init.normal_(layer.weight, mean=0, std=0.01) torch.nn.init.constant_(layer.bias, 0) # Use prior in model initialization to improve stability bias_value = -(math.log((1 - prior_prob) / prior_prob)) torch.nn.init.constant_(self.cls_score.bias, bias_value) @classmethod def from_config(cls, cfg, input_shape: List[ShapeSpec]): num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors assert ( len(set(num_anchors)) == 1 ), "Using different number of anchors between levels is not currently supported!" num_anchors = num_anchors[0] return { "input_shape": input_shape, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "conv_dims": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS, "prior_prob": cfg.MODEL.RETINANET.PRIOR_PROB, "norm": cfg.MODEL.RETINANET.NORM, "num_anchors": num_anchors, } def forward(self, features: List[Tensor]): """ Arguments: features (list[Tensor]): FPN feature map tensors in high to low resolution. Each tensor in the list correspond to different feature levels. Returns: logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). The tensor predicts the classification probability at each spatial position for each of the A anchors and K object classes. bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). The tensor predicts 4-vector (dx,dy,dw,dh) box regression values for every anchor. These values are the relative offset between the anchor and the ground truth box. """ logits = [] bbox_reg = [] for feature in features: logits.append(self.cls_score(self.cls_subnet(feature))) bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) return logits, bbox_reg
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/retinanet.py
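
To make the (N, A*K, H, W) -> (N, H*W*A, K) reshaping done by `permute_to_N_HWA_K` in the retinanet.py record above concrete, here is a self-contained PyTorch shape check; the tensor sizes are made up for illustration.

import torch

def permute_to_N_HWA_K(tensor, K):
    # Same reshaping as in retinanet.py: (N, A*K, H, W) -> (N, H*W*A, K)
    N, _, H, W = tensor.shape
    tensor = tensor.view(N, -1, K, H, W)          # (N, A, K, H, W)
    tensor = tensor.permute(0, 3, 4, 1, 2)        # (N, H, W, A, K)
    return tensor.reshape(N, -1, K)               # (N, H*W*A, K)

N, A, K, H, W = 2, 9, 80, 13, 17                  # hypothetical sizes
logits = torch.randn(N, A * K, H, W)
out = permute_to_N_HWA_K(logits, K)
assert out.shape == (N, H * W * A, K)
print(out.shape)                                  # torch.Size([2, 1989, 80])
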
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np from typing import Callable, Dict, Optional, Tuple, Union import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.structures import ImageList from detectron2.utils.registry import Registry from ..backbone import Backbone, build_backbone from ..postprocessing import sem_seg_postprocess from .build import META_ARCH_REGISTRY __all__ = ["SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead", "build_sem_seg_head"] SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") SEM_SEG_HEADS_REGISTRY.__doc__ = """ Registry for semantic segmentation heads, which make semantic segmentation predictions from feature maps. """ @META_ARCH_REGISTRY.register() class SemanticSegmentor(nn.Module): """ Main class for semantic segmentation architectures. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) return { "backbone": backbone, "sem_seg_head": sem_seg_head, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, } @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "sem_seg": semantic segmentation ground truth * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model (may be different from input resolution), used in inference. Returns: list[dict]: Each dict is the output for one input image. The dict contains one key "sem_seg" whose value is a Tensor that represents the per-pixel segmentation prediced by the head. The prediction has shape KxHxW that represents the logits of each class for each pixel. 
""" images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.backbone.size_divisibility) features = self.backbone(images.tensor) if "sem_seg" in batched_inputs[0]: targets = [x["sem_seg"].to(self.device) for x in batched_inputs] targets = ImageList.from_tensors( targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value ).tensor else: targets = None results, losses = self.sem_seg_head(features, targets) if self.training: return losses processed_results = [] for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): height = input_per_image.get("height") width = input_per_image.get("width") r = sem_seg_postprocess(result, image_size, height, width) processed_results.append({"sem_seg": r}) return processed_results def build_sem_seg_head(cfg, input_shape): """ Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.NAME return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) @SEM_SEG_HEADS_REGISTRY.register() class SemSegFPNHead(nn.Module): """ A semantic segmentation head described in :paper:`PanopticFPN`. It takes a list of FPN features as input, and applies a sequence of 3x3 convs and upsampling to scale all of them to the stride defined by ``common_stride``. Then these features are added and used to make final predictions by another 1x1 conv layer. """ @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, num_classes: int, conv_dims: int, common_stride: int, loss_weight: float = 1.0, norm: Optional[Union[str, Callable]] = None, ignore_value: int = -1, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features num_classes: number of classes to predict conv_dims: number of output channels for the intermediate conv layers. common_stride: the common stride that all features will be upscaled to loss_weight: loss weight norm (str or callable): normalization for all conv layers ignore_value: category id to be ignored during training. 
""" super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] self.ignore_value = ignore_value self.common_stride = common_stride self.loss_weight = loss_weight self.scale_heads = [] for in_feature, stride, channels in zip( self.in_features, feature_strides, feature_channels ): head_ops = [] head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride))) for k in range(head_length): norm_module = get_norm(norm, conv_dims) conv = Conv2d( channels if k == 0 else conv_dims, conv_dims, kernel_size=3, stride=1, padding=1, bias=not norm, norm=norm_module, activation=F.relu, ) weight_init.c2_msra_fill(conv) head_ops.append(conv) if stride != self.common_stride: head_ops.append( nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) ) self.scale_heads.append(nn.Sequential(*head_ops)) self.add_module(in_feature, self.scale_heads[-1]) self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) weight_init.c2_msra_fill(self.predictor) @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): return { "input_shape": { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES }, "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, "conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, "common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, "norm": cfg.MODEL.SEM_SEG_HEAD.NORM, "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, } def forward(self, features, targets=None): """ Returns: In training, returns (None, dict of losses) In inference, returns (CxHxW logits, {}) """ x = self.layers(features) if self.training: return None, self.losses(x, targets) else: x = F.interpolate( x, scale_factor=self.common_stride, mode="bilinear", align_corners=False ) return x, {} def layers(self, features): for i, f in enumerate(self.in_features): if i == 0: x = self.scale_heads[i](features[f]) else: x = x + self.scale_heads[i](features[f]) x = self.predictor(x) return x def losses(self, predictions, targets): predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163 predictions = F.interpolate( predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False ) loss = F.cross_entropy( predictions, targets, reduction="mean", ignore_index=self.ignore_value ) losses = {"loss_sem_seg": loss * self.loss_weight} return losses
banmo-main
third_party/detectron2_old/detectron2/modeling/meta_arch/semantic_seg.py
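
A pure-PyTorch sketch of the fusion idea behind `SemSegFPNHead` in the semantic_seg.py record above (illustration only, not the detectron2 module): each FPN level is upsampled in x2 steps until it reaches the common stride, then all levels are summed before the final predictor. The strides, channel count, and input size below are assumed for the example.

import math
import torch
import torch.nn.functional as F

common_stride = 4
feature_strides = {"p2": 4, "p3": 8, "p4": 16, "p5": 32}   # assumed FPN strides
feats = {k: torch.randn(1, 8, 256 // s, 256 // s) for k, s in feature_strides.items()}

fused = None
for name, stride in feature_strides.items():
    x = feats[name]
    # number of x2 upsampling steps for this level (mirrors head_length above)
    steps = max(1, int(math.log2(stride) - math.log2(common_stride)))
    if stride != common_stride:
        for _ in range(steps):
            x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
    fused = x if fused is None else fused + x

print(fused.shape)   # every level now has common-stride resolution: (1, 8, 64, 64)
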
# Copyright (c) Facebook, Inc. and its affiliates. from detectron2.utils.registry import Registry PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ Registry for proposal generator, which produces object proposals from feature maps. The registered object will be called with `obj(cfg, input_shape)`. The call should return a `nn.Module` object. """ from . import rpn, rrpn # noqa F401 isort:skip def build_proposal_generator(cfg, input_shape): """ Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. The name can be "PrecomputedProposals" to use no proposal generator. """ name = cfg.MODEL.PROPOSAL_GENERATOR.NAME if name == "PrecomputedProposals": return None return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
banmo-main
third_party/detectron2_old/detectron2/modeling/proposal_generator/build.py
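
A minimal sketch of the dispatch done by `build_proposal_generator` in the build.py record above, using a plain dict in place of the registry and a hypothetical cfg object built from `SimpleNamespace`; the behavior being illustrated is the special "PrecomputedProposals" name, for which no module is built.

from types import SimpleNamespace

GENERATORS = {"RPN": lambda cfg, shape: f"RPN(shape={shape})"}   # stand-in registry

def build_proposal_generator(cfg, input_shape):
    name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
    if name == "PrecomputedProposals":
        return None          # proposals come from the dataset; no generator module
    return GENERATORS[name](cfg, input_shape)

cfg = SimpleNamespace(MODEL=SimpleNamespace(PROPOSAL_GENERATOR=SimpleNamespace(NAME="RPN")))
print(build_proposal_generator(cfg, input_shape=["p2", "p3"]))   # RPN(shape=['p2', 'p3'])
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "PrecomputedProposals"
print(build_proposal_generator(cfg, input_shape=[]))             # None
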
# Copyright (c) Facebook, Inc. and its affiliates. from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, cat from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.memory import retry_if_cuda_oom from detectron2.utils.registry import Registry from ..anchor_generator import build_anchor_generator from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from ..sampling import subsample_labels from .build import PROPOSAL_GENERATOR_REGISTRY from .proposal_utils import find_top_rpn_proposals RPN_HEAD_REGISTRY = Registry("RPN_HEAD") RPN_HEAD_REGISTRY.__doc__ = """ Registry for RPN heads, which take feature maps and perform objectness classification and bounding box regression for anchors. The registered object will be called with `obj(cfg, input_shape)`. The call should return a `nn.Module` object. """ """ Shape shorthand in this module: N: number of images in the minibatch L: number of feature maps per image on which RPN is run A: number of cell anchors (must be the same for all feature maps) Hi, Wi: height and width of the i-th feature map B: size of the box parameterization Naming convention: objectness: refers to the binary classification of an anchor as object vs. not object. deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes. pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use sigmoid(pred_objectness_logits) to estimate P(object). gt_labels: ground-truth binary classification labels for objectness pred_anchor_deltas: predicted box2box transform deltas gt_anchor_deltas: ground-truth box2box transform deltas """ def build_rpn_head(cfg, input_shape): """ Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`. """ name = cfg.MODEL.RPN.HEAD_NAME return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape) @RPN_HEAD_REGISTRY.register() class StandardRPNHead(nn.Module): """ Standard RPN classification and regression heads described in :paper:`Faster R-CNN`. Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas specifying how to deform each anchor into an object proposal. """ @configurable def __init__( self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,) ): """ NOTE: this interface is experimental. Args: in_channels (int): number of input feature channels. When using multiple input features, they must have the same number of channels. num_anchors (int): number of anchors to predict for *each spatial position* on the feature map. The total number of anchors for each feature map will be `num_anchors * H * W`. box_dim (int): dimension of a box, which is also the number of box regression predictions to make for each anchor. An axis aligned box has box_dim=4, while a rotated box has box_dim=5. conv_dims (list[int]): a list of integers representing the output channels of N conv layers. Set it to -1 to use the same number of output channels as input channels. """ super().__init__() cur_channels = in_channels # Keeping the old variable names and structure for backwards compatiblity. # Otherwise the old checkpoints will fail to load. 
if len(conv_dims) == 1: out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0] # 3x3 conv for the hidden representation self.conv = self._get_rpn_conv(cur_channels, out_channels) cur_channels = out_channels else: self.conv = nn.Sequential() for k, conv_dim in enumerate(conv_dims): out_channels = cur_channels if conv_dim == -1 else conv_dim if out_channels <= 0: raise ValueError( f"Conv output channels should be greater than 0. Got {out_channels}" ) conv = self._get_rpn_conv(cur_channels, out_channels) self.conv.add_module(f"conv{k}", conv) cur_channels = out_channels # 1x1 conv for predicting objectness logits self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1) # 1x1 conv for predicting box2box transform deltas self.anchor_deltas = nn.Conv2d(cur_channels, num_anchors * box_dim, kernel_size=1, stride=1) # Keeping the order of weights initialization same for backwards compatiblility. for layer in self.modules(): if isinstance(layer, nn.Conv2d): nn.init.normal_(layer.weight, std=0.01) nn.init.constant_(layer.bias, 0) def _get_rpn_conv(self, in_channels, out_channels): return Conv2d( in_channels, out_channels, kernel_size=3, stride=1, padding=1, activation=nn.ReLU(), ) @classmethod def from_config(cls, cfg, input_shape): # Standard RPN is shared across levels: in_channels = [s.channels for s in input_shape] assert len(set(in_channels)) == 1, "Each level must have the same channel!" in_channels = in_channels[0] # RPNHead should take the same input as anchor generator # NOTE: it assumes that creating an anchor generator does not have unwanted side effect. anchor_generator = build_anchor_generator(cfg, input_shape) num_anchors = anchor_generator.num_anchors box_dim = anchor_generator.box_dim assert ( len(set(num_anchors)) == 1 ), "Each level must have the same number of anchors per spatial position" return { "in_channels": in_channels, "num_anchors": num_anchors[0], "box_dim": box_dim, "conv_dims": cfg.MODEL.RPN.CONV_DIMS, } def forward(self, features: List[torch.Tensor]): """ Args: features (list[Tensor]): list of feature maps Returns: list[Tensor]: A list of L elements. Element i is a tensor of shape (N, A, Hi, Wi) representing the predicted objectness logits for all anchors. A is the number of cell anchors. list[Tensor]: A list of L elements. Element i is a tensor of shape (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors to proposals. """ pred_objectness_logits = [] pred_anchor_deltas = [] for x in features: t = self.conv(x) pred_objectness_logits.append(self.objectness_logits(t)) pred_anchor_deltas.append(self.anchor_deltas(t)) return pred_objectness_logits, pred_anchor_deltas @PROPOSAL_GENERATOR_REGISTRY.register() class RPN(nn.Module): """ Region Proposal Network, introduced by :paper:`Faster R-CNN`. """ @configurable def __init__( self, *, in_features: List[str], head: nn.Module, anchor_generator: nn.Module, anchor_matcher: Matcher, box2box_transform: Box2BoxTransform, batch_size_per_image: int, positive_fraction: float, pre_nms_topk: Tuple[float, float], post_nms_topk: Tuple[float, float], nms_thresh: float = 0.7, min_box_size: float = 0.0, anchor_boundary_thresh: float = -1.0, loss_weight: Union[float, Dict[str, float]] = 1.0, box_reg_loss_type: str = "smooth_l1", smooth_l1_beta: float = 0.0, ): """ NOTE: this interface is experimental. 
Args: in_features (list[str]): list of names of input features to use head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` anchor_matcher (Matcher): label the anchors by matching them with ground truth. box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes batch_size_per_image (int): number of anchors per image to sample for training positive_fraction (float): fraction of foreground anchors to sample for training pre_nms_topk (tuple[float]): (train, test) that represents the number of top k proposals to select before NMS, in training and testing. post_nms_topk (tuple[float]): (train, test) that represents the number of top k proposals to select after NMS, in training and testing. nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals min_box_size (float): remove proposal boxes with any side smaller than this threshold, in the unit of input image pixels anchor_boundary_thresh (float): legacy option loss_weight (float|dict): weights to use for losses. Can be single float for weighting all rpn losses together, or a dict of individual weightings. Valid dict keys are: "loss_rpn_cls" - applied to classification loss "loss_rpn_loc" - applied to box regression loss box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1" """ super().__init__() self.in_features = in_features self.rpn_head = head self.anchor_generator = anchor_generator self.anchor_matcher = anchor_matcher self.box2box_transform = box2box_transform self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction # Map from self.training state to train/test settings self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]} self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]} self.nms_thresh = nms_thresh self.min_box_size = float(min_box_size) self.anchor_boundary_thresh = anchor_boundary_thresh if isinstance(loss_weight, float): loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight} self.loss_weight = loss_weight self.box_reg_loss_type = box_reg_loss_type self.smooth_l1_beta = smooth_l1_beta @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): in_features = cfg.MODEL.RPN.IN_FEATURES ret = { "in_features": in_features, "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE, "nms_thresh": cfg.MODEL.RPN.NMS_THRESH, "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION, "loss_weight": { "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT, "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT, }, "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS), "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE, "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA, } ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST) ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST) ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features]) ret["anchor_matcher"] = Matcher( 
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True ) ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features]) return ret def _subsample_labels(self, label): """ Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value (-1) for all elements that are not included in the sample. Args: labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. """ pos_idx, neg_idx = subsample_labels( label, self.batch_size_per_image, self.positive_fraction, 0 ) # Fill with the ignore label (-1), then set positive and negative labels label.fill_(-1) label.scatter_(0, pos_idx, 1) label.scatter_(0, neg_idx, 0) return label @torch.jit.unused @torch.no_grad() def label_and_sample_anchors( self, anchors: List[Boxes], gt_instances: List[Instances] ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: """ Args: anchors (list[Boxes]): anchors for each feature map. gt_instances: the ground-truth instances for each image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps R = sum(Hi * Wi * A). Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class. list[Tensor]: i-th element is a Rx4 tensor. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as 1. """ anchors = Boxes.cat(anchors) gt_boxes = [x.gt_boxes for x in gt_instances] image_sizes = [x.image_size for x in gt_instances] del gt_instances gt_labels = [] matched_gt_boxes = [] for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes): """ image_size_i: (h, w) for the i-th image gt_boxes_i: ground-truth boxes for i-th image """ match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors) matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) # Matching is memory-expensive and may result in CPU tensors. But the result is small gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) del match_quality_matrix if self.anchor_boundary_thresh >= 0: # Discard anchors that go out of the boundaries of the image # NOTE: This is legacy functionality that is turned off by default in Detectron2 anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh) gt_labels_i[~anchors_inside_image] = -1 # A vector of labels (-1, 0, 1) for each anchor gt_labels_i = self._subsample_labels(gt_labels_i) if len(gt_boxes_i) == 0: # These values won't be used anyway since the anchor is labeled as background matched_gt_boxes_i = torch.zeros_like(anchors.tensor) else: # TODO wasted indexing computation for ignored boxes matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor gt_labels.append(gt_labels_i) # N,AHW matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes @torch.jit.unused def losses( self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], gt_labels: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor], ) -> Dict[str, torch.Tensor]: """ Return the losses from a set of RPN predictions and their associated ground-truth. Args: anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each has shape (Hi*Wi*A, B), where B is box dimension (4 or 5). pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, Hi*Wi*A) representing the predicted objectness logits for all anchors. 
gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`. pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors to proposals. gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`. Returns: dict[loss name -> loss value]: A dict mapping from loss name to loss value. Loss names are: `loss_rpn_cls` for objectness classification and `loss_rpn_loc` for proposal localization. """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai)) # Log the number of positive/negative anchors per-image that's used in training pos_mask = gt_labels == 1 num_pos_anchors = pos_mask.sum().item() num_neg_anchors = (gt_labels == 0).sum().item() storage = get_event_storage() storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images) storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images) localization_loss = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) valid_mask = gt_labels >= 0 objectness_loss = F.binary_cross_entropy_with_logits( cat(pred_objectness_logits, dim=1)[valid_mask], gt_labels[valid_mask].to(torch.float32), reduction="sum", ) normalizer = self.batch_size_per_image * num_images losses = { "loss_rpn_cls": objectness_loss / normalizer, # The original Faster R-CNN paper uses a slightly different normalizer # for loc loss. But it doesn't matter in practice "loss_rpn_loc": localization_loss / normalizer, } losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} return losses def forward( self, images: ImageList, features: Dict[str, torch.Tensor], gt_instances: Optional[List[Instances]] = None, ): """ Args: images (ImageList): input images of length `N` features (dict[str, Tensor]): input data as a mapping from feature map name to tensor. Axis 0 represents the number of images `N` in the input data; axes 1-3 are channels, height, and width, which may vary between feature maps (e.g., if a feature pyramid is used). gt_instances (list[Instances], optional): a length `N` list of `Instances`s. Each `Instances` stores ground-truth instances for the corresponding image. Returns: proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits" loss: dict[Tensor] or None """ features = [features[f] for f in self.in_features] anchors = self.anchor_generator(features) pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) # Transpose the Hi*Wi*A dimension to the middle: pred_objectness_logits = [ # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) score.permute(0, 2, 3, 1).flatten(1) for score in pred_objectness_logits ] pred_anchor_deltas = [ # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B) x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]) .permute(0, 3, 4, 1, 2) .flatten(1, -2) for x in pred_anchor_deltas ] if self.training: assert gt_instances is not None, "RPN requires gt_instances in training!" 
gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) losses = self.losses( anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes ) else: losses = {} proposals = self.predict_proposals( anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes ) return proposals, losses def predict_proposals( self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], image_sizes: List[Tuple[int, int]], ): """ Decode all the predicted box regression deltas to proposals. Find the top proposals by applying NMS and removing boxes that are too small. Returns: proposals (list[Instances]): list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i, sorted by their objectness score in descending order. """ # The proposals are treated as fixed for joint training with roi heads. # This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that # are also network responses. with torch.no_grad(): pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) return find_top_rpn_proposals( pred_proposals, pred_objectness_logits, image_sizes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_size, self.training, ) def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]): """ Transform anchors into proposals by applying the predicted anchor deltas. Returns: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, B) """ N = pred_anchor_deltas[0].shape[0] proposals = [] # For each feature map for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas): B = anchors_i.tensor.size(1) pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) # Expand anchors to shape (N*Hi*Wi*A, B) anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) # Append feature map proposals with shape (N, Hi*Wi*A, B) proposals.append(proposals_i.view(N, -1, B)) return proposals
banmo-main
third_party/detectron2_old/detectron2/modeling/proposal_generator/rpn.py
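
A shape-only PyTorch sketch of the two transposes `RPN.forward` applies in the rpn.py record above before computing losses and proposals; the number of anchors, box dimension, and feature-map size are made up for the example.

import torch

N, A, B, H, W = 2, 3, 4, 25, 38          # hypothetical: 3 anchors, 4-d box deltas

objectness = torch.randn(N, A, H, W)
deltas = torch.randn(N, A * B, H, W)

# (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
objectness_flat = objectness.permute(0, 2, 3, 1).flatten(1)

# (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
deltas_flat = deltas.view(N, A, B, H, W).permute(0, 3, 4, 1, 2).flatten(1, -2)

assert objectness_flat.shape == (N, H * W * A)
assert deltas_flat.shape == (N, H * W * A, B)
print(objectness_flat.shape, deltas_flat.shape)
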
# Copyright (c) Facebook, Inc. and its affiliates. import itertools import logging from typing import Dict, List import torch from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms_rotated, cat from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated from detectron2.utils.memory import retry_if_cuda_oom from ..box_regression import Box2BoxTransformRotated from .build import PROPOSAL_GENERATOR_REGISTRY from .rpn import RPN logger = logging.getLogger(__name__) def find_top_rrpn_proposals( proposals, pred_objectness_logits, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_size, training, ): """ For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps if `training` is True, otherwise, returns the highest `post_nms_topk` scoring proposals for each feature map. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size(float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: proposals (list[Instances]): list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i. """ num_images = len(image_sizes) device = proposals[0].device # 1. Select top-k anchor for every level and every image topk_scores = [] # #lvl Tensor, each of shape N x topk topk_proposals = [] level_ids = [] # #lvl Tensor, each of shape (topk,) batch_idx = torch.arange(num_images, device=device) for level_id, proposals_i, logits_i in zip( itertools.count(), proposals, pred_objectness_logits ): Hi_Wi_A = logits_i.shape[1] num_proposals_i = min(pre_nms_topk, Hi_Wi_A) # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) logits_i, idx = logits_i.sort(descending=True, dim=1) topk_scores_i = logits_i[batch_idx, :num_proposals_i] topk_idx = idx[batch_idx, :num_proposals_i] # each is N x topk topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5 topk_proposals.append(topk_proposals_i) topk_scores.append(topk_scores_i) level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) # 2. Concat all levels together topk_scores = cat(topk_scores, dim=1) topk_proposals = cat(topk_proposals, dim=1) level_ids = cat(level_ids, dim=0) # 3. For each image, run a per-level NMS, and choose topk results. 
results = [] for n, image_size in enumerate(image_sizes): boxes = RotatedBoxes(topk_proposals[n]) scores_per_img = topk_scores[n] valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) if not valid_mask.all(): boxes = boxes[valid_mask] scores_per_img = scores_per_img[valid_mask] boxes.clip(image_size) # filter empty boxes keep = boxes.nonempty(threshold=min_box_size) lvl = level_ids if keep.sum().item() != len(boxes): boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep]) keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh) # In Detectron1, there was different behavior during training vs. testing. # (https://github.com/facebookresearch/Detectron/issues/459) # During training, topk is over the proposals from *all* images in the training batch. # During testing, it is over the proposals for each image separately. # As a result, the training behavior becomes batch-dependent, # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. # This bug is addressed in Detectron2 to make the behavior independent of batch size. keep = keep[:post_nms_topk] res = Instances(image_size) res.proposal_boxes = boxes[keep] res.objectness_logits = scores_per_img[keep] results.append(res) return results @PROPOSAL_GENERATOR_REGISTRY.register() class RRPN(RPN): """ Rotated Region Proposal Network described in :paper:`RRPN`. """ @configurable def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.anchor_boundary_thresh >= 0: raise NotImplementedError( "anchor_boundary_thresh is a legacy option not implemented for RRPN." ) @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): ret = super().from_config(cfg, input_shape) ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) return ret @torch.no_grad() def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]): """ Args: anchors (list[RotatedBoxes]): anchors for each feature map. gt_instances: the ground-truth instances for each image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across feature maps. Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class. list[Tensor]: i-th element is a Nx5 tensor, where N is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as 1. """ anchors = RotatedBoxes.cat(anchors) gt_boxes = [x.gt_boxes for x in gt_instances] del gt_instances gt_labels = [] matched_gt_boxes = [] for gt_boxes_i in gt_boxes: """ gt_boxes_i: ground-truth boxes for i-th image """ match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors) matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) # Matching is memory-expensive and may result in CPU tensors. 
But the result is small gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) # A vector of labels (-1, 0, 1) for each anchor gt_labels_i = self._subsample_labels(gt_labels_i) if len(gt_boxes_i) == 0: # These values won't be used anyway since the anchor is labeled as background matched_gt_boxes_i = torch.zeros_like(anchors.tensor) else: # TODO wasted indexing computation for ignored boxes matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor gt_labels.append(gt_labels_i) # N,AHW matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes @torch.no_grad() def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes): pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) return find_top_rrpn_proposals( pred_proposals, pred_objectness_logits, image_sizes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_size, self.training, )
banmo-main
third_party/detectron2_old/detectron2/modeling/proposal_generator/rrpn.py
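
A small pure-PyTorch illustration of the per-level pre-NMS top-k selection used in `find_top_rrpn_proposals` in the rrpn.py record above (sort plus slice instead of `.topk`, per the linked PyTorch issue); the anchor count and `pre_nms_topk` value are assumed for the example.

import torch

N, Hi_Wi_A, box_dim = 2, 1000, 5          # rotated boxes use 5 parameters
pre_nms_topk = 200

logits_i = torch.randn(N, Hi_Wi_A)
proposals_i = torch.randn(N, Hi_Wi_A, box_dim)
batch_idx = torch.arange(N)

num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
logits_sorted, idx = logits_i.sort(descending=True, dim=1)
topk_scores_i = logits_sorted[:, :num_proposals_i]              # N x topk
topk_idx = idx[:, :num_proposals_i]                             # N x topk
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]    # N x topk x 5

print(topk_scores_i.shape, topk_proposals_i.shape)              # (2, 200) (2, 200, 5)
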
# Copyright (c) Facebook, Inc. and its affiliates. from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead __all__ = list(globals().keys())
banmo-main
third_party/detectron2_old/detectron2/modeling/proposal_generator/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. import logging import math from typing import List, Tuple, Union import torch from detectron2.layers import batched_nms, cat from detectron2.structures import Boxes, Instances from detectron2.utils.env import TORCH_VERSION logger = logging.getLogger(__name__) def _is_tracing(): if torch.jit.is_scripting(): # https://github.com/pytorch/pytorch/issues/47379 return False else: return TORCH_VERSION >= (1, 7) and torch.jit.is_tracing() def find_top_rpn_proposals( proposals: List[torch.Tensor], pred_objectness_logits: List[torch.Tensor], image_sizes: List[Tuple[int, int]], nms_thresh: float, pre_nms_topk: int, post_nms_topk: int, min_box_size: float, training: bool, ): """ For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps for each image. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size (float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: list[Instances]: list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i, sorted by their objectness score in descending order. """ num_images = len(image_sizes) device = proposals[0].device # 1. Select top-k anchor for every level and every image topk_scores = [] # #lvl Tensor, each of shape N x topk topk_proposals = [] level_ids = [] # #lvl Tensor, each of shape (topk,) batch_idx = torch.arange(num_images, device=device) for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)): Hi_Wi_A = logits_i.shape[1] if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) else: num_proposals_i = min(Hi_Wi_A, pre_nms_topk) # sort is faster than topk: https://github.com/pytorch/pytorch/issues/22812 # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) logits_i, idx = logits_i.sort(descending=True, dim=1) topk_scores_i = logits_i.narrow(1, 0, num_proposals_i) topk_idx = idx.narrow(1, 0, num_proposals_i) # each is N x topk topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 topk_proposals.append(topk_proposals_i) topk_scores.append(topk_scores_i) level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) # 2. Concat all levels together topk_scores = cat(topk_scores, dim=1) topk_proposals = cat(topk_proposals, dim=1) level_ids = cat(level_ids, dim=0) # 3. For each image, run a per-level NMS, and choose topk results. 
results: List[Instances] = [] for n, image_size in enumerate(image_sizes): boxes = Boxes(topk_proposals[n]) scores_per_img = topk_scores[n] lvl = level_ids valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) if not valid_mask.all(): if training: raise FloatingPointError( "Predicted boxes or scores contain Inf/NaN. Training has diverged." ) boxes = boxes[valid_mask] scores_per_img = scores_per_img[valid_mask] lvl = lvl[valid_mask] boxes.clip(image_size) # filter empty boxes keep = boxes.nonempty(threshold=min_box_size) if _is_tracing() or keep.sum().item() != len(boxes): boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep] keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh) # In Detectron1, there was different behavior during training vs. testing. # (https://github.com/facebookresearch/Detectron/issues/459) # During training, topk is over the proposals from *all* images in the training batch. # During testing, it is over the proposals for each image separately. # As a result, the training behavior becomes batch-dependent, # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. # This bug is addressed in Detectron2 to make the behavior independent of batch size. keep = keep[:post_nms_topk] # keep is already sorted res = Instances(image_size) res.proposal_boxes = boxes[keep] res.objectness_logits = scores_per_img[keep] results.append(res) return results def add_ground_truth_to_proposals( gt: Union[List[Instances], List[Boxes]], proposals: List[Instances] ) -> List[Instances]: """ Call `add_ground_truth_to_proposals_single_image` for all images. Args: gt(Union[List[Instances], List[Boxes]): list of N elements. Element i is a Instances representing the ground-truth for image i. proposals (list[Instances]): list of N elements. Element i is a Instances representing the proposals for image i. Returns: list[Instances]: list of N Instances. Each is the proposals for the image, with field "proposal_boxes" and "objectness_logits". """ assert gt is not None if len(proposals) != len(gt): raise ValueError("proposals and gt should have the same length as the number of images!") if len(proposals) == 0: return proposals return [ add_ground_truth_to_proposals_single_image(gt_i, proposals_i) for gt_i, proposals_i in zip(gt, proposals) ] def add_ground_truth_to_proposals_single_image( gt: Union[Instances, Boxes], proposals: Instances ) -> Instances: """ Augment `proposals` with `gt`. Args: Same as `add_ground_truth_to_proposals`, but with gt and proposals per image. Returns: Same as `add_ground_truth_to_proposals`, but for only one image. """ if isinstance(gt, Boxes): # convert Boxes to Instances gt = Instances(proposals.image_size, gt_boxes=gt) gt_boxes = gt.gt_boxes device = proposals.objectness_logits.device # Assign all ground-truth boxes an objectness logit corresponding to # P(object) = sigmoid(logit) =~ 1. gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10))) gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device) # Concatenating gt_boxes with proposals requires them to have the same fields gt_proposal = Instances(proposals.image_size, **gt.get_fields()) gt_proposal.proposal_boxes = gt_boxes gt_proposal.objectness_logits = gt_logits for key in proposals.get_fields().keys(): assert gt_proposal.has( key ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key) # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items # will be thrown away. 
new_proposals = Instances.cat([proposals, gt_proposal]) return new_proposals
banmo-main
third_party/detectron2_old/detectron2/modeling/proposal_generator/proposal_utils.py
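A minimal sketch of how the ground-truth injection in the file above behaves (not part of the repository dump; it assumes a working detectron2 installation and uses made-up boxes). It checks that the constant logit assigned to ground-truth boxes corresponds to P(object) ~= 1 and that the augmented proposals gain one extra entry per ground-truth box.

import math

import torch

from detectron2.modeling.proposal_generator.proposal_utils import (
    add_ground_truth_to_proposals,
)
from detectron2.structures import Boxes, Instances

# The logit assigned to GT boxes: sigmoid(gt_logit) = 1 - 1e-10, i.e. P(object) ~= 1.
gt_logit = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))  # ~= 23.03
assert abs(torch.sigmoid(torch.tensor(gt_logit)).item() - 1.0) < 1e-6

# Two fake RPN proposals and one fake ground-truth box for a 100x100 image.
proposals = Instances((100, 100))
proposals.proposal_boxes = Boxes(
    torch.tensor([[0.0, 0.0, 50.0, 50.0], [10.0, 10.0, 60.0, 60.0]])
)
proposals.objectness_logits = torch.tensor([2.0, -1.0])

gt = Instances((100, 100))
gt.gt_boxes = Boxes(torch.tensor([[20.0, 20.0, 80.0, 80.0]]))

(augmented,) = add_ground_truth_to_proposals([gt], [proposals])
print(len(augmented))                   # 3: two predicted proposals + one GT box
print(augmented.objectness_logits[-1])  # the near-saturated logit above (~23.03)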
# Copyright (c) Facebook, Inc. and its affiliates. from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm from detectron2.structures import Instances from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry __all__ = [ "BaseMaskRCNNHead", "MaskRCNNConvUpsampleHead", "build_mask_head", "ROI_MASK_HEAD_REGISTRY", ] ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") ROI_MASK_HEAD_REGISTRY.__doc__ = """ Registry for mask heads, which predicts instance masks given per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ @torch.jit.unused def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0): """ Compute the mask prediction loss defined in the Mask R-CNN paper. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. vis_period (int): the period (in steps) to dump visualization. Returns: mask_loss (Tensor): A scalar tensor containing the loss. """ cls_agnostic_mask = pred_mask_logits.size(1) == 1 total_num_masks = pred_mask_logits.size(0) mask_side_len = pred_mask_logits.size(2) assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" 
gt_classes = [] gt_masks = [] for instances_per_image in instances: if len(instances_per_image) == 0: continue if not cls_agnostic_mask: gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) gt_classes.append(gt_classes_per_image) gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( instances_per_image.proposal_boxes.tensor, mask_side_len ).to(device=pred_mask_logits.device) # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len gt_masks.append(gt_masks_per_image) if len(gt_masks) == 0: return pred_mask_logits.sum() * 0 gt_masks = cat(gt_masks, dim=0) if cls_agnostic_mask: pred_mask_logits = pred_mask_logits[:, 0] else: indices = torch.arange(total_num_masks) gt_classes = cat(gt_classes, dim=0) pred_mask_logits = pred_mask_logits[indices, gt_classes] if gt_masks.dtype == torch.bool: gt_masks_bool = gt_masks else: # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) gt_masks_bool = gt_masks > 0.5 gt_masks = gt_masks.to(dtype=torch.float32) # Log the training accuracy (using gt classes and 0.5 threshold) mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) num_positive = gt_masks_bool.sum().item() false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( gt_masks_bool.numel() - num_positive, 1.0 ) false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) storage = get_event_storage() storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) storage.put_scalar("mask_rcnn/false_positive", false_positive) storage.put_scalar("mask_rcnn/false_negative", false_negative) if vis_period > 0 and storage.iter % vis_period == 0: pred_masks = pred_mask_logits.sigmoid() vis_masks = torch.cat([pred_masks, gt_masks], axis=2) name = "Left: mask prediction; Right: mask GT" for idx, vis_mask in enumerate(vis_masks): vis_mask = torch.stack([vis_mask] * 3, axis=0) storage.put_image(name + f" ({idx})", vis_mask) mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") return mask_loss def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]): """ Convert pred_mask_logits to estimated foreground probability masks while also extracting only the masks for the predicted classes in pred_instances. For each predicted box, the mask of the same class is attached to the instance by adding a new "pred_masks" field to pred_instances. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. pred_instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. Each Instances must have field "pred_classes". Returns: None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, Wmask) for predicted class. Note that the masks are returned as a soft (non-quantized) masks the resolution predicted by the network; post-processing steps, such as resizing the predicted masks to the original image resolution and/or binarizing them, is left to the caller. 
""" cls_agnostic_mask = pred_mask_logits.size(1) == 1 if cls_agnostic_mask: mask_probs_pred = pred_mask_logits.sigmoid() else: # Select masks corresponding to the predicted classes num_masks = pred_mask_logits.shape[0] class_pred = cat([i.pred_classes for i in pred_instances]) indices = torch.arange(num_masks, device=class_pred.device) mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() # mask_probs_pred.shape: (B, 1, Hmask, Wmask) num_boxes_per_image = [len(i) for i in pred_instances] mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) for prob, instances in zip(mask_probs_pred, pred_instances): instances.pred_masks = prob # (1, Hmask, Wmask) class BaseMaskRCNNHead(nn.Module): """ Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN` """ @configurable def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0): """ NOTE: this interface is experimental. Args: loss_weight (float): multiplier of the loss vis_period (int): visualization period """ super().__init__() self.vis_period = vis_period self.loss_weight = loss_weight @classmethod def from_config(cls, cfg, input_shape): return {"vis_period": cfg.VIS_PERIOD} def forward(self, x, instances: List[Instances]): """ Args: x: input region feature(s) provided by :class:`ROIHeads`. instances (list[Instances]): contains the boxes & labels corresponding to the input features. Exact format is up to its caller to decide. Typically, this is the foreground instances in training, with "proposal_boxes" field and other gt annotations. In inference, it contains boxes that are already predicted. Returns: A dict of losses in training. The predicted "instances" in inference. """ x = self.layers(x) if self.training: return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period) * self.loss_weight} else: mask_rcnn_inference(x, instances) return instances def layers(self, x): """ Neural network layers that makes predictions from input features. """ raise NotImplementedError # To get torchscript support, we make the head a subclass of `nn.Sequential`. # Therefore, to add new layers in this head class, please make sure they are # added in the order they will be used in forward(). @ROI_MASK_HEAD_REGISTRY.register() class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential): """ A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). Predictions are made with a final 1x1 conv layer. """ @configurable def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature num_classes (int): the number of foreground classes (i.e. background is not included). 1 if using class agnostic prediction. conv_dims (list[int]): a list of N>0 integers representing the output dimensions of N-1 conv layers and the last upsample layer. conv_norm (str or callable): normalization for the conv layers. See :func:`detectron2.layers.get_norm` for supported types. """ super().__init__(**kwargs) assert len(conv_dims) >= 1, "conv_dims have to be non-empty!" 
self.conv_norm_relus = [] cur_channels = input_shape.channels for k, conv_dim in enumerate(conv_dims[:-1]): conv = Conv2d( cur_channels, conv_dim, kernel_size=3, stride=1, padding=1, bias=not conv_norm, norm=get_norm(conv_norm, conv_dim), activation=nn.ReLU(), ) self.add_module("mask_fcn{}".format(k + 1), conv) self.conv_norm_relus.append(conv) cur_channels = conv_dim self.deconv = ConvTranspose2d( cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0 ) self.add_module("deconv_relu", nn.ReLU()) cur_channels = conv_dims[-1] self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0) for layer in self.conv_norm_relus + [self.deconv]: weight_init.c2_msra_fill(layer) # use normal distribution initialization for mask prediction layer nn.init.normal_(self.predictor.weight, std=0.001) if self.predictor.bias is not None: nn.init.constant_(self.predictor.bias, 0) @classmethod def from_config(cls, cfg, input_shape): ret = super().from_config(cfg, input_shape) conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV ret.update( conv_dims=[conv_dim] * (num_conv + 1), # +1 for ConvTranspose conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM, input_shape=input_shape, ) if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK: ret["num_classes"] = 1 else: ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES return ret def layers(self, x): for layer in self: x = layer(x) return x def build_mask_head(cfg, input_shape): """ Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. """ name = cfg.MODEL.ROI_MASK_HEAD.NAME return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/mask_head.py
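A small sketch of the mask head and inference helper defined in the file above (an illustration, not repository code; the 14x14 RoI / 28x28 mask shapes mirror the common Mask R-CNN setup, and a detectron2 installation is assumed).

import torch

from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.mask_head import (
    MaskRCNNConvUpsampleHead,
    mask_rcnn_inference,
)
from detectron2.structures import Instances

# Four 3x3 convs plus one 2x upsample, all 256 channels, predicting 80 classes.
head = MaskRCNNConvUpsampleHead(
    ShapeSpec(channels=256, height=14, width=14),
    num_classes=80,
    conv_dims=[256] * 5,  # N-1 conv dims + the final upsample dim
)
features = torch.randn(2, 256, 14, 14)     # two pooled regions
logits = head.layers(features)
print(logits.shape)                        # (2, 80, 28, 28)

# At inference, only the mask of each box's predicted class is kept.
inst = Instances((224, 224))
inst.pred_classes = torch.tensor([3, 7])
mask_rcnn_inference(logits, [inst])
print(inst.pred_masks.shape)               # (2, 1, 28, 28), soft (sigmoid) masks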
# Copyright (c) Facebook, Inc. and its affiliates. import logging from typing import Dict, List, Tuple, Union import torch from fvcore.nn import giou_loss, smooth_l1_loss from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple from detectron2.modeling.box_regression import Box2BoxTransform from detectron2.structures import Boxes, Instances from detectron2.utils.events import get_event_storage __all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"] logger = logging.getLogger(__name__) """ Shape shorthand in this module: N: number of images in the minibatch R: number of ROIs, combined over all images, in the minibatch Ri: number of ROIs in image i K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. Naming convention: deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box transform (see :class:`box_regression.Box2BoxTransform`). pred_class_logits: predicted class scores in [-inf, +inf]; use softmax(pred_class_logits) to estimate P(class). gt_classes: ground-truth classification labels in [0, K], where [0, K) represent foreground object classes and K represents the background class. pred_proposal_deltas: predicted box2box transform deltas for transforming proposals to detection box predictions. gt_proposal_deltas: ground-truth box2box transform deltas """ def fast_rcnn_inference( boxes: List[torch.Tensor], scores: List[torch.Tensor], image_shapes: List[Tuple[int, int]], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Call `fast_rcnn_inference_single_image` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 4) if doing class-specific regression, or (Ri, 4) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that stores the topk most confidence detections. kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. """ result_per_image = [ fast_rcnn_inference_single_image( boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image ) for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) ] return [x[0] for x in result_per_image], [x[1] for x in result_per_image] def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"): """ Log the classification metrics to EventStorage. Args: pred_logits: Rx(K+1) logits. The last column is for background class. 
gt_classes: R labels """ num_instances = gt_classes.numel() if num_instances == 0: return pred_classes = pred_logits.argmax(dim=1) bg_class_ind = pred_logits.shape[1] - 1 fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind) num_fg = fg_inds.nonzero().numel() fg_gt_classes = gt_classes[fg_inds] fg_pred_classes = pred_classes[fg_inds] num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() num_accurate = (pred_classes == gt_classes).nonzero().numel() fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() storage = get_event_storage() storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances) if num_fg > 0: storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg) storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg) def fast_rcnn_inference_single_image( boxes, scores, image_shape: Tuple[int, int], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Args: Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes per image. Returns: Same as `fast_rcnn_inference`, but for only one image. """ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) if not valid_mask.all(): boxes = boxes[valid_mask] scores = scores[valid_mask] scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... boxes = Boxes(boxes.reshape(-1, 4)) boxes.clip(image_shape) boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # 1. Filter results based on detection scores. It can make NMS more efficient # by filtering out low-confidence detections. filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # 2. Apply NMS for each class independently. keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = Boxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] class FastRCNNOutputs: """ An internal implementation that stores information about outputs of a Fast R-CNN head, and provides methods that are used to decode the outputs of a Fast R-CNN head. """ def __init__( self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", ): """ Args: box2box_transform (Box2BoxTransform/Box2BoxTransformRotated): box2box transform instance for proposal-to-detection transformations. pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class logits for all R predicted object instances. Each row corresponds to a predicted object instance. pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for class-specific or class-agnostic regression. It stores the predicted deltas that transform proposals into final box detections. B is the box dimension (4 or 5). When B is 4, each row is [dx, dy, dw, dh (, ....)]. When B is 5, each row is [dx, dy, dw, dh, da (, ....)]. 
proposals (list[Instances]): A list of N Instances, where Instances i stores the proposals for image i, in the field "proposal_boxes". When training, each Instances must have ground-truth labels stored in the field "gt_classes" and "gt_boxes". The total number of all instances must be equal to R. smooth_l1_beta (float): The transition point between L1 and L2 loss in the smooth L1 loss function. When set to 0, the loss becomes L1. When set to +inf, the loss becomes constant 0. box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou" """ self.box2box_transform = box2box_transform self.num_preds_per_image = [len(p) for p in proposals] self.pred_class_logits = pred_class_logits self.pred_proposal_deltas = pred_proposal_deltas self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type self.image_shapes = [x.image_size for x in proposals] if len(proposals): box_type = type(proposals[0].proposal_boxes) # cat(..., dim=0) concatenates over all images in the batch self.proposals = box_type.cat([p.proposal_boxes for p in proposals]) assert ( not self.proposals.tensor.requires_grad ), "Proposals should not require gradients!" # "gt_classes" exists if and only if training. But other gt fields may # not necessarily exist in training for images that have no groundtruth. if proposals[0].has("gt_classes"): self.gt_classes = cat([p.gt_classes for p in proposals], dim=0) # If "gt_boxes" does not exist, the proposals must be all negative and # should not be included in regression loss computation. # Here we just use proposal_boxes as an arbitrary placeholder because its # value won't be used in self.box_reg_loss(). gt_boxes = [ p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals ] self.gt_boxes = box_type.cat(gt_boxes) else: self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device)) self._no_instances = len(self.proposals) == 0 # no instances found def softmax_cross_entropy_loss(self): """ Deprecated """ _log_classification_stats(self.pred_class_logits, self.gt_classes) return cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean") def box_reg_loss(self): """ Deprecated """ if self._no_instances: return 0.0 * self.pred_proposal_deltas.sum() box_dim = self.proposals.tensor.size(1) # 4 or 5 cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim device = self.pred_proposal_deltas.device bg_class_ind = self.pred_class_logits.shape[1] - 1 # Box delta loss is only computed between the prediction for the gt class k # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions # for non-gt classes and background. # Empty fg_inds should produce a valid loss of zero because reduction=sum. fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0] if cls_agnostic_bbox_reg: # pred_proposal_deltas only corresponds to foreground class for agnostic gt_class_cols = torch.arange(box_dim, device=device) else: # pred_proposal_deltas for class k are located in columns [b * k : b * k + b], # where b is the dimension of box representation (4 or 5) # Note that compared to Detectron1, # we do not perform bounding box regression for background classes. 
gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange( box_dim, device=device ) if self.box_reg_loss_type == "smooth_l1": gt_proposal_deltas = self.box2box_transform.get_deltas( self.proposals.tensor, self.gt_boxes.tensor ) loss_box_reg = smooth_l1_loss( self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols], gt_proposal_deltas[fg_inds], self.smooth_l1_beta, reduction="sum", ) elif self.box_reg_loss_type == "giou": fg_pred_boxes = self.box2box_transform.apply_deltas( self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols], self.proposals.tensor[fg_inds], ) loss_box_reg = giou_loss( fg_pred_boxes, self.gt_boxes.tensor[fg_inds], reduction="sum", ) else: raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'") loss_box_reg = loss_box_reg / self.gt_classes.numel() return loss_box_reg def losses(self): """ Deprecated """ return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()} def predict_boxes(self): """ Deprecated """ pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor) return pred.split(self.num_preds_per_image, dim=0) def predict_probs(self): """ Deprecated """ probs = F.softmax(self.pred_class_logits, dim=-1) return probs.split(self.num_preds_per_image, dim=0) class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: 1. proposal-to-detection box regression deltas 2. classification scores """ @configurable def __init__( self, input_shape: ShapeSpec, *, box2box_transform, num_classes: int, test_score_thresh: float = 0.0, test_nms_thresh: float = 0.5, test_topk_per_image: int = 100, cls_agnostic_bbox_reg: bool = False, smooth_l1_beta: float = 0.0, box_reg_loss_type: str = "smooth_l1", loss_weight: Union[float, Dict[str, float]] = 1.0, ): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature to this module box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): num_classes (int): number of foreground classes test_score_thresh (float): threshold to filter predictions results. test_nms_thresh (float): NMS threshold for prediction results. test_topk_per_image (int): number of top predictions to produce per image. cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if `box_reg_loss_type` is "smooth_l1" box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou" loss_weight (float|dict): weights to use for losses. Can be single float for weighting all losses, or a dict of individual weightings. 
Valid dict keys are: * "loss_cls": applied to classification loss * "loss_box_reg": applied to box regression loss """ super().__init__() if isinstance(input_shape, int): # some backward compatibility input_shape = ShapeSpec(channels=input_shape) self.num_classes = num_classes input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) # prediction layer for num_classes foreground classes and one background class (hence + 1) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes box_dim = len(box2box_transform.weights) self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for l in [self.cls_score, self.bbox_pred]: nn.init.constant_(l.bias, 0) self.box2box_transform = box2box_transform self.smooth_l1_beta = smooth_l1_beta self.test_score_thresh = test_score_thresh self.test_nms_thresh = test_nms_thresh self.test_topk_per_image = test_topk_per_image self.box_reg_loss_type = box_reg_loss_type if isinstance(loss_weight, float): loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight} self.loss_weight = loss_weight @classmethod def from_config(cls, cfg, input_shape): return { "input_shape": input_shape, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS), # fmt: off "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES, "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA, "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE, "box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE, "loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # fmt: on } def forward(self, x): """ Args: x: per-region features of shape (N, ...) for N bounding boxes to predict. Returns: (Tensor, Tensor): First tensor: shape (N,K+1), scores for each of the N box. Each row contains the scores for K object categories and 1 background class. Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4), or (N,4) for class-agnostic regression. """ if x.dim() > 2: x = torch.flatten(x, start_dim=1) scores = self.cls_score(x) proposal_deltas = self.bbox_pred(x) return scores, proposal_deltas def losses(self, predictions, proposals): """ Args: predictions: return values of :meth:`forward()`. proposals (list[Instances]): proposals that match the features that were used to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``, ``gt_classes`` are expected. Returns: Dict[str, Tensor]: dict of losses """ scores, proposal_deltas = predictions # parse classification outputs gt_classes = ( cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0) ) _log_classification_stats(scores, gt_classes) # parse box regression outputs if len(proposals): proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4 assert not proposal_boxes.requires_grad, "Proposals should not require gradients!" # If "gt_boxes" does not exist, the proposals must be all negative and # should not be included in regression loss computation. # Here we just use proposal_boxes as an arbitrary placeholder because its # value won't be used in self.box_reg_loss(). 
gt_boxes = cat( [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals], dim=0, ) else: proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) losses = { "loss_cls": cross_entropy(scores, gt_classes, reduction="mean"), "loss_box_reg": self.box_reg_loss( proposal_boxes, gt_boxes, proposal_deltas, gt_classes ), } return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes): """ Args: All boxes are tensors with the same shape Rx(4 or 5). gt_classes is a long tensor of shape R, the gt class label of each proposal. R shall be the number of proposals. """ box_dim = proposal_boxes.shape[1] # 4 or 5 # Regression loss is only computed for foreground proposals (those matched to a GT) fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0] if pred_deltas.shape[1] == box_dim: # cls-agnostic regression fg_pred_deltas = pred_deltas[fg_inds] else: fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[ fg_inds, gt_classes[fg_inds] ] if self.box_reg_loss_type == "smooth_l1": gt_pred_deltas = self.box2box_transform.get_deltas( proposal_boxes[fg_inds], gt_boxes[fg_inds], ) loss_box_reg = smooth_l1_loss( fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum" ) elif self.box_reg_loss_type == "giou": fg_pred_boxes = self.box2box_transform.apply_deltas( fg_pred_deltas, proposal_boxes[fg_inds] ) loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum") else: raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'") # The reg loss is normalized using the total number of regions (R), not the number # of foreground regions even though the box regression loss is only defined on # foreground regions. Why? Because doing so gives equal training influence to # each foreground example. To see how, consider two different minibatches: # (1) Contains a single foreground region # (2) Contains 100 foreground regions # If we normalize by the number of foreground regions, the single example in # minibatch (1) will be given 100 times as much influence as each foreground # example in minibatch (2). Normalizing by the total number of regions, R, # means that the single example in minibatch (1) and each of the 100 examples # in minibatch (2) are given equal influence. return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]): """ Args: predictions: return values of :meth:`forward()`. proposals (list[Instances]): proposals that match the features that were used to compute predictions. The ``proposal_boxes`` field is expected. Returns: list[Instances]: same as `fast_rcnn_inference`. list[Tensor]: same as `fast_rcnn_inference`. """ boxes = self.predict_boxes(predictions, proposals) scores = self.predict_probs(predictions, proposals) image_shapes = [x.image_size for x in proposals] return fast_rcnn_inference( boxes, scores, image_shapes, self.test_score_thresh, self.test_nms_thresh, self.test_topk_per_image, ) def predict_boxes_for_gt_classes(self, predictions, proposals): """ Args: predictions: return values of :meth:`forward()`. proposals (list[Instances]): proposals that match the features that were used to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected. Returns: list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of class-specific box head. 
Element i of the list has shape (Ri, B), where Ri is the number of proposals for image i and B is the box dimension (4 or 5) """ if not len(proposals): return [] scores, proposal_deltas = predictions proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) N, B = proposal_boxes.shape predict_boxes = self.box2box_transform.apply_deltas( proposal_deltas, proposal_boxes ) # Nx(KxB) K = predict_boxes.shape[1] // B if K > 1: gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0) # Some proposals are ignored or have a background class. Their gt_classes # cannot be used as index. gt_classes = gt_classes.clamp_(0, K - 1) predict_boxes = predict_boxes.view(N, K, B)[ torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes ] num_prop_per_image = [len(p) for p in proposals] return predict_boxes.split(num_prop_per_image) def predict_boxes( self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] ): """ Args: predictions: return values of :meth:`forward()`. proposals (list[Instances]): proposals that match the features that were used to compute predictions. The ``proposal_boxes`` field is expected. Returns: list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is the number of proposals for image i and B is the box dimension (4 or 5) """ if not len(proposals): return [] _, proposal_deltas = predictions num_prop_per_image = [len(p) for p in proposals] proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) predict_boxes = self.box2box_transform.apply_deltas( proposal_deltas, proposal_boxes, ) # Nx(KxB) return predict_boxes.split(num_prop_per_image) def predict_probs( self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] ): """ Args: predictions: return values of :meth:`forward()`. proposals (list[Instances]): proposals that match the features that were used to compute predictions. Returns: list[Tensor]: A list of Tensors of predicted class probabilities for each image. Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i. """ scores, _ = predictions num_inst_per_image = [len(p) for p in proposals] probs = F.softmax(scores, dim=-1) return probs.split(num_inst_per_image, dim=0)
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/fast_rcnn.py
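A bare-bones sketch of the prediction layers defined in the file above (illustrative only; the 1024-d input and the (10, 10, 5, 5) regression weights are typical values rather than ones read from a config, and a detectron2 installation is assumed).

import torch

from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers

output_layers = FastRCNNOutputLayers(
    ShapeSpec(channels=1024),
    box2box_transform=Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0)),
    num_classes=80,
)
box_features = torch.randn(16, 1024)        # 16 pooled region features
scores, proposal_deltas = output_layers(box_features)
print(scores.shape)                         # (16, 81): 80 foreground classes + background
print(proposal_deltas.shape)                # (16, 320): class-specific 4-d deltas (80 * 4)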
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.utils.registry import Registry __all__ = ["FastRCNNConvFCHead", "build_box_head", "ROI_BOX_HEAD_REGISTRY"] ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") ROI_BOX_HEAD_REGISTRY.__doc__ = """ Registry for box heads, which make box predictions from per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ # To get torchscript support, we make the head a subclass of `nn.Sequential`. # Therefore, to add new layers in this head class, please make sure they are # added in the order they will be used in forward(). @ROI_BOX_HEAD_REGISTRY.register() class FastRCNNConvFCHead(nn.Sequential): """ A head with several 3x3 conv layers (each followed by norm & relu) and then several fc layers (each followed by relu). """ @configurable def __init__( self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm="" ): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature. conv_dims (list[int]): the output dimensions of the conv layers fc_dims (list[int]): the output dimensions of the fc layers conv_norm (str or callable): normalization for the conv layers. See :func:`detectron2.layers.get_norm` for supported types. """ super().__init__() assert len(conv_dims) + len(fc_dims) > 0 self._output_size = (input_shape.channels, input_shape.height, input_shape.width) self.conv_norm_relus = [] for k, conv_dim in enumerate(conv_dims): conv = Conv2d( self._output_size[0], conv_dim, kernel_size=3, padding=1, bias=not conv_norm, norm=get_norm(conv_norm, conv_dim), activation=nn.ReLU(), ) self.add_module("conv{}".format(k + 1), conv) self.conv_norm_relus.append(conv) self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) self.fcs = [] for k, fc_dim in enumerate(fc_dims): if k == 0: self.add_module("flatten", nn.Flatten()) fc = nn.Linear(int(np.prod(self._output_size)), fc_dim) self.add_module("fc{}".format(k + 1), fc) self.add_module("fc_relu{}".format(k + 1), nn.ReLU()) self.fcs.append(fc) self._output_size = fc_dim for layer in self.conv_norm_relus: weight_init.c2_msra_fill(layer) for layer in self.fcs: weight_init.c2_xavier_fill(layer) @classmethod def from_config(cls, cfg, input_shape): num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM return { "input_shape": input_shape, "conv_dims": [conv_dim] * num_conv, "fc_dims": [fc_dim] * num_fc, "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM, } def forward(self, x): for layer in self: x = layer(x) return x @property @torch.jit.unused def output_shape(self): """ Returns: ShapeSpec: the output feature shape """ o = self._output_size if isinstance(o, int): return ShapeSpec(channels=o) else: return ShapeSpec(channels=o[0], height=o[1], width=o[2]) def build_box_head(cfg, input_shape): """ Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. """ name = cfg.MODEL.ROI_BOX_HEAD.NAME return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/box_head.py
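A quick sketch of the box head above with the common "two fc layers of 1024" layout (an illustration with made-up shapes, not code from the repository).

import torch

from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.box_head import FastRCNNConvFCHead

head = FastRCNNConvFCHead(
    ShapeSpec(channels=256, height=7, width=7),
    conv_dims=[],            # no conv layers
    fc_dims=[1024, 1024],    # two fc layers, each followed by ReLU
)
x = torch.randn(8, 256, 7, 7)    # 8 pooled 7x7 regions
out = head(x)
print(out.shape)                 # (8, 1024)
print(head.output_shape)         # ShapeSpec(channels=1024, ...)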
# Copyright (c) Facebook, Inc. and its affiliates. from typing import List import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate from detectron2.structures import Instances, heatmaps_to_keypoints from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry _TOTAL_SKIPPED = 0 __all__ = [ "ROI_KEYPOINT_HEAD_REGISTRY", "build_keypoint_head", "BaseKeypointRCNNHead", "KRCNNConvDeconvUpsampleHead", ] ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """ Registry for keypoint heads, which make keypoint predictions from per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ def build_keypoint_head(cfg, input_shape): """ Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. """ name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape) def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): """ Arguments: pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number of instances in the batch, K is the number of keypoints, and S is the side length of the keypoint heatmap. The values are spatial logits. instances (list[Instances]): A list of M Instances, where M is the batch size. These instances are predictions from the model that are in 1:1 correspondence with pred_keypoint_logits. Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` instance. normalizer (float): Normalize the loss by this amount. If not specified, we normalize by the number of visible keypoints in the minibatch. Returns a scalar tensor containing the loss. """ heatmaps = [] valid = [] keypoint_side_len = pred_keypoint_logits.shape[2] for instances_per_image in instances: if len(instances_per_image) == 0: continue keypoints = instances_per_image.gt_keypoints heatmaps_per_image, valid_per_image = keypoints.to_heatmap( instances_per_image.proposal_boxes.tensor, keypoint_side_len ) heatmaps.append(heatmaps_per_image.view(-1)) valid.append(valid_per_image.view(-1)) if len(heatmaps): keypoint_targets = cat(heatmaps, dim=0) valid = cat(valid, dim=0).to(dtype=torch.uint8) valid = torch.nonzero(valid).squeeze(1) # torch.mean (in binary_cross_entropy_with_logits) doesn't # accept empty tensors, so handle it separately if len(heatmaps) == 0 or valid.numel() == 0: global _TOTAL_SKIPPED _TOTAL_SKIPPED += 1 storage = get_event_storage() storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) return pred_keypoint_logits.sum() * 0 N, K, H, W = pred_keypoint_logits.shape pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) keypoint_loss = F.cross_entropy( pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" ) # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch if normalizer is None: normalizer = valid.numel() keypoint_loss /= normalizer return keypoint_loss def keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]): """ Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score) and add it to the `pred_instances` as a `pred_keypoints` field. 
Args: pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number of instances in the batch, K is the number of keypoints, and S is the side length of the keypoint heatmap. The values are spatial logits. pred_instances (list[Instances]): A list of N Instances, where N is the number of images. Returns: None. Each element in pred_instances will contain extra "pred_keypoints" and "pred_keypoint_heatmaps" fields. "pred_keypoints" is a tensor of shape (#instance, K, 3) where the last dimension corresponds to (x, y, score). The scores are larger than 0. "pred_keypoint_heatmaps" contains the raw keypoint logits as passed to this function. """ # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor) bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0) pred_keypoint_logits = pred_keypoint_logits.detach() keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits, bboxes_flat.detach()) num_instances_per_image = [len(i) for i in pred_instances] keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0) heatmap_results = pred_keypoint_logits.split(num_instances_per_image, dim=0) for keypoint_results_per_image, heatmap_results_per_image, instances_per_image in zip( keypoint_results, heatmap_results, pred_instances ): # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score) # heatmap_results_per_image is (num instances)x(num keypoints)x(side)x(side) instances_per_image.pred_keypoints = keypoint_results_per_image instances_per_image.pred_keypoint_heatmaps = heatmap_results_per_image class BaseKeypointRCNNHead(nn.Module): """ Implement the basic Keypoint R-CNN losses and inference logic described in Sec. 5 of :paper:`Mask R-CNN`. """ @configurable def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0): """ NOTE: this interface is experimental. Args: num_keypoints (int): number of keypoints to predict loss_weight (float): weight to multiple on the keypoint loss loss_normalizer (float or str): If float, divide the loss by `loss_normalizer * #images`. If 'visible', the loss is normalized by the total number of visible keypoints across images. """ super().__init__() self.num_keypoints = num_keypoints self.loss_weight = loss_weight assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer self.loss_normalizer = loss_normalizer @classmethod def from_config(cls, cfg, input_shape): ret = { "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT, "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS, } normalize_by_visible = ( cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS ) # noqa if not normalize_by_visible: batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION ret["loss_normalizer"] = ( ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction ) else: ret["loss_normalizer"] = "visible" return ret def forward(self, x, instances: List[Instances]): """ Args: x: input 4D region feature(s) provided by :class:`ROIHeads`. instances (list[Instances]): contains the boxes & labels corresponding to the input features. Exact format is up to its caller to decide. Typically, this is the foreground instances in training, with "proposal_boxes" field and other gt annotations. In inference, it contains boxes that are already predicted. Returns: A dict of losses if in training. The predicted "instances" if in inference. 
""" x = self.layers(x) if self.training: num_images = len(instances) normalizer = ( None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer ) return { "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer) * self.loss_weight } else: keypoint_rcnn_inference(x, instances) return instances def layers(self, x): """ Neural network layers that makes predictions from regional input features. """ raise NotImplementedError # To get torchscript support, we make the head a subclass of `nn.Sequential`. # Therefore, to add new layers in this head class, please make sure they are # added in the order they will be used in forward(). @ROI_KEYPOINT_HEAD_REGISTRY.register() class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential): """ A standard keypoint head containing a series of 3x3 convs, followed by a transpose convolution and bilinear interpolation for upsampling. It is described in Sec. 5 of :paper:`Mask R-CNN`. """ @configurable def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs): """ NOTE: this interface is experimental. Args: input_shape (ShapeSpec): shape of the input feature conv_dims: an iterable of output channel counts for each conv in the head e.g. (512, 512, 512) for three convs outputting 512 channels. """ super().__init__(num_keypoints=num_keypoints, **kwargs) # default up_scale to 2.0 (this can be made an option) up_scale = 2.0 in_channels = input_shape.channels for idx, layer_channels in enumerate(conv_dims, 1): module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1) self.add_module("conv_fcn{}".format(idx), module) self.add_module("conv_fcn_relu{}".format(idx), nn.ReLU()) in_channels = layer_channels deconv_kernel = 4 self.score_lowres = ConvTranspose2d( in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1 ) self.up_scale = up_scale for name, param in self.named_parameters(): if "bias" in name: nn.init.constant_(param, 0) elif "weight" in name: # Caffe2 implementation uses MSRAFill, which in fact # corresponds to kaiming_normal_ in PyTorch nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") @classmethod def from_config(cls, cfg, input_shape): ret = super().from_config(cfg, input_shape) ret["input_shape"] = input_shape ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS return ret def layers(self, x): for layer in self: x = layer(x) x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False) return x
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/keypoint_head.py
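An illustrative sketch of the keypoint head above (not repository code; 17 keypoints and eight 512-channel convs mirror the usual COCO setup, and a detectron2 installation is assumed). The transpose convolution doubles the 14x14 input and the bilinear upsample doubles it again.

import torch

from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads.keypoint_head import KRCNNConvDeconvUpsampleHead

head = KRCNNConvDeconvUpsampleHead(
    ShapeSpec(channels=256, height=14, width=14),
    num_keypoints=17,
    conv_dims=[512] * 8,
)
x = torch.randn(2, 256, 14, 14)   # two pooled regions
heatmaps = head.layers(x)
print(heatmaps.shape)             # (2, 17, 56, 56): 14 -> 28 (deconv) -> 56 (interpolate)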
# Copyright (c) Facebook, Inc. and its affiliates. from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead from .keypoint_head import ( ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head, BaseKeypointRCNNHead, KRCNNConvDeconvUpsampleHead, ) from .mask_head import ( ROI_MASK_HEAD_REGISTRY, build_mask_head, BaseMaskRCNNHead, MaskRCNNConvUpsampleHead, ) from .roi_heads import ( ROI_HEADS_REGISTRY, ROIHeads, Res5ROIHeads, StandardROIHeads, build_roi_heads, select_foreground_proposals, ) from .cascade_rcnn import CascadeROIHeads from .rotated_fast_rcnn import RROIHeads from .fast_rcnn import FastRCNNOutputLayers from . import cascade_rcnn # isort:skip __all__ = list(globals().keys())
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/__init__.py
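A sketch of the registry pattern that this package re-exports (the TinyBoxHead below is a hypothetical example, not part of detectron2): registering a module makes it selectable by name from a config.

import torch
from torch import nn

from detectron2.modeling.roi_heads import ROI_BOX_HEAD_REGISTRY


@ROI_BOX_HEAD_REGISTRY.register()
class TinyBoxHead(nn.Module):  # hypothetical head, for illustration only
    def __init__(self, cfg, input_shape):
        super().__init__()
        in_dim = input_shape.channels * input_shape.height * input_shape.width
        self.fc = nn.Linear(in_dim, 256)

    def forward(self, x):
        return self.fc(torch.flatten(x, start_dim=1))


# A config would then select it with MODEL.ROI_BOX_HEAD.NAME = "TinyBoxHead";
# build_box_head(cfg, input_shape) looks that name up in ROI_BOX_HEAD_REGISTRY.
# (A head used by StandardROIHeads would also expose an `output_shape` property.)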
# Copyright (c) Facebook, Inc. and its affiliates. import logging import numpy as np import torch from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms_rotated from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated from detectron2.utils.events import get_event_storage from ..box_regression import Box2BoxTransformRotated from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads logger = logging.getLogger(__name__) """ Shape shorthand in this module: N: number of images in the minibatch R: number of ROIs, combined over all images, in the minibatch Ri: number of ROIs in image i K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. Naming convention: deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box transform (see :class:`box_regression.Box2BoxTransformRotated`). pred_class_logits: predicted class scores in [-inf, +inf]; use softmax(pred_class_logits) to estimate P(class). gt_classes: ground-truth classification labels in [0, K], where [0, K) represent foreground object classes and K represents the background class. pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals to detection box predictions. gt_proposal_deltas: ground-truth rotated box2box transform deltas """ def fast_rcnn_inference_rotated( boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image ): """ Call `fast_rcnn_inference_single_image_rotated` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 5) if doing class-specific regression, or (Ri, 5) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that stores the topk most confidence detections. kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. """ result_per_image = [ fast_rcnn_inference_single_image_rotated( boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image ) for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) ] return [x[0] for x in result_per_image], [x[1] for x in result_per_image] def fast_rcnn_inference_single_image_rotated( boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image ): """ Single-image inference. 
Return rotated bounding-box detection results by thresholding on scores and applying rotated non-maximum suppression (Rotated NMS). Args: Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes per image. Returns: Same as `fast_rcnn_inference_rotated`, but for only one image. """ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) if not valid_mask.all(): boxes = boxes[valid_mask] scores = scores[valid_mask] B = 5 # box dimension scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // B # Convert to Boxes to use the `clip` function ... boxes = RotatedBoxes(boxes.reshape(-1, B)) boxes.clip(image_shape) boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B # Filter results based on detection scores filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # Apply per-class Rotated NMS keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = RotatedBoxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers): """ Two linear layers for predicting Rotated Fast R-CNN outputs. """ @classmethod def from_config(cls, cfg, input_shape): args = super().from_config(cfg, input_shape) args["box2box_transform"] = Box2BoxTransformRotated( weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS ) return args def inference(self, predictions, proposals): """ Returns: list[Instances]: same as `fast_rcnn_inference_rotated`. list[Tensor]: same as `fast_rcnn_inference_rotated`. """ boxes = self.predict_boxes(predictions, proposals) scores = self.predict_probs(predictions, proposals) image_shapes = [x.image_size for x in proposals] return fast_rcnn_inference_rotated( boxes, scores, image_shapes, self.test_score_thresh, self.test_nms_thresh, self.test_topk_per_image, ) @ROI_HEADS_REGISTRY.register() class RROIHeads(StandardROIHeads): """ This class is used by Rotated Fast R-CNN to detect rotated boxes. For now, it only supports box predictions but not mask or keypoints. """ @configurable def __init__(self, **kwargs): """ NOTE: this interface is experimental. """ super().__init__(**kwargs) assert ( not self.mask_on and not self.keypoint_on ), "Mask/Keypoints not supported in Rotated ROIHeads." assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!" 
@classmethod def _init_box_head(cls, cfg, input_shape): # fmt: off in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE # fmt: on assert pooler_type in ["ROIAlignRotated"], pooler_type # assume all channel counts are equal in_channels = [input_shape[f].channels for f in in_features][0] box_pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) box_head = build_box_head( cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) ) # This line is the only difference v.s. StandardROIHeads box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape) return { "box_in_features": in_features, "box_pooler": box_pooler, "box_head": box_head, "box_predictor": box_predictor, } @torch.no_grad() def label_and_sample_proposals(self, proposals, targets): """ Prepare some proposals to be used to train the RROI heads. It performs box matching between `proposals` and `targets`, and assigns training labels to the proposals. It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes, with a fraction of positives that is no larger than `self.positive_sample_fraction. Args: See :meth:`StandardROIHeads.forward` Returns: list[Instances]: length `N` list of `Instances`s containing the proposals sampled for training. Each `Instances` has the following fields: - proposal_boxes: the rotated proposal boxes - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to (this is only meaningful if the proposal has a label > 0; if label = 0 then the ground-truth box is random) - gt_classes: the ground-truth classification lable for each proposal """ if self.proposal_append_gt: proposals = add_ground_truth_to_proposals(targets, proposals) proposals_with_gt = [] num_fg_samples = [] num_bg_samples = [] for proposals_per_image, targets_per_image in zip(proposals, targets): has_gt = len(targets_per_image) > 0 match_quality_matrix = pairwise_iou_rotated( targets_per_image.gt_boxes, proposals_per_image.proposal_boxes ) matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) sampled_idxs, gt_classes = self._sample_proposals( matched_idxs, matched_labels, targets_per_image.gt_classes ) proposals_per_image = proposals_per_image[sampled_idxs] proposals_per_image.gt_classes = gt_classes if has_gt: sampled_targets = matched_idxs[sampled_idxs] proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] num_bg_samples.append((gt_classes == self.num_classes).sum().item()) num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) proposals_with_gt.append(proposals_per_image) # Log the number of fg/bg samples that are selected for training ROI heads storage = get_event_storage() storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) return proposals_with_gt
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/rotated_fast_rcnn.py
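A toy sketch of the rotated inference path above (illustrative values only; a built detectron2 with its rotated-box ops is assumed). Three class-agnostic (cx, cy, w, h, angle) boxes are thresholded and run through rotated NMS; the two heavily overlapping ones should collapse to a single detection.

import torch

from detectron2.modeling.roi_heads.rotated_fast_rcnn import fast_rcnn_inference_rotated

boxes = [torch.tensor([
    [50.0, 50.0, 40.0, 20.0, 30.0],      # nearly identical to the next box
    [52.0, 51.0, 42.0, 22.0, 28.0],
    [200.0, 150.0, 60.0, 30.0, -45.0],
])]
scores = [torch.tensor([                  # columns: class 0, class 1, background
    [0.85, 0.05, 0.10],
    [0.80, 0.05, 0.15],
    [0.10, 0.75, 0.15],
])]
instances, kept_indices = fast_rcnn_inference_rotated(
    boxes, scores, image_shapes=[(300, 400)],
    score_thresh=0.5, nms_thresh=0.5, topk_per_image=100,
)
print(len(instances[0]))              # 2 detections after rotated NMS
print(instances[0].pred_classes)      # tensor([0, 1])
print(kept_indices[0])                # indices into the 3 input boxes, e.g. tensor([0, 2])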
# Copyright (c) Facebook, Inc. and its affiliates. from typing import List import torch from torch import nn from torch.autograd.function import Function from detectron2.config import configurable from detectron2.layers import ShapeSpec from detectron2.structures import Boxes, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from ..box_regression import Box2BoxTransform from ..matcher import Matcher from ..poolers import ROIPooler from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads class _ScaleGradient(Function): @staticmethod def forward(ctx, input, scale): ctx.scale = scale return input @staticmethod def backward(ctx, grad_output): return grad_output * ctx.scale, None @ROI_HEADS_REGISTRY.register() class CascadeROIHeads(StandardROIHeads): """ The ROI heads that implement :paper:`Cascade R-CNN`. """ @configurable def __init__( self, *, box_in_features: List[str], box_pooler: ROIPooler, box_heads: List[nn.Module], box_predictors: List[nn.Module], proposal_matchers: List[Matcher], **kwargs, ): """ NOTE: this interface is experimental. Args: box_pooler (ROIPooler): pooler that extracts region features from given boxes box_heads (list[nn.Module]): box head for each cascade stage box_predictors (list[nn.Module]): box predictor for each cascade stage proposal_matchers (list[Matcher]): matcher with different IoU thresholds to match boxes with ground truth for each stage. The first matcher matches RPN proposals with ground truth, the other matchers use boxes predicted by the previous stage as proposals and match them with ground truth. """ assert "proposal_matcher" not in kwargs, ( "CascadeROIHeads takes 'proposal_matchers=' for each stage instead " "of one 'proposal_matcher='." ) # The first matcher matches RPN proposals with ground truth, done in the base class kwargs["proposal_matcher"] = proposal_matchers[0] num_stages = self.num_cascade_stages = len(box_heads) box_heads = nn.ModuleList(box_heads) box_predictors = nn.ModuleList(box_predictors) assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!" assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!" super().__init__( box_in_features=box_in_features, box_pooler=box_pooler, box_head=box_heads, box_predictor=box_predictors, **kwargs, ) self.proposal_matchers = proposal_matchers @classmethod def from_config(cls, cfg, input_shape): ret = super().from_config(cfg, input_shape) ret.pop("proposal_matcher") return ret @classmethod def _init_box_head(cls, cfg, input_shape): # fmt: off in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS assert len(cascade_bbox_reg_weights) == len(cascade_ious) assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \ "CascadeROIHeads only support class-agnostic regression now!" 
assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0] # fmt: on in_channels = [input_shape[f].channels for f in in_features] # Check all channel counts are equal assert len(set(in_channels)) == 1, in_channels in_channels = in_channels[0] box_pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) pooled_shape = ShapeSpec( channels=in_channels, width=pooler_resolution, height=pooler_resolution ) box_heads, box_predictors, proposal_matchers = [], [], [] for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights): box_head = build_box_head(cfg, pooled_shape) box_heads.append(box_head) box_predictors.append( FastRCNNOutputLayers( cfg, box_head.output_shape, box2box_transform=Box2BoxTransform(weights=bbox_reg_weights), ) ) proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False)) return { "box_in_features": in_features, "box_pooler": box_pooler, "box_heads": box_heads, "box_predictors": box_predictors, "proposal_matchers": proposal_matchers, } def forward(self, images, features, proposals, targets=None): del images if self.training: proposals = self.label_and_sample_proposals(proposals, targets) if self.training: # Need targets to train the box head losses = self._forward_box(features, proposals, targets) losses.update(self._forward_mask(features, proposals)) losses.update(self._forward_keypoint(features, proposals)) return proposals, losses else: pred_instances = self._forward_box(features, proposals) pred_instances = self.forward_with_given_boxes(features, pred_instances) return pred_instances, {} def _forward_box(self, features, proposals, targets=None): """ Args: features, targets: the same as in :meth:`ROIHeads.forward`. proposals (list[Instances]): the per-image object proposals with their matching ground truth. Each has fields "proposal_boxes", and "objectness_logits", "gt_classes", "gt_boxes". """ features = [features[f] for f in self.box_in_features] head_outputs = [] # (predictor, predictions, proposals) prev_pred_boxes = None image_sizes = [x.image_size for x in proposals] for k in range(self.num_cascade_stages): if k > 0: # The output boxes of the previous stage are used to create the input # proposals of the next stage. proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) if self.training: proposals = self._match_and_label_boxes(proposals, k, targets) predictions = self._run_stage(features, proposals, k) prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) head_outputs.append((self.box_predictor[k], predictions, proposals)) if self.training: losses = {} storage = get_event_storage() for stage, (predictor, predictions, proposals) in enumerate(head_outputs): with storage.name_scope("stage{}".format(stage)): stage_losses = predictor.losses(predictions, proposals) losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) return losses else: # Each is a list[Tensor] of length #image.
Each tensor is Ri x (K+1) scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] # Average the scores across heads scores = [ sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) for scores_per_image in zip(*scores_per_stage) ] # Use the boxes of the last head predictor, predictions, proposals = head_outputs[-1] boxes = predictor.predict_boxes(predictions, proposals) pred_instances, _ = fast_rcnn_inference( boxes, scores, image_sizes, predictor.test_score_thresh, predictor.test_nms_thresh, predictor.test_topk_per_image, ) return pred_instances @torch.no_grad() def _match_and_label_boxes(self, proposals, stage, targets): """ Match proposals with groundtruth using the matcher at the given stage. Label the proposals as foreground or background based on the match. Args: proposals (list[Instances]): One Instances for each image, with the field "proposal_boxes". stage (int): the current stage targets (list[Instances]): the ground truth instances Returns: list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" """ num_fg_samples, num_bg_samples = [], [] for proposals_per_image, targets_per_image in zip(proposals, targets): match_quality_matrix = pairwise_iou( targets_per_image.gt_boxes, proposals_per_image.proposal_boxes ) # proposal_labels are 0 or 1 matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) if len(targets_per_image) > 0: gt_classes = targets_per_image.gt_classes[matched_idxs] # Label unmatched proposals (0 label from matcher) as background (label=num_classes) gt_classes[proposal_labels == 0] = self.num_classes gt_boxes = targets_per_image.gt_boxes[matched_idxs] else: gt_classes = torch.zeros_like(matched_idxs) + self.num_classes gt_boxes = Boxes( targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) ) proposals_per_image.gt_classes = gt_classes proposals_per_image.gt_boxes = gt_boxes num_fg_samples.append((proposal_labels == 1).sum().item()) num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) # Log the number of fg/bg samples in each stage storage = get_event_storage() storage.put_scalar( "stage{}/roi_head/num_fg_samples".format(stage), sum(num_fg_samples) / len(num_fg_samples), ) storage.put_scalar( "stage{}/roi_head/num_bg_samples".format(stage), sum(num_bg_samples) / len(num_bg_samples), ) return proposals def _run_stage(self, features, proposals, stage): """ Args: features (list[Tensor]): #lvl input features to ROIHeads proposals (list[Instances]): #image Instances, with the field "proposal_boxes" stage (int): the current stage Returns: Same output as `FastRCNNOutputLayers.forward()`. """ box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) # The original implementation averages the losses among heads, # but scale up the parameter gradients of the heads. # This is equivalent to adding the losses among heads, # but scale down the gradients on features. box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) box_features = self.box_head[stage](box_features) return self.box_predictor[stage](box_features) def _create_proposals_from_boxes(self, boxes, image_sizes): """ Args: boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 image_sizes (list[tuple]): list of image shapes in (h, w) Returns: list[Instances]: per-image proposals with the given boxes. 
""" # Just like RPN, the proposals should not have gradients boxes = [Boxes(b.detach()) for b in boxes] proposals = [] for boxes_per_image, image_size in zip(boxes, image_sizes): boxes_per_image.clip(image_size) if self.training: # do not filter empty boxes at inference time, # because the scores from each stage need to be aligned and added later boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] prop = Instances(image_size) prop.proposal_boxes = boxes_per_image proposals.append(prop) return proposals
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/cascade_rcnn.py
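At inference time CascadeROIHeads averages the class scores of all stages, and during training it sums the per-stage losses while _ScaleGradient divides the gradient reaching the shared pooled features by the number of stages. The snippet below is a self-contained sketch of that gradient-scaling behaviour; the class body is copied from the file above, and the feature tensor is a made-up stand-in for pooled ROI features.

import torch
from torch.autograd.function import Function


class _ScaleGradient(Function):
    # Identity in the forward pass; scales the incoming gradient in the backward pass.
    @staticmethod
    def forward(ctx, input, scale):
        ctx.scale = scale
        return input

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.scale, None


num_cascade_stages = 3
box_features = torch.ones(4, 8, requires_grad=True)  # stand-in for pooled ROI features

scaled = _ScaleGradient.apply(box_features, 1.0 / num_cascade_stages)
loss = scaled.sum()  # stand-in for the summed per-stage losses
loss.backward()

print(torch.equal(scaled, box_features))  # True: forward output is unchanged
print(box_features.grad.unique())         # tensor([0.3333]): gradient divided by the number of stages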
# Copyright (c) Facebook, Inc. and its affiliates. import inspect import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec, nonzero_tuple from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry from ..backbone.resnet import BottleneckBlock, ResNet from ..matcher import Matcher from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from ..sampling import subsample_labels from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .keypoint_head import build_keypoint_head from .mask_head import build_mask_head ROI_HEADS_REGISTRY = Registry("ROI_HEADS") ROI_HEADS_REGISTRY.__doc__ = """ Registry for ROI heads in a generalized R-CNN model. ROIHeads take feature maps and region proposals, and perform per-region computation. The registered object will be called with `obj(cfg, input_shape)`. The call is expected to return an :class:`ROIHeads`. """ logger = logging.getLogger(__name__) def build_roi_heads(cfg, input_shape): """ Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. """ name = cfg.MODEL.ROI_HEADS.NAME return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) def select_foreground_proposals( proposals: List[Instances], bg_label: int ) -> Tuple[List[Instances], List[torch.Tensor]]: """ Given a list of N Instances (for N images), each containing a `gt_classes` field, return a list of Instances that contain only instances with `gt_classes != -1 && gt_classes != bg_label`. Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of background class. Returns: list[Instances]: N Instances, each contains only the selected foreground instances. list[Tensor]: N boolean vector, correspond to the selection mask of each Instances object. True for selected instances. """ assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], Instances) assert proposals[0].has("gt_classes") fg_proposals = [] fg_selection_masks = [] for proposals_per_image in proposals: gt_classes = proposals_per_image.gt_classes fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) fg_idxs = fg_selection_mask.nonzero().squeeze(1) fg_proposals.append(proposals_per_image[fg_idxs]) fg_selection_masks.append(fg_selection_mask) return fg_proposals, fg_selection_masks def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: """ Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training keypoint head are re-sampled from all the proposals with IOU>threshold & >=1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU>threshold, then proposals with no visible keypoint are filtered out. This strategy seems to make no difference on Detectron and is easier to implement. 
""" ret = [] all_num_fg = [] for proposals_per_image in proposals: # If empty/unannotated image (hard negatives), skip filtering for train if len(proposals_per_image) == 0: ret.append(proposals_per_image) continue gt_keypoints = proposals_per_image.gt_keypoints.tensor # #fg x K x 3 vis_mask = gt_keypoints[:, :, 2] >= 1 xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 kp_in_box = ( (xs >= proposal_boxes[:, :, 0]) & (xs <= proposal_boxes[:, :, 2]) & (ys >= proposal_boxes[:, :, 1]) & (ys <= proposal_boxes[:, :, 3]) ) selection = (kp_in_box & vis_mask).any(dim=1) selection_idxs = nonzero_tuple(selection)[0] all_num_fg.append(selection_idxs.numel()) ret.append(proposals_per_image[selection_idxs]) storage = get_event_storage() storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) return ret class ROIHeads(torch.nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It typically contains logic to 1. (in training only) match proposals with ground truth and sample them 2. crop the regions and extract per-region features using proposals 3. make per-region predictions with different heads It can have many variants, implemented as subclasses of this class. This base class contains the logic to match/sample proposals. But it is not necessary to inherit this class if the sampling logic is not needed. """ @configurable def __init__( self, *, num_classes, batch_size_per_image, positive_fraction, proposal_matcher, proposal_append_gt=True, ): """ NOTE: this interface is experimental. Args: num_classes (int): number of foreground classes (i.e. background is not included) batch_size_per_image (int): number of proposals to sample for training positive_fraction (float): fraction of positive (foreground) proposals to sample for training. proposal_matcher (Matcher): matcher that matches proposals and ground truth proposal_append_gt (bool): whether to include ground truth as proposals as well """ super().__init__() self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.num_classes = num_classes self.proposal_matcher = proposal_matcher self.proposal_append_gt = proposal_append_gt @classmethod def from_config(cls, cfg): return { "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, # Matcher to assign box proposals to gt boxes "proposal_matcher": Matcher( cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS, cfg.MODEL.ROI_HEADS.IOU_LABELS, allow_low_quality_matches=False, ), } def _sample_proposals( self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification labels. Args: matched_idxs (Tensor): a vector of length N, each is the best-matched gt index in [0, M) for each proposal. matched_labels (Tensor): a vector of length N, the matcher's label (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. gt_classes (Tensor): a vector of length M. Returns: Tensor: a vector of indices of sampled proposals. Each is in [0, N). Tensor: a vector of the same length, the classification label for each sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the background (num_classes). 
""" has_gt = gt_classes.numel() > 0 # Get the corresponding GT for each proposal if has_gt: gt_classes = gt_classes[matched_idxs] # Label unmatched proposals (0 label from matcher) as background (label=num_classes) gt_classes[matched_labels == 0] = self.num_classes # Label ignore proposals (-1 label) gt_classes[matched_labels == -1] = -1 else: gt_classes = torch.zeros_like(matched_idxs) + self.num_classes sampled_fg_idxs, sampled_bg_idxs = subsample_labels( gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes ) sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) return sampled_idxs, gt_classes[sampled_idxs] @torch.no_grad() def label_and_sample_proposals( self, proposals: List[Instances], targets: List[Instances] ) -> List[Instances]: """ Prepare some proposals to be used to train the ROI heads. It performs box matching between `proposals` and `targets`, and assigns training labels to the proposals. It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth boxes, with a fraction of positives that is no larger than ``self.positive_fraction``. Args: See :meth:`ROIHeads.forward` Returns: list[Instances]: length `N` list of `Instances`s containing the proposals sampled for training. Each `Instances` has the following fields: - proposal_boxes: the proposal boxes - gt_boxes: the ground-truth box that the proposal is assigned to (this is only meaningful if the proposal has a label > 0; if label = 0 then the ground-truth box is random) Other fields such as "gt_classes", "gt_masks", that's included in `targets`. """ # Augment proposals with ground-truth boxes. # In the case of learned proposals (e.g., RPN), when training starts # the proposals will be low quality due to random initialization. # It's possible that none of these initial # proposals have high enough overlap with the gt objects to be used # as positive examples for the second stage components (box head, # cls head, mask head). Adding the gt boxes to the set of proposals # ensures that the second stage components will have some positive # examples from the start of training. For RPN, this augmentation improves # convergence and empirically improves box AP on COCO by about 0.5 # points (under one tested configuration). if self.proposal_append_gt: proposals = add_ground_truth_to_proposals(targets, proposals) proposals_with_gt = [] num_fg_samples = [] num_bg_samples = [] for proposals_per_image, targets_per_image in zip(proposals, targets): has_gt = len(targets_per_image) > 0 match_quality_matrix = pairwise_iou( targets_per_image.gt_boxes, proposals_per_image.proposal_boxes ) matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) sampled_idxs, gt_classes = self._sample_proposals( matched_idxs, matched_labels, targets_per_image.gt_classes ) # Set target attributes of the sampled proposals: proposals_per_image = proposals_per_image[sampled_idxs] proposals_per_image.gt_classes = gt_classes if has_gt: sampled_targets = matched_idxs[sampled_idxs] # We index all the attributes of targets that start with "gt_" # and have not been added to proposals yet (="gt_classes"). # NOTE: here the indexing waste some compute, because heads # like masks, keypoints, etc, will filter the proposals again, # (by foreground/background, or number of keypoints in the image, etc) # so we essentially index the data twice. 
for (trg_name, trg_value) in targets_per_image.get_fields().items(): if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): proposals_per_image.set(trg_name, trg_value[sampled_targets]) # If no GT is given in the image, we don't know what a dummy gt value can be. # Therefore the returned proposals won't have any gt_* fields, except for a # gt_classes full of background label. num_bg_samples.append((gt_classes == self.num_classes).sum().item()) num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) proposals_with_gt.append(proposals_per_image) # Log the number of fg/bg samples that are selected for training ROI heads storage = get_event_storage() storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) return proposals_with_gt def forward( self, images: ImageList, features: Dict[str, torch.Tensor], proposals: List[Instances], targets: Optional[List[Instances]] = None, ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: """ Args: images (ImageList): features (dict[str,Tensor]): input data as a mapping from feature map name to tensor. Axis 0 represents the number of images `N` in the input data; axes 1-3 are channels, height, and width, which may vary between feature maps (e.g., if a feature pyramid is used). proposals (list[Instances]): length `N` list of `Instances`. The i-th `Instances` contains object proposals for the i-th input image, with fields "proposal_boxes" and "objectness_logits". targets (list[Instances], optional): length `N` list of `Instances`. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Specify `targets` during training only. It may have the following fields: - gt_boxes: the bounding box of each instance. - gt_classes: the label for each instance with a category ranging in [0, #class]. - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance. - gt_keypoints: NxKx3, the groud-truth keypoints for each instance. Returns: list[Instances]: length `N` list of `Instances` containing the detected instances. Returned during inference only; may be [] during training. dict[str->Tensor]: mapping from a named loss to a tensor storing the loss. Used during training only. """ raise NotImplementedError() @ROI_HEADS_REGISTRY.register() class Res5ROIHeads(ROIHeads): """ The ROIHeads in a typical "C4" R-CNN model, where the box and mask head share the cropping and the per-region feature computation by a Res5 block. See :paper:`ResNet` Appendix A. """ @configurable def __init__( self, *, in_features: List[str], pooler: ROIPooler, res5: nn.Module, box_predictor: nn.Module, mask_head: Optional[nn.Module] = None, **kwargs, ): """ NOTE: this interface is experimental. Args: in_features (list[str]): list of backbone feature map names to use for feature extraction pooler (ROIPooler): pooler to extra region features from backbone res5 (nn.Sequential): a CNN to compute per-region features, to be used by ``box_predictor`` and ``mask_head``. Typically this is a "res5" block from a ResNet. box_predictor (nn.Module): make box predictions from the feature. Should have the same interface as :class:`FastRCNNOutputLayers`. 
mask_head (nn.Module): transform features to make mask predictions """ super().__init__(**kwargs) self.in_features = in_features self.pooler = pooler if isinstance(res5, (list, tuple)): res5 = nn.Sequential(*res5) self.res5 = res5 self.box_predictor = box_predictor self.mask_on = mask_head is not None if self.mask_on: self.mask_head = mask_head @classmethod def from_config(cls, cfg, input_shape): # fmt: off ret = super().from_config(cfg) in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE pooler_scales = (1.0 / input_shape[in_features[0]].stride, ) sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO mask_on = cfg.MODEL.MASK_ON # fmt: on assert not cfg.MODEL.KEYPOINT_ON assert len(in_features) == 1 ret["pooler"] = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) # Compatbility with old moco code. Might be useful. # See notes in StandardROIHeads.from_config if not inspect.ismethod(cls._build_res5_block): logger.warning( "The behavior of _build_res5_block may change. " "Please do not depend on private methods." ) cls._build_res5_block = classmethod(cls._build_res5_block) ret["res5"], out_channels = cls._build_res5_block(cfg) ret["box_predictor"] = FastRCNNOutputLayers( cfg, ShapeSpec(channels=out_channels, height=1, width=1) ) if mask_on: ret["mask_head"] = build_mask_head( cfg, ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution), ) return ret @classmethod def _build_res5_block(cls, cfg): # fmt: off stage_channel_factor = 2 ** 3 # res5 is 8x res2 num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group * stage_channel_factor out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 norm = cfg.MODEL.RESNETS.NORM assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \ "Deformable conv is not yet supported in res5 head." # fmt: on blocks = ResNet.make_stage( BottleneckBlock, 3, stride_per_block=[2, 1, 1], in_channels=out_channels // 2, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, norm=norm, stride_in_1x1=stride_in_1x1, ) return nn.Sequential(*blocks), out_channels def _shared_roi_transform(self, features, boxes): x = self.pooler(features, boxes) return self.res5(x) def forward(self, images, features, proposals, targets=None): """ See :meth:`ROIHeads.forward`. """ del images if self.training: assert targets proposals = self.label_and_sample_proposals(proposals, targets) del targets proposal_boxes = [x.proposal_boxes for x in proposals] box_features = self._shared_roi_transform( [features[f] for f in self.in_features], proposal_boxes ) predictions = self.box_predictor(box_features.mean(dim=[2, 3])) if self.training: del features losses = self.box_predictor.losses(predictions, proposals) if self.mask_on: proposals, fg_selection_masks = select_foreground_proposals( proposals, self.num_classes ) # Since the ROI feature transform is shared between boxes and masks, # we don't need to recompute features. The mask loss is only defined # on foreground proposals, so we need to select out the foreground # features. 
mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] del box_features losses.update(self.mask_head(mask_features, proposals)) return [], losses else: pred_instances, _ = self.box_predictor.inference(predictions, proposals) pred_instances = self.forward_with_given_boxes(features, pred_instances) return pred_instances, {} def forward_with_given_boxes(self, features, instances): """ Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. Args: features: same as in `forward()` instances (list[Instances]): instances to predict other outputs. Expect the keys "pred_boxes" and "pred_classes" to exist. Returns: instances (Instances): the same `Instances` object, with extra fields such as `pred_masks` or `pred_keypoints`. """ assert not self.training assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") if self.mask_on: features = [features[f] for f in self.in_features] x = self._shared_roi_transform(features, [x.pred_boxes for x in instances]) return self.mask_head(x, instances) else: return instances @ROI_HEADS_REGISTRY.register() class StandardROIHeads(ROIHeads): """ It's "standard" in a sense that there is no ROI transform sharing or feature sharing between tasks. Each head independently processes the input features by each head's own pooler and head. This class is used by most models, such as FPN and C5. To implement more models, you can subclass it and implement a different :meth:`forward()` or a head. """ @configurable def __init__( self, *, box_in_features: List[str], box_pooler: ROIPooler, box_head: nn.Module, box_predictor: nn.Module, mask_in_features: Optional[List[str]] = None, mask_pooler: Optional[ROIPooler] = None, mask_head: Optional[nn.Module] = None, keypoint_in_features: Optional[List[str]] = None, keypoint_pooler: Optional[ROIPooler] = None, keypoint_head: Optional[nn.Module] = None, train_on_pred_boxes: bool = False, **kwargs, ): """ NOTE: this interface is experimental. Args: box_in_features (list[str]): list of feature names to use for the box head. box_pooler (ROIPooler): pooler to extra region features for box head box_head (nn.Module): transform features to make box predictions box_predictor (nn.Module): make box predictions from the feature. Should have the same interface as :class:`FastRCNNOutputLayers`. mask_in_features (list[str]): list of feature names to use for the mask pooler or mask head. None if not using mask head. mask_pooler (ROIPooler): pooler to extract region features from image features. The mask head will then take region features to make predictions. If None, the mask head will directly take the dict of image features defined by `mask_in_features` mask_head (nn.Module): transform features to make mask predictions keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``. train_on_pred_boxes (bool): whether to use proposal boxes or predicted boxes from the box head to train other heads. 
""" super().__init__(**kwargs) # keep self.in_features for backward compatibility self.in_features = self.box_in_features = box_in_features self.box_pooler = box_pooler self.box_head = box_head self.box_predictor = box_predictor self.mask_on = mask_in_features is not None if self.mask_on: self.mask_in_features = mask_in_features self.mask_pooler = mask_pooler self.mask_head = mask_head self.keypoint_on = keypoint_in_features is not None if self.keypoint_on: self.keypoint_in_features = keypoint_in_features self.keypoint_pooler = keypoint_pooler self.keypoint_head = keypoint_head self.train_on_pred_boxes = train_on_pred_boxes @classmethod def from_config(cls, cfg, input_shape): ret = super().from_config(cfg) ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES # Subclasses that have not been updated to use from_config style construction # may have overridden _init_*_head methods. In this case, those overridden methods # will not be classmethods and we need to avoid trying to call them here. # We test for this with ismethod which only returns True for bound methods of cls. # Such subclasses will need to handle calling their overridden _init_*_head methods. if inspect.ismethod(cls._init_box_head): ret.update(cls._init_box_head(cfg, input_shape)) if inspect.ismethod(cls._init_mask_head): ret.update(cls._init_mask_head(cfg, input_shape)) if inspect.ismethod(cls._init_keypoint_head): ret.update(cls._init_keypoint_head(cfg, input_shape)) return ret @classmethod def _init_box_head(cls, cfg, input_shape): # fmt: off in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE # fmt: on # If StandardROIHeads is applied on multiple feature maps (as in FPN), # then we share the same predictors and therefore the channel counts must be the same in_channels = [input_shape[f].channels for f in in_features] # Check all channel counts are equal assert len(set(in_channels)) == 1, in_channels in_channels = in_channels[0] box_pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) # Here we split "box head" and "box predictor", which is mainly due to historical reasons. # They are used together so the "box predictor" layers should be part of the "box head". # New subclasses of ROIHeads do not need "box predictor"s. 
box_head = build_box_head( cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) ) box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape) return { "box_in_features": in_features, "box_pooler": box_pooler, "box_head": box_head, "box_predictor": box_predictor, } @classmethod def _init_mask_head(cls, cfg, input_shape): if not cfg.MODEL.MASK_ON: return {} # fmt: off in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE # fmt: on in_channels = [input_shape[f].channels for f in in_features][0] ret = {"mask_in_features": in_features} ret["mask_pooler"] = ( ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) if pooler_type else None ) if pooler_type: shape = ShapeSpec( channels=in_channels, width=pooler_resolution, height=pooler_resolution ) else: shape = {f: input_shape[f] for f in in_features} ret["mask_head"] = build_mask_head(cfg, shape) return ret @classmethod def _init_keypoint_head(cls, cfg, input_shape): if not cfg.MODEL.KEYPOINT_ON: return {} # fmt: off in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE # fmt: on in_channels = [input_shape[f].channels for f in in_features][0] ret = {"keypoint_in_features": in_features} ret["keypoint_pooler"] = ( ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) if pooler_type else None ) if pooler_type: shape = ShapeSpec( channels=in_channels, width=pooler_resolution, height=pooler_resolution ) else: shape = {f: input_shape[f] for f in in_features} ret["keypoint_head"] = build_keypoint_head(cfg, shape) return ret def forward( self, images: ImageList, features: Dict[str, torch.Tensor], proposals: List[Instances], targets: Optional[List[Instances]] = None, ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: """ See :class:`ROIHeads.forward`. """ del images if self.training: assert targets, "'targets' argument is required during training" proposals = self.label_and_sample_proposals(proposals, targets) del targets if self.training: losses = self._forward_box(features, proposals) # Usually the original proposals used by the box head are used by the mask, keypoint # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes # predicted by the box head. losses.update(self._forward_mask(features, proposals)) losses.update(self._forward_keypoint(features, proposals)) return proposals, losses else: pred_instances = self._forward_box(features, proposals) # During inference cascaded prediction is used: the mask and keypoints heads are only # applied to the top scoring box detections. pred_instances = self.forward_with_given_boxes(features, pred_instances) return pred_instances, {} def forward_with_given_boxes( self, features: Dict[str, torch.Tensor], instances: List[Instances] ) -> List[Instances]: """ Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. 
This is useful for downstream tasks where a box is known, but need to obtain other attributes (outputs of other heads). Test-time augmentation also uses this. Args: features: same as in `forward()` instances (list[Instances]): instances to predict other outputs. Expect the keys "pred_boxes" and "pred_classes" to exist. Returns: list[Instances]: the same `Instances` objects, with extra fields such as `pred_masks` or `pred_keypoints`. """ assert not self.training assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") instances = self._forward_mask(features, instances) instances = self._forward_keypoint(features, instances) return instances def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]): """ Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`, the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument. Args: features (dict[str, Tensor]): mapping from feature map names to tensor. Same as in :meth:`ROIHeads.forward`. proposals (list[Instances]): the per-image object proposals with their matching ground truth. Each has fields "proposal_boxes", and "objectness_logits", "gt_classes", "gt_boxes". Returns: In training, a dict of losses. In inference, a list of `Instances`, the predicted instances. """ features = [features[f] for f in self.box_in_features] box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) box_features = self.box_head(box_features) predictions = self.box_predictor(box_features) del box_features if self.training: losses = self.box_predictor.losses(predictions, proposals) # proposals is modified in-place below, so losses must be computed first. if self.train_on_pred_boxes: with torch.no_grad(): pred_boxes = self.box_predictor.predict_boxes_for_gt_classes( predictions, proposals ) for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes): proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image) return losses else: pred_instances, _ = self.box_predictor.inference(predictions, proposals) return pred_instances def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]): """ Forward logic of the mask prediction branch. Args: features (dict[str, Tensor]): mapping from feature map names to tensor. Same as in :meth:`ROIHeads.forward`. instances (list[Instances]): the per-image instances to train/predict masks. In training, they can be the proposals. In inference, they can be the boxes predicted by R-CNN box head. Returns: In training, a dict of losses. In inference, update `instances` with new fields "pred_masks" and return it. """ if not self.mask_on: return {} if self.training else instances if self.training: # head is only trained on positive proposals. instances, _ = select_foreground_proposals(instances, self.num_classes) if self.mask_pooler is not None: features = [features[f] for f in self.mask_in_features] boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances] features = self.mask_pooler(features, boxes) else: features = {f: features[f] for f in self.mask_in_features} return self.mask_head(features, instances) def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]): """ Forward logic of the keypoint prediction branch. Args: features (dict[str, Tensor]): mapping from feature map names to tensor. Same as in :meth:`ROIHeads.forward`. instances (list[Instances]): the per-image instances to train/predict keypoints. 
In training, they can be the proposals. In inference, they can be the boxes predicted by R-CNN box head. Returns: In training, a dict of losses. In inference, update `instances` with new fields "pred_keypoints" and return it. """ if not self.keypoint_on: return {} if self.training else instances if self.training: # head is only trained on positive proposals with >=1 visible keypoints. instances, _ = select_foreground_proposals(instances, self.num_classes) instances = select_proposals_with_visible_keypoints(instances) if self.keypoint_pooler is not None: features = [features[f] for f in self.keypoint_in_features] boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances] features = self.keypoint_pooler(features, boxes) else: features = {f: features[f] for f in self.keypoint_in_features} return self.keypoint_head(features, instances)
banmo-main
third_party/detectron2_old/detectron2/modeling/roi_heads/roi_heads.py
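Because every head in this file is registered in ROI_HEADS_REGISTRY and built through from_config, a complete ROI-head stack can be constructed from a config plus the shapes of the backbone features it consumes. A minimal sketch, assuming a detectron2 installation: the default config selects Res5ROIHeads on the single "res4" feature, and the channel/stride values below describe a standard ResNet-C4 backbone rather than anything specific to this repository.

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_roi_heads

cfg = get_cfg()  # defaults: MODEL.ROI_HEADS.NAME == "Res5ROIHeads", IN_FEATURES == ["res4"]

# Shape of the backbone feature map the ROI heads will pool from (ResNet-C4 "res4").
input_shape = {"res4": ShapeSpec(channels=1024, stride=16)}

roi_heads = build_roi_heads(cfg, input_shape)
print(type(roi_heads).__name__)  # Res5ROIHeads
print(roi_heads.num_classes)     # 80 foreground classes by default (COCO)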
# Copyright (c) Facebook, Inc. and its affiliates. from detectron2.layers import ShapeSpec from detectron2.utils.registry import Registry from .backbone import Backbone BACKBONE_REGISTRY = Registry("BACKBONE") BACKBONE_REGISTRY.__doc__ = """ Registry for backbones, which extract feature maps from images. The registered object must be a callable that accepts two arguments: 1. A :class:`detectron2.config.CfgNode` 2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. The registered object must return an instance of :class:`Backbone`. """ def build_backbone(cfg, input_shape=None): """ Build a backbone from `cfg.MODEL.BACKBONE.NAME`. Returns: an instance of :class:`Backbone` """ if input_shape is None: input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) backbone_name = cfg.MODEL.BACKBONE.NAME backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) assert isinstance(backbone, Backbone) return backbone
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/build.py
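build_backbone above is just a registry lookup on cfg.MODEL.BACKBONE.NAME, so plugging in a new backbone only requires registering a builder callable that returns a Backbone. The sketch below illustrates that contract with a toy single-conv backbone; ToyBackbone and the name "build_toy_backbone" are hypothetical and exist only for this example.

import torch
from torch import nn

from detectron2.config import get_cfg
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, build_backbone


class ToyBackbone(Backbone):
    # One conv layer producing a single feature map named "toy" at stride 4.
    def __init__(self, cfg, input_shape):
        super().__init__()
        self.conv = nn.Conv2d(input_shape.channels, 64, kernel_size=7, stride=4, padding=3)
        self._out_features = ["toy"]
        self._out_feature_channels = {"toy": 64}
        self._out_feature_strides = {"toy": 4}

    def forward(self, x):
        return {"toy": self.conv(x)}


@BACKBONE_REGISTRY.register()
def build_toy_backbone(cfg, input_shape):
    return ToyBackbone(cfg, input_shape)


cfg = get_cfg()
cfg.MODEL.BACKBONE.NAME = "build_toy_backbone"
backbone = build_backbone(cfg)  # input_shape defaults to ShapeSpec(channels=len(PIXEL_MEAN))

features = backbone(torch.randn(1, 3, 64, 64))
print({k: tuple(v.shape) for k, v in features.items()})  # {'toy': (1, 64, 16, 16)}
print(backbone.output_shape())  # {'toy': ShapeSpec(...)} with channels=64, stride=4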
# Copyright (c) Facebook, Inc. and its affiliates. import math import fvcore.nn.weight_init as weight_init import torch import torch.nn.functional as F from torch import nn from detectron2.layers import Conv2d, ShapeSpec, get_norm from .backbone import Backbone from .build import BACKBONE_REGISTRY from .resnet import build_resnet_backbone __all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] class FPN(Backbone): """ This module implements :paper:`FPN`. It creates pyramid features built on top of some input feature maps. """ _fuse_type: torch.jit.Final[str] def __init__( self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" ): """ Args: bottom_up (Backbone): module representing the bottom up subnetwork. Must be a subclass of :class:`Backbone`. The multi-scale feature maps generated by the bottom up network, and listed in `in_features`, are used to generate FPN levels. in_features (list[str]): names of the input feature maps coming from the backbone to which FPN is attached. For example, if the backbone produces ["res2", "res3", "res4"], any *contiguous* sublist of these may be used; order must be from high to low resolution. out_channels (int): number of channels in the output feature maps. norm (str): the normalization to use. top_block (nn.Module or None): if provided, an extra operation will be performed on the output of the last (smallest resolution) FPN output, and the result will extend the result list. The top_block further downsamples the feature map. It must have an attribute "num_levels", meaning the number of extra FPN levels added by this block, and "in_feature", which is a string representing its input feature (e.g., p5). fuse_type (str): types for fusing the top down features and the lateral ones. It can be "sum" (default), which sums up element-wise; or "avg", which takes the element-wise mean of the two. """ super(FPN, self).__init__() assert isinstance(bottom_up, Backbone) assert in_features, in_features # Feature map strides and channels from the bottom up network (e.g. ResNet) input_shapes = bottom_up.output_shape() strides = [input_shapes[f].stride for f in in_features] in_channels_per_feature = [input_shapes[f].channels for f in in_features] _assert_strides_are_log2_contiguous(strides) lateral_convs = [] output_convs = [] use_bias = norm == "" for idx, in_channels in enumerate(in_channels_per_feature): lateral_norm = get_norm(norm, out_channels) output_norm = get_norm(norm, out_channels) lateral_conv = Conv2d( in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm ) output_conv = Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, ) weight_init.c2_xavier_fill(lateral_conv) weight_init.c2_xavier_fill(output_conv) stage = int(math.log2(strides[idx])) self.add_module("fpn_lateral{}".format(stage), lateral_conv) self.add_module("fpn_output{}".format(stage), output_conv) lateral_convs.append(lateral_conv) output_convs.append(output_conv) # Place convs into top-down order (from low to high resolution) # to make the top-down computation in forward clearer. self.lateral_convs = lateral_convs[::-1] self.output_convs = output_convs[::-1] self.top_block = top_block self.in_features = tuple(in_features) self.bottom_up = bottom_up # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"] self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} # top block output feature maps. 
if self.top_block is not None: for s in range(stage, stage + self.top_block.num_levels): self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) self._out_features = list(self._out_feature_strides.keys()) self._out_feature_channels = {k: out_channels for k in self._out_features} self._size_divisibility = strides[-1] assert fuse_type in {"avg", "sum"} self._fuse_type = fuse_type @property def size_divisibility(self): return self._size_divisibility def forward(self, x): """ Args: input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to feature map tensor for each feature level in high to low resolution order. Returns: dict[str->Tensor]: mapping from feature map name to FPN feature map tensor in high to low resolution order. Returned feature names follow the FPN paper convention: "p<stage>", where stage has stride = 2 ** stage e.g., ["p2", "p3", ..., "p6"]. """ bottom_up_features = self.bottom_up(x) results = [] prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]]) results.append(self.output_convs[0](prev_features)) # Reverse feature maps into top-down order (from low to high resolution) for idx, (lateral_conv, output_conv) in enumerate( zip(self.lateral_convs, self.output_convs) ): # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336 # Therefore we loop over all modules but skip the first one if idx > 0: features = self.in_features[-idx - 1] features = bottom_up_features[features] top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest") lateral_features = lateral_conv(features) prev_features = lateral_features + top_down_features if self._fuse_type == "avg": prev_features /= 2 results.insert(0, output_conv(prev_features)) if self.top_block is not None: if self.top_block.in_feature in bottom_up_features: top_block_in_feature = bottom_up_features[self.top_block.in_feature] else: top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] results.extend(self.top_block(top_block_in_feature)) assert len(self._out_features) == len(results) return {f: res for f, res in zip(self._out_features, results)} def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def _assert_strides_are_log2_contiguous(strides): """ Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". """ for i, stride in enumerate(strides[1:], 1): assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format( stride, strides[i - 1] ) class LastLevelMaxPool(nn.Module): """ This module is used in the original FPN to generate a downsampled P6 feature from P5. """ def __init__(self): super().__init__() self.num_levels = 1 self.in_feature = "p5" def forward(self, x): return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. 
""" def __init__(self, in_channels, out_channels, in_feature="res5"): super().__init__() self.num_levels = 2 self.in_feature = in_feature self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) for module in [self.p6, self.p7]: weight_init.c2_xavier_fill(module) def forward(self, c5): p6 = self.p6(c5) p7 = self.p7(F.relu(p6)) return [p6, p7] @BACKBONE_REGISTRY.register() def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): """ Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_resnet_backbone(cfg, input_shape) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone @BACKBONE_REGISTRY.register() def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): """ Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_resnet_backbone(cfg, input_shape) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS in_channels_p6p7 = bottom_up.output_shape()["res5"].channels backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelP6P7(in_channels_p6p7, out_channels), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/fpn.py
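build_resnet_fpn_backbone above wires a ResNet bottom-up network into the FPN module and appends a LastLevelMaxPool P6. A short sketch that builds it from a default config and inspects the pyramid it produces, assuming a detectron2 installation; the 224x224 dummy input is arbitrary apart from being a multiple of the FPN size_divisibility (32), and the printed shapes follow from the default ResNet-50/FPN settings.

import torch
from detectron2.config import get_cfg
from detectron2.modeling import build_backbone

cfg = get_cfg()
cfg.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone"
cfg.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
cfg.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]

backbone = build_backbone(cfg)
print(backbone.size_divisibility)  # 32: the stride of the coarsest bottom-up feature

features = backbone(torch.randn(2, 3, 224, 224))
for name, feat in features.items():
    print(name, tuple(feat.shape))
# p2 (2, 256, 56, 56)
# p3 (2, 256, 28, 28)
# p4 (2, 256, 14, 14)
# p5 (2, 256, 7, 7)
# p6 (2, 256, 4, 4)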
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Implementation of RegNet models from :paper:`dds` and :paper:`scaling`. This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications. Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify model loading. """ import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone __all__ = [ "AnyNet", "RegNet", "ResStem", "SimpleStem", "VanillaBlock", "ResBasicBlock", "ResBottleneckBlock", ] def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False): """Helper for building a conv2d layer.""" assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." s, p, g, b = stride, (k - 1) // 2, groups, bias return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b) def gap2d(): """Helper for building a global average pooling layer.""" return nn.AdaptiveAvgPool2d((1, 1)) def pool2d(k, *, stride=1): """Helper for building a pool2d layer.""" assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2) def init_weights(m): """Performs ResNet-style weight initialization.""" if isinstance(m, nn.Conv2d): # Note that there is no bias due to BN fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1.0) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.weight.data.normal_(mean=0.0, std=0.01) m.bias.data.zero_() class ResStem(CNNBlockBase): """ResNet stem for ImageNet: 7x7, BN, AF, MaxPool.""" def __init__(self, w_in, w_out, norm, activation_class): super().__init__(w_in, w_out, 4) self.conv = conv2d(w_in, w_out, 7, stride=2) self.bn = get_norm(norm, w_out) self.af = activation_class() self.pool = pool2d(3, stride=2) def forward(self, x): for layer in self.children(): x = layer(x) return x class SimpleStem(CNNBlockBase): """Simple stem for ImageNet: 3x3, BN, AF.""" def __init__(self, w_in, w_out, norm, activation_class): super().__init__(w_in, w_out, 2) self.conv = conv2d(w_in, w_out, 3, stride=2) self.bn = get_norm(norm, w_out) self.af = activation_class() def forward(self, x): for layer in self.children(): x = layer(x) return x class SE(nn.Module): """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.""" def __init__(self, w_in, w_se, activation_class): super().__init__() self.avg_pool = gap2d() self.f_ex = nn.Sequential( conv2d(w_in, w_se, 1, bias=True), activation_class(), conv2d(w_se, w_in, 1, bias=True), nn.Sigmoid(), ) def forward(self, x): return x * self.f_ex(self.avg_pool(x)) class VanillaBlock(CNNBlockBase): """Vanilla block: [3x3 conv, BN, Relu] x2.""" def __init__(self, w_in, w_out, stride, norm, activation_class, _params): super().__init__(w_in, w_out, stride) self.a = conv2d(w_in, w_out, 3, stride=stride) self.a_bn = get_norm(norm, w_out) self.a_af = activation_class() self.b = conv2d(w_out, w_out, 3) self.b_bn = get_norm(norm, w_out) self.b_af = activation_class() def forward(self, x): for layer in self.children(): x = layer(x) return x class BasicTransform(nn.Module): """Basic transformation: [3x3 conv, BN, Relu] x2.""" def __init__(self, w_in, w_out, stride, norm, activation_class, _params): super().__init__() self.a = conv2d(w_in, w_out, 3, stride=stride) self.a_bn = get_norm(norm, w_out) self.a_af = activation_class() self.b = 
conv2d(w_out, w_out, 3) self.b_bn = get_norm(norm, w_out) self.b_bn.final_bn = True def forward(self, x): for layer in self.children(): x = layer(x) return x class ResBasicBlock(CNNBlockBase): """Residual basic block: x + f(x), f = basic transform.""" def __init__(self, w_in, w_out, stride, norm, activation_class, params): super().__init__(w_in, w_out, stride) self.proj, self.bn = None, None if (w_in != w_out) or (stride != 1): self.proj = conv2d(w_in, w_out, 1, stride=stride) self.bn = get_norm(norm, w_out) self.f = BasicTransform(w_in, w_out, stride, norm, activation_class, params) self.af = activation_class() def forward(self, x): x_p = self.bn(self.proj(x)) if self.proj else x return self.af(x_p + self.f(x)) class BottleneckTransform(nn.Module): """Bottleneck transformation: 1x1, 3x3 [+SE], 1x1.""" def __init__(self, w_in, w_out, stride, norm, activation_class, params): super().__init__() w_b = int(round(w_out * params["bot_mul"])) w_se = int(round(w_in * params["se_r"])) groups = w_b // params["group_w"] self.a = conv2d(w_in, w_b, 1) self.a_bn = get_norm(norm, w_b) self.a_af = activation_class() self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups) self.b_bn = get_norm(norm, w_b) self.b_af = activation_class() self.se = SE(w_b, w_se, activation_class) if w_se else None self.c = conv2d(w_b, w_out, 1) self.c_bn = get_norm(norm, w_out) self.c_bn.final_bn = True def forward(self, x): for layer in self.children(): x = layer(x) return x class ResBottleneckBlock(CNNBlockBase): """Residual bottleneck block: x + f(x), f = bottleneck transform.""" def __init__(self, w_in, w_out, stride, norm, activation_class, params): super().__init__(w_in, w_out, stride) self.proj, self.bn = None, None if (w_in != w_out) or (stride != 1): self.proj = conv2d(w_in, w_out, 1, stride=stride) self.bn = get_norm(norm, w_out) self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params) self.af = activation_class() def forward(self, x): x_p = self.bn(self.proj(x)) if self.proj else x return self.af(x_p + self.f(x)) class AnyStage(nn.Module): """AnyNet stage (sequence of blocks w/ the same output shape).""" def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params): super().__init__() for i in range(d): block = block_class(w_in, w_out, stride, norm, activation_class, params) self.add_module("b{}".format(i + 1), block) stride, w_in = 1, w_out def forward(self, x): for block in self.children(): x = block(x) return x class AnyNet(Backbone): """AnyNet model. See :paper:`dds`.""" def __init__( self, *, stem_class, stem_width, block_class, depths, widths, group_widths, strides, bottleneck_ratios, se_ratio, activation_class, freeze_at=0, norm="BN", out_features=None, ): """ Args: stem_class (callable): A callable taking 4 arguments (channels in, channels out, normalization, callable returning an activation function) that returns another callable implementing the stem module. stem_width (int): The number of output channels that the stem produces. block_class (callable): A callable taking 6 arguments (channels in, channels out, stride, normalization, callable returning an activation function, a dict of block-specific parameters) that returns another callable implementing the repeated block module. depths (list[int]): Number of blocks in each stage. widths (list[int]): For each stage, the number of output channels of each block. group_widths (list[int]): For each stage, the number of channels per group in group convolution, if the block uses group convolution. 
strides (list[int]): The stride that each network stage applies to its input. bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck channels to the number of block input channels (or, equivalently, output channels), if the block uses a bottleneck. se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation (SE) module to the number of its input channels, if the block uses SE. activation_class (callable): A callable taking no arguments that returns another callable implementing an activation function. freeze_at (int): The number of stages at the beginning to freeze. see :meth:`freeze` for detailed explanation. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. out_features (list[str]): name of the layers whose outputs should be returned in forward. RegNet's use "stem" and "s1", "s2", etc for the stages after the stem. If None, will return the output of the last layer. """ super().__init__() self.stem = stem_class(3, stem_width, norm, activation_class) current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stages_and_names = [] prev_w = stem_width for i, (d, w, s, b, g) in enumerate( zip(depths, widths, strides, bottleneck_ratios, group_widths) ): params = {"bot_mul": b, "group_w": g, "se_r": se_ratio} stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params) name = "s{}".format(i + 1) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in stage.children()]) ) self._out_feature_channels[name] = list(stage.children())[-1].out_channels prev_w = w self.apply(init_weights) if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {} does not include {}".format( ", ".join(children), out_feature ) self.freeze(freeze_at) def forward(self, x): """ Args: x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. Returns: dict[str->Tensor]: names and the corresponding features """ assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!" outputs = {} x = self.stem(x) if "stem" in self._out_features: outputs["stem"] = x for stage, name in self.stages_and_names: x = stage(x) if name in self._out_features: outputs[name] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): """ Freeze the first several stages of the model. Commonly used in fine-tuning. Layers that produce the same feature map spatial size are defined as one "stage" by :paper:`FPN`. Args: freeze_at (int): number of stages to freeze. `1` means freezing the stem. `2` means freezing the stem and one residual stage, etc.
Returns: nn.Module: this model itself """ if freeze_at >= 1: self.stem.freeze() for idx, (stage, _) in enumerate(self.stages_and_names, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self def adjust_block_compatibility(ws, bs, gs): """Adjusts the compatibility of widths, bottlenecks, and groups.""" assert len(ws) == len(bs) == len(gs) assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs)) vs = [int(max(1, w * b)) for w, b in zip(ws, bs)] gs = [int(min(g, v)) for g, v in zip(gs, vs)] ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)] vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)] ws = [int(v / b) for v, b in zip(vs, bs)] assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs)) return ws, bs, gs def generate_regnet_parameters(w_a, w_0, w_m, d, q=8): """Generates per stage widths and depths from RegNet parameters.""" assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 # Generate continuous per-block ws ws_cont = np.arange(d) * w_a + w_0 # Generate quantized per-block ws ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) ws_all = w_0 * np.power(w_m, ks) ws_all = np.round(np.divide(ws_all, q)).astype(int) * q # Generate per stage ws and ds (assumes ws_all are sorted) ws, ds = np.unique(ws_all, return_counts=True) # Compute number of actual stages and total possible stages num_stages, total_stages = len(ws), ks.max() + 1 # Convert numpy arrays to lists and return ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont)) return ws, ds, num_stages, total_stages, ws_all, ws_cont class RegNet(AnyNet): """RegNet model. See :paper:`dds`.""" def __init__( self, *, stem_class, stem_width, block_class, depth, w_a, w_0, w_m, group_width, stride=2, bottleneck_ratio=1.0, se_ratio=0.0, activation_class=None, freeze_at=0, norm="BN", out_features=None, ): """ Build a RegNet from the parameterization described in :paper:`dds` Section 3.3. Args: See :class:`AnyNet` for arguments that are not listed here. depth (int): Total number of blocks in the RegNet. w_a (float): Factor by which block width would increase prior to quantizing block widths by stage. See :paper:`dds` Section 3.3. w_0 (int): Initial block width. See :paper:`dds` Section 3.3. w_m (float): Parameter controlling block width quantization. See :paper:`dds` Section 3.3. group_width (int): Number of channels per group in group convolution, if the block uses group convolution. bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number of block input channels (or, equivalently, output channels), if the block uses a bottleneck. stride (int): The stride that each network stage applies to its input. """ ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2] ss = [stride for _ in ws] bs = [bottleneck_ratio for _ in ws] gs = [group_width for _ in ws] ws, bs, gs = adjust_block_compatibility(ws, bs, gs) def default_activation_class(): return nn.ReLU(inplace=True) super().__init__( stem_class=stem_class, stem_width=stem_width, block_class=block_class, depths=ds, widths=ws, strides=ss, group_widths=gs, bottleneck_ratios=bs, se_ratio=se_ratio, activation_class=default_activation_class if activation_class is None else activation_class, freeze_at=freeze_at, norm=norm, out_features=out_features, )
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/regnet.py
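To make the RegNet parameterization above concrete, here is a small sketch that calls the width/depth helpers defined in regnet.py; the numeric values are toy choices for illustration, not a published RegNet configuration.

# A minimal sketch of the per-stage width/depth generation used by RegNet above.
from detectron2.modeling.backbone.regnet import (
    adjust_block_compatibility,
    generate_regnet_parameters,
)

# Toy parameters (w_0 must be divisible by the quantization step q=8).
ws, ds, num_stages, _, _, _ = generate_regnet_parameters(w_a=8.0, w_0=32, w_m=2.0, d=12)
# For these toy values this yields ws == [32, 64, 128], ds == [2, 6, 4], num_stages == 3.
bs = [1.0] * num_stages  # bottleneck ratio per stage
gs = [16] * num_stages   # group width per stage
ws, bs, gs = adjust_block_compatibility(ws, bs, gs)
# Widths stay [32, 64, 128] here because they are already divisible by the group width.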
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import ABCMeta, abstractmethod
import torch.nn as nn

from detectron2.layers import ShapeSpec

__all__ = ["Backbone"]


class Backbone(nn.Module, metaclass=ABCMeta):
    """
    Abstract base class for network backbones.
    """

    def __init__(self):
        """
        The `__init__` method of any subclass can specify its own set of arguments.
        """
        super().__init__()

    @abstractmethod
    def forward(self):
        """
        Subclasses must override this method, but adhere to the same return type.

        Returns:
            dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
        """
        pass

    @property
    def size_divisibility(self) -> int:
        """
        Some backbones require the input height and width to be divisible by a
        specific integer. This is typically true for encoder / decoder type networks
        with lateral connection (e.g., FPN) for which feature maps need to match
        dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
        input size divisibility is required.
        """
        return 0

    def output_shape(self):
        """
        Returns:
            dict[str->ShapeSpec]
        """
        # this is a backward-compatible default
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/backbone.py
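Because Backbone is an abstract base class, a minimal subclass illustrates the contract its forward and output_shape methods define. This is only a sketch; the layer sizes and the feature name "toy" are invented for illustration.

# A minimal custom backbone honoring the Backbone interface above.
import torch
import torch.nn as nn

from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone


class ToyBackbone(Backbone):
    def __init__(self):
        super().__init__()
        # a single stride-16 conv, so the feature map is 1/16 of the input resolution
        self.conv = nn.Conv2d(3, 64, kernel_size=16, stride=16)

    def forward(self, x):
        # must return a dict mapping feature names to tensors
        return {"toy": self.conv(x)}

    def output_shape(self):
        return {"toy": ShapeSpec(channels=64, stride=16)}


# features = ToyBackbone()(torch.randn(1, 3, 64, 64))  # {"toy": tensor of shape (1, 64, 4, 4)}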
# Copyright (c) Facebook, Inc. and its affiliates.
from .build import build_backbone, BACKBONE_REGISTRY  # noqa F401 isort:skip

from .backbone import Backbone
from .fpn import FPN
from .regnet import RegNet
from .resnet import (
    BasicStem,
    ResNet,
    ResNetBlockBase,
    build_resnet_backbone,
    make_stage,
    BottleneckBlock,
)

__all__ = [k for k in globals().keys() if not k.startswith("_")]
# TODO can expose more resnet blocks after careful consideration
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/__init__.py
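The BACKBONE_REGISTRY exported here is how config-driven code locates backbones. The sketch below registers a hypothetical backbone class and builds it through build_backbone; the class name "ToyBackbone2" and its layer sizes are placeholders, not part of the library.

# Sketch: register a custom backbone and build it from a config.
import torch.nn as nn

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, build_backbone


@BACKBONE_REGISTRY.register()
class ToyBackbone2(Backbone):
    # the registry calls this with (cfg, input_shape)
    def __init__(self, cfg, input_shape):
        super().__init__()
        self.conv = nn.Conv2d(input_shape.channels, 64, kernel_size=16, stride=16)

    def forward(self, x):
        return {"toy": self.conv(x)}

    def output_shape(self):
        return {"toy": ShapeSpec(channels=64, stride=16)}


# cfg = get_cfg()
# cfg.MODEL.BACKBONE.NAME = "ToyBackbone2"
# backbone = build_backbone(cfg)  # dispatches through BACKBONE_REGISTRY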
# Copyright (c) Facebook, Inc. and its affiliates. import numpy as np import fvcore.nn.weight_init as weight_init import torch import torch.nn.functional as F from torch import nn from detectron2.layers import ( CNNBlockBase, Conv2d, DeformConv, ModulatedDeformConv, ShapeSpec, get_norm, ) from .backbone import Backbone from .build import BACKBONE_REGISTRY __all__ = [ "ResNetBlockBase", "BasicBlock", "BottleneckBlock", "DeformBottleneckBlock", "BasicStem", "ResNet", "make_stage", "build_resnet_backbone", ] class BasicBlock(CNNBlockBase): """ The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`, with two 3x3 conv layers and a projection shortcut if needed. """ def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): """ Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. stride (int): Stride for the first conv. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. """ super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None self.conv1 = Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False, norm=get_norm(norm, out_channels), ) self.conv2 = Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) def forward(self, x): out = self.conv1(x) out = F.relu_(out) out = self.conv2(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class BottleneckBlock(CNNBlockBase): """ The standard bottleneck residual block used by ResNet-50, 101 and 152 defined in :paper:`ResNet`. It contains 3 conv layers with kernels 1x1, 3x3, 1x1, and a projection shortcut if needed. """ def __init__( self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): """ Args: bottleneck_channels (int): number of output channels for the 3x3 "bottleneck" conv layers. num_groups (int): number of groups for the 3x3 conv layer. norm (str or callable): normalization for all conv layers. See :func:`layers.get_norm` for supported format. stride_in_1x1 (bool): when stride>1, whether to put stride in the first 1x1 convolution or the bottleneck 3x3 convolution. dilation (int): the dilation rate of the 3x3 conv layer. 
""" super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) # Zero-initialize the last normalization in each residual branch, # so that at the beginning, the residual branch starts with zeros, # and each residual block behaves like an identity. # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "For BN layers, the learnable scaling coefficient γ is initialized # to be 1, except for each residual block's last BN # where γ is initialized to be 0." # nn.init.constant_(self.conv3.norm.weight, 0) # TODO this somehow hurts performance when training GN models from scratch. # Add it as an option when we need to use this code to train a backbone. def forward(self, x): out = self.conv1(x) out = F.relu_(out) out = self.conv2(out) out = F.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class DeformBottleneckBlock(CNNBlockBase): """ Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>` in the 3x3 convolution. 
""" def __init__( self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, deform_modulated=False, deform_num_groups=1, ): super().__init__(in_channels, out_channels, stride) self.deform_modulated = deform_modulated if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) if deform_modulated: deform_conv_op = ModulatedDeformConv # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size offset_channels = 27 else: deform_conv_op = DeformConv offset_channels = 18 self.conv2_offset = Conv2d( bottleneck_channels, offset_channels * deform_num_groups, kernel_size=3, stride=stride_3x3, padding=1 * dilation, dilation=dilation, ) self.conv2 = deform_conv_op( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, deformable_groups=deform_num_groups, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: if layer is not None: # shortcut can be None weight_init.c2_msra_fill(layer) nn.init.constant_(self.conv2_offset.weight, 0) nn.init.constant_(self.conv2_offset.bias, 0) def forward(self, x): out = self.conv1(x) out = F.relu_(out) if self.deform_modulated: offset_mask = self.conv2_offset(out) offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) offset = torch.cat((offset_x, offset_y), dim=1) mask = mask.sigmoid() out = self.conv2(out, offset, mask) else: offset = self.conv2_offset(out) out = self.conv2(out, offset) out = F.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = F.relu_(out) return out class BasicStem(CNNBlockBase): """ The standard ResNet stem (layers before the first residual block), with a conv, relu and max_pool. """ def __init__(self, in_channels=3, out_channels=64, norm="BN"): """ Args: norm (str or callable): norm after the first conv layer. See :func:`layers.get_norm` for supported format. """ super().__init__(in_channels, out_channels, 4) self.in_channels = in_channels self.conv1 = Conv2d( in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels), ) weight_init.c2_msra_fill(self.conv1) def forward(self, x): x = self.conv1(x) x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x class ResNet(Backbone): """ Implement :paper:`ResNet`. """ def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0): """ Args: stem (nn.Module): a stem module stages (list[list[CNNBlockBase]]): several (typically 4) stages, each contains multiple :class:`CNNBlockBase`. num_classes (None or int): if None, will not perform classification. Otherwise, will create a linear layer. out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in "stem", "linear", or "res2" ... If None, will return the output of the last layer. freeze_at (int): The number of stages at the beginning to freeze. 
see :meth:`freeze` for detailed explanation. """ super().__init__() self.stem = stem self.num_classes = num_classes current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stage_names, self.stages = [], [] if out_features is not None: # Avoid keeping unused layers in this module. They consume extra memory # and may cause allreduce to fail num_stages = max( [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features] ) stages = stages[:num_stages] for i, blocks in enumerate(stages): assert len(blocks) > 0, len(blocks) for block in blocks: assert isinstance(block, CNNBlockBase), block name = "res" + str(i + 2) stage = nn.Sequential(*blocks) self.add_module(name, stage) self.stage_names.append(name) self.stages.append(stage) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in blocks]) ) self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels self.stage_names = tuple(self.stage_names) # Make it static for scripting if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(curr_channels, num_classes) # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "The 1000-way fully-connected layer is initialized by # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." nn.init.normal_(self.linear.weight, std=0.01) name = "linear" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) self.freeze(freeze_at) def forward(self, x): """ Args: x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. Returns: dict[str->Tensor]: names and the corresponding features """ assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!" outputs = {} x = self.stem(x) if "stem" in self._out_features: outputs["stem"] = x for name, stage in zip(self.stage_names, self.stages): x = stage(x) if name in self._out_features: outputs[name] = x if self.num_classes is not None: x = self.avgpool(x) x = torch.flatten(x, 1) x = self.linear(x) if "linear" in self._out_features: outputs["linear"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def freeze(self, freeze_at=0): """ Freeze the first several stages of the ResNet. Commonly used in fine-tuning. Layers that produce the same feature map spatial size are defined as one "stage" by :paper:`FPN`. Args: freeze_at (int): number of stages to freeze. `1` means freezing the stem. `2` means freezing the stem and one residual stage, etc. Returns: nn.Module: this ResNet itself """ if freeze_at >= 1: self.stem.freeze() for idx, stage in enumerate(self.stages, start=2): if freeze_at >= idx: for block in stage.children(): block.freeze() return self @staticmethod def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs): """ Create a list of blocks of the same type that forms one ResNet stage. Args: block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this stage. A module of this type must not change spatial resolution of inputs unless its stride != 1. 
num_blocks (int): number of blocks in this stage in_channels (int): input channels of the entire stage. out_channels (int): output channels of **every block** in the stage. kwargs: other arguments passed to the constructor of `block_class`. If the argument name is "xx_per_block", the argument is a list of values to be passed to each block in the stage. Otherwise, the same argument is passed to every block in the stage. Returns: list[CNNBlockBase]: a list of block module. Examples: :: stage = ResNet.make_stage( BottleneckBlock, 3, in_channels=16, out_channels=64, bottleneck_channels=16, num_groups=1, stride_per_block=[2, 1, 1], dilations_per_block=[1, 1, 2] ) Usually, layers that produce the same feature map spatial size are defined as one "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should all be 1. """ blocks = [] for i in range(num_blocks): curr_kwargs = {} for k, v in kwargs.items(): if k.endswith("_per_block"): assert len(v) == num_blocks, ( f"Argument '{k}' of make_stage should have the " f"same length as num_blocks={num_blocks}." ) newk = k[: -len("_per_block")] assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" curr_kwargs[newk] = v[i] else: curr_kwargs[k] = v blocks.append( block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs) ) in_channels = out_channels return blocks @staticmethod def make_default_stages(depth, block_class=None, **kwargs): """ Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152). If it doesn't create the ResNet variant you need, please use :meth:`make_stage` instead for fine-grained customization. Args: depth (int): depth of ResNet block_class (type): the CNN block class. Has to accept `bottleneck_channels` argument for depth > 50. By default it is BasicBlock or BottleneckBlock, based on the depth. kwargs: other arguments to pass to `make_stage`. Should not contain stride and channels, as they are predefined for each depth. Returns: list[list[CNNBlockBase]]: modules in all stages; see arguments of :class:`ResNet.__init__`. """ num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if block_class is None: block_class = BasicBlock if depth < 50 else BottleneckBlock if depth < 50: in_channels = [64, 64, 128, 256] out_channels = [64, 128, 256, 512] else: in_channels = [64, 256, 512, 1024] out_channels = [256, 512, 1024, 2048] ret = [] for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): if depth >= 50: kwargs["bottleneck_channels"] = o // 4 ret.append( ResNet.make_stage( block_class=block_class, num_blocks=n, stride_per_block=[s] + [1] * (n - 1), in_channels=i, out_channels=o, **kwargs, ) ) return ret ResNetBlockBase = CNNBlockBase """ Alias for backward compatibiltiy. """ def make_stage(*args, **kwargs): """ Deprecated alias for backward compatibiltiy. """ return ResNet.make_stage(*args, **kwargs) @BACKBONE_REGISTRY.register() def build_resnet_backbone(cfg, input_shape): """ Create a ResNet instance from config. Returns: ResNet: a :class:`ResNet` instance. """ # need registration of new blocks/stems? 
norm = cfg.MODEL.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, norm=norm, ) # fmt: off freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT out_features = cfg.MODEL.RESNETS.OUT_FEATURES depth = cfg.MODEL.RESNETS.DEPTH num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS # fmt: on assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if depth in [18, 34]: assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" assert not any( deform_on_per_stage ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" stages = [] for idx, stage_idx in enumerate(range(2, 6)): # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), "in_channels": in_channels, "out_channels": out_channels, "norm": norm, } # Use BasicBlock for R18 and R34. if depth in [18, 34]: stage_kargs["block_class"] = BasicBlock else: stage_kargs["bottleneck_channels"] = bottleneck_channels stage_kargs["stride_in_1x1"] = stride_in_1x1 stage_kargs["dilation"] = dilation stage_kargs["num_groups"] = num_groups if deform_on_per_stage[idx]: stage_kargs["block_class"] = DeformBottleneckBlock stage_kargs["deform_modulated"] = deform_modulated stage_kargs["deform_num_groups"] = deform_num_groups else: stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels *= 2 bottleneck_channels *= 2 stages.append(blocks) return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at)
banmo-main
third_party/detectron2_old/detectron2/modeling/backbone/resnet.py
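As a usage sketch for the classes above (mirroring the make_stage docstring), a ResNet-50 backbone can be assembled from BasicStem and ResNet.make_default_stages; the norm and output-feature choices below are illustrative.

# Sketch: assemble a ResNet-50 backbone directly from the building blocks above.
from detectron2.modeling.backbone.resnet import BasicStem, ResNet

stem = BasicStem(in_channels=3, out_channels=64, norm="BN")
stages = ResNet.make_default_stages(depth=50, norm="BN", stride_in_1x1=True)
resnet50 = ResNet(stem, stages, out_features=["res2", "res3", "res4", "res5"])
# resnet50.output_shape()["res5"] -> ShapeSpec(channels=2048, stride=32)
# resnet50(images)                -> {"res2": ..., "res3": ..., "res4": ..., "res5": ...}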
# Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import numpy as np import time from pycocotools.cocoeval import COCOeval from detectron2 import _C logger = logging.getLogger(__name__) class COCOeval_opt(COCOeval): """ This is a slightly modified version of the original COCO API, where the functions evaluateImg() and accumulate() are implemented in C++ to speedup evaluation """ def evaluate(self): """ Run per image evaluation on given images and store results in self.evalImgs_cpp, a datastructure that isn't readable from Python but is used by a c++ implementation of accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure self.evalImgs because this datastructure is a computational bottleneck. :return: None """ tic = time.time() p = self.params # add backward compatibility if useSegm is specified in params if p.useSegm is not None: p.iouType = "segm" if p.useSegm == 1 else "bbox" logger.info("Evaluate annotation type *{}*".format(p.iouType)) p.imgIds = list(np.unique(p.imgIds)) if p.useCats: p.catIds = list(np.unique(p.catIds)) p.maxDets = sorted(p.maxDets) self.params = p self._prepare() # bottleneck # loop through images, area range, max detection number catIds = p.catIds if p.useCats else [-1] if p.iouType == "segm" or p.iouType == "bbox": computeIoU = self.computeIoU elif p.iouType == "keypoints": computeIoU = self.computeOks self.ious = { (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds } # bottleneck maxDet = p.maxDets[-1] # <<<< Beginning of code differences with original COCO API def convert_instances_to_cpp(instances, is_det=False): # Convert annotations for a list of instances in an image to a format that's fast # to access in C++ instances_cpp = [] for instance in instances: instance_cpp = _C.InstanceAnnotation( int(instance["id"]), instance["score"] if is_det else instance.get("score", 0.0), instance["area"], bool(instance.get("iscrowd", 0)), bool(instance.get("ignore", 0)), ) instances_cpp.append(instance_cpp) return instances_cpp # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ ground_truth_instances = [ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] for imgId in p.imgIds ] detected_instances = [ [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds] for imgId in p.imgIds ] ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] if not p.useCats: # For each image, flatten per-category lists into a single list ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances] detected_instances = [[[o for c in i for o in c]] for i in detected_instances] # Call C++ implementation of self.evaluateImgs() self._evalImgs_cpp = _C.COCOevalEvaluateImages( p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances ) self._evalImgs = None self._paramsEval = copy.deepcopy(self.params) toc = time.time() logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic)) # >>>> End of code differences with original COCO API def accumulate(self): """ Accumulate per image evaluation results and store the result in self.eval. Does not support changing parameter settings from those used by self.evaluate() """ logger.info("Accumulating evaluation results...") tic = time.time() assert hasattr( self, "_evalImgs_cpp" ), "evaluate() must be called before accmulate() is called." 
self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp) # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections self.eval["recall"] = np.array(self.eval["recall"]).reshape( self.eval["counts"][:1] + self.eval["counts"][2:] ) # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X # num_area_ranges X num_max_detections self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"]) self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"]) toc = time.time() logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
banmo-main
third_party/detectron2_old/detectron2/evaluation/fast_eval_api.py
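COCOeval_opt is intended as a drop-in replacement for pycocotools' COCOeval, assuming detectron2 was built with its C extension (detectron2._C). A hedged sketch with placeholder file paths:

# Sketch: use the C++-accelerated evaluator in place of pycocotools' COCOeval.
from pycocotools.coco import COCO

from detectron2.evaluation.fast_eval_api import COCOeval_opt

coco_gt = COCO("instances_val.json")           # ground-truth annotations (placeholder path)
coco_dt = coco_gt.loadRes("predictions.json")  # detections in COCO result format (placeholder)
coco_eval = COCOeval_opt(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()    # per-image evaluation runs in C++
coco_eval.accumulate()  # accumulation runs in C++
coco_eval.summarize()   # summarize() is inherited unchanged from COCOeval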
# Copyright (c) Facebook, Inc. and its affiliates. import contextlib import io import itertools import json import logging import numpy as np import os import tempfile from collections import OrderedDict from typing import Optional from PIL import Image from tabulate import tabulate from detectron2.data import MetadataCatalog from detectron2.utils import comm from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator logger = logging.getLogger(__name__) class COCOPanopticEvaluator(DatasetEvaluator): """ Evaluate Panoptic Quality metrics on COCO using PanopticAPI. It saves panoptic segmentation prediction in `output_dir` It contains a synchronize call and has to be called from all workers. """ def __init__(self, dataset_name: str, output_dir: Optional[str] = None): """ Args: dataset_name: name of the dataset output_dir: output directory to save results for evaluation. """ self._metadata = MetadataCatalog.get(dataset_name) self._thing_contiguous_id_to_dataset_id = { v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() } self._stuff_contiguous_id_to_dataset_id = { v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() } self._output_dir = output_dir if self._output_dir is not None: PathManager.mkdirs(self._output_dir) def reset(self): self._predictions = [] def _convert_category_id(self, segment_info): isthing = segment_info.pop("isthing", None) if isthing is None: # the model produces panoptic category id directly. No more conversion needed return segment_info if isthing is True: segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ segment_info["category_id"] ] else: segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ segment_info["category_id"] ] return segment_info def process(self, inputs, outputs): from panopticapi.utils import id2rgb for input, output in zip(inputs, outputs): panoptic_img, segments_info = output["panoptic_seg"] panoptic_img = panoptic_img.cpu().numpy() if segments_info is None: # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label, and add 1 to panoptic_img since the official # evaluation script uses 0 for VOID label. label_divisor = self._metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_img): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = ( pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values() ) segments_info.append( { "id": int(panoptic_label) + 1, "category_id": int(pred_class), "isthing": bool(isthing), } ) # Official evaluation script uses 0 for VOID label. 
panoptic_img += 1 file_name = os.path.basename(input["file_name"]) file_name_png = os.path.splitext(file_name)[0] + ".png" with io.BytesIO() as out: Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") segments_info = [self._convert_category_id(x) for x in segments_info] self._predictions.append( { "image_id": input["image_id"], "file_name": file_name_png, "png_string": out.getvalue(), "segments_info": segments_info, } ) def evaluate(self): comm.synchronize() self._predictions = comm.gather(self._predictions) self._predictions = list(itertools.chain(*self._predictions)) if not comm.is_main_process(): return # PanopticApi requires local files gt_json = PathManager.get_local_path(self._metadata.panoptic_json) gt_folder = PathManager.get_local_path(self._metadata.panoptic_root) with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) for p in self._predictions: with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: f.write(p.pop("png_string")) with open(gt_json, "r") as f: json_data = json.load(f) json_data["annotations"] = self._predictions output_dir = self._output_dir or pred_dir predictions_json = os.path.join(output_dir, "predictions.json") with PathManager.open(predictions_json, "w") as f: f.write(json.dumps(json_data)) from panopticapi.evaluation import pq_compute with contextlib.redirect_stdout(io.StringIO()): pq_res = pq_compute( gt_json, PathManager.get_local_path(predictions_json), gt_folder=gt_folder, pred_folder=pred_dir, ) res = {} res["PQ"] = 100 * pq_res["All"]["pq"] res["SQ"] = 100 * pq_res["All"]["sq"] res["RQ"] = 100 * pq_res["All"]["rq"] res["PQ_th"] = 100 * pq_res["Things"]["pq"] res["SQ_th"] = 100 * pq_res["Things"]["sq"] res["RQ_th"] = 100 * pq_res["Things"]["rq"] res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] results = OrderedDict({"panoptic_seg": res}) _print_panoptic_results(pq_res) return results def _print_panoptic_results(pq_res): headers = ["", "PQ", "SQ", "RQ", "#categories"] data = [] for name in ["All", "Things", "Stuff"]: row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] data.append(row) table = tabulate( data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" ) logger.info("Panoptic Evaluation Results:\n" + table) if __name__ == "__main__": from detectron2.utils.logger import setup_logger logger = setup_logger() import argparse parser = argparse.ArgumentParser() parser.add_argument("--gt-json") parser.add_argument("--gt-dir") parser.add_argument("--pred-json") parser.add_argument("--pred-dir") args = parser.parse_args() from panopticapi.evaluation import pq_compute with contextlib.redirect_stdout(io.StringIO()): pq_res = pq_compute( args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir ) _print_panoptic_results(pq_res)
banmo-main
third_party/detectron2_old/detectron2/evaluation/panoptic_evaluation.py
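A hedged sketch of driving COCOPanopticEvaluator through inference_on_dataset; the dataset name is assumed to be a registered panoptic dataset (the detectron2 builtin is used here), and the model, cfg, and data loader are assumed to exist elsewhere.

# Sketch: evaluate panoptic predictions on a registered panoptic dataset.
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOPanopticEvaluator, inference_on_dataset

evaluator = COCOPanopticEvaluator("coco_2017_val_panoptic_separated", output_dir="./panoptic_out")
# val_loader = build_detection_test_loader(cfg, "coco_2017_val_panoptic_separated")
# results = inference_on_dataset(model, val_loader, evaluator)
# -> {"panoptic_seg": {"PQ": ..., "SQ": ..., "RQ": ..., "PQ_th": ..., "PQ_st": ..., ...}}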
# Copyright (c) Facebook, Inc. and its affiliates.
from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
from .coco_evaluation import COCOEvaluator
from .rotated_coco_evaluation import RotatedCOCOEvaluator
from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
from .lvis_evaluation import LVISEvaluator
from .panoptic_evaluation import COCOPanopticEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
from .sem_seg_evaluation import SemSegEvaluator
from .testing import print_csv_format, verify_results

__all__ = [k for k in globals().keys() if not k.startswith("_")]
banmo-main
third_party/detectron2_old/detectron2/evaluation/__init__.py
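The exports above are commonly combined: DatasetEvaluators lets several evaluators share a single pass over the test set. A hedged sketch, with a placeholder output directory and the builtin panoptic-separated dataset assumed to be registered:

# Sketch: combine multiple evaluators into one inference pass.
from detectron2.evaluation import COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators

evaluators = DatasetEvaluators(
    [
        COCOEvaluator("coco_2017_val_panoptic_separated", output_dir="./eval_out"),
        COCOPanopticEvaluator("coco_2017_val_panoptic_separated", output_dir="./eval_out"),
    ]
)
# results = inference_on_dataset(model, val_loader, evaluators)
# merges the per-evaluator dicts, e.g. {"bbox": {...}, "segm": {...}, "panoptic_seg": {...}}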
# Copyright (c) Facebook, Inc. and its affiliates. import glob import logging import numpy as np import os import tempfile from collections import OrderedDict import torch from PIL import Image from detectron2.data import MetadataCatalog from detectron2.utils import comm from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator class CityscapesEvaluator(DatasetEvaluator): """ Base class for evaluation using cityscapes API. """ def __init__(self, dataset_name): """ Args: dataset_name (str): the name of the dataset. It must have the following metadata associated with it: "thing_classes", "gt_dir". """ self._metadata = MetadataCatalog.get(dataset_name) self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) def reset(self): self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") self._temp_dir = self._working_dir.name # All workers will write to the same results directory # TODO this does not work in distributed training self._temp_dir = comm.all_gather(self._temp_dir)[0] if self._temp_dir != self._working_dir.name: self._working_dir.cleanup() self._logger.info( "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) ) class CityscapesInstanceEvaluator(CityscapesEvaluator): """ Evaluate instance segmentation results on cityscapes dataset using cityscapes API. Note: * It does not work in multi-machine distributed training. * It contains a synchronization, therefore has to be used on all ranks. * Only the main process runs evaluation. """ def process(self, inputs, outputs): from cityscapesscripts.helpers.labels import name2label for input, output in zip(inputs, outputs): file_name = input["file_name"] basename = os.path.splitext(os.path.basename(file_name))[0] pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") if "instances" in output: output = output["instances"].to(self._cpu_device) num_instances = len(output) with open(pred_txt, "w") as fout: for i in range(num_instances): pred_class = output.pred_classes[i] classes = self._metadata.thing_classes[pred_class] class_id = name2label[classes].id score = output.scores[i] mask = output.pred_masks[i].numpy().astype("uint8") png_filename = os.path.join( self._temp_dir, basename + "_{}_{}.png".format(i, classes) ) Image.fromarray(mask * 255).save(png_filename) fout.write( "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) ) else: # Cityscapes requires a prediction file for every ground truth image. with open(pred_txt, "w") as fout: pass def evaluate(self): """ Returns: dict: has a key "segm", whose value is a dict of "AP" and "AP50". 
""" comm.synchronize() if comm.get_rank() > 0: return import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) # set some global states in cityscapes evaluation API, before evaluating cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) cityscapes_eval.args.predictionWalk = None cityscapes_eval.args.JSONOutput = False cityscapes_eval.args.colorized = False cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") # These lines are adopted from # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa gt_dir = PathManager.get_local_path(self._metadata.gt_dir) groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) assert len( groundTruthImgList ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( cityscapes_eval.args.groundTruthSearch ) predictionImgList = [] for gt in groundTruthImgList: predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) results = cityscapes_eval.evaluateImgLists( predictionImgList, groundTruthImgList, cityscapes_eval.args )["averages"] ret = OrderedDict() ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} self._working_dir.cleanup() return ret class CityscapesSemSegEvaluator(CityscapesEvaluator): """ Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. Note: * It does not work in multi-machine distributed training. * It contains a synchronization, therefore has to be used on all ranks. * Only the main process runs evaluation. """ def process(self, inputs, outputs): from cityscapesscripts.helpers.labels import trainId2label for input, output in zip(inputs, outputs): file_name = input["file_name"] basename = os.path.splitext(os.path.basename(file_name))[0] pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() pred = 255 * np.ones(output.shape, dtype=np.uint8) for train_id, label in trainId2label.items(): if label.ignoreInEval: continue pred[output == train_id] = label.id Image.fromarray(pred).save(pred_filename) def evaluate(self): comm.synchronize() if comm.get_rank() > 0: return # Load the Cityscapes eval script *after* setting the required env var, # since the script reads CITYSCAPES_DATASET into global variables at load time. import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) # set some global states in cityscapes evaluation API, before evaluating cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) cityscapes_eval.args.predictionWalk = None cityscapes_eval.args.JSONOutput = False cityscapes_eval.args.colorized = False # These lines are adopted from # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa gt_dir = PathManager.get_local_path(self._metadata.gt_dir) groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) assert len( groundTruthImgList ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( cityscapes_eval.args.groundTruthSearch ) predictionImgList = [] for gt in groundTruthImgList: predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) results = cityscapes_eval.evaluateImgLists( predictionImgList, groundTruthImgList, cityscapes_eval.args ) ret = OrderedDict() ret["sem_seg"] = { "IoU": 100.0 * results["averageScoreClasses"], "iIoU": 100.0 * results["averageScoreInstClasses"], "IoU_sup": 100.0 * results["averageScoreCategories"], "iIoU_sup": 100.0 * results["averageScoreInstCategories"], } self._working_dir.cleanup() return ret
banmo-main
third_party/detectron2_old/detectron2/evaluation/cityscapes_evaluation.py
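A hedged usage sketch for the Cityscapes instance evaluator above; it requires the cityscapesscripts package and a dataset registered with "thing_classes" and "gt_dir" metadata (the name below is the detectron2 builtin, assumed registered).

# Sketch: instance-segmentation evaluation with the Cityscapes API.
from detectron2.evaluation import CityscapesInstanceEvaluator, inference_on_dataset

evaluator = CityscapesInstanceEvaluator("cityscapes_fine_instance_seg_val")
# results = inference_on_dataset(model, val_loader, evaluator)
# only the main process returns {"segm": {"AP": ..., "AP50": ...}}; other ranks return None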
# Copyright (c) Facebook, Inc. and its affiliates. import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle from collections import OrderedDict import pycocotools.mask as mask_util import torch from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator class COCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval When empty, it will use the defaults in COCO. Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." 
) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): self._logger.info( f"'{dataset_name}' is not registered by `register_coco_instances`." " Therefore trying to convert it to COCO format ..." ) cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset if self._do_evaluation: self._kpt_oks_sigmas = kpt_oks_sigmas def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) if len(prediction) > 1: self._predictions.append(prediction) def evaluate(self, img_ids=None): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): """ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. """ tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def _eval_predictions(self, predictions, img_ids=None): """ Evaluate predictions. Fill self._results with the metrics of the tasks. 
""" self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas, use_fast_impl=self._use_fast_impl, img_ids=img_ids, ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. """ if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. 
iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format(iou_type) + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). 
rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 
'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) if img_ids is not None: coco_eval.params.imgIds = img_ids if iou_type == "keypoints": # Use the COCO default keypoint OKS sigmas unless overrides are specified if kpt_oks_sigmas: assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!" coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) # COCOAPI requires every detection and every gt to have keypoints, so # we just take the first entry from both num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " f"Ground truth contains {num_keypoints_gt} keypoints. " f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " "They have to agree with each other. For meaning of OKS, please refer to " "http://cocodataset.org/#keypoints-eval." ) coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return coco_eval
banmo-main
third_party/detectron2_old/detectron2/evaluation/coco_evaluation.py
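The "AR" value returned by _evaluate_box_proposals above is simply the mean of recalls computed at IoU thresholds 0.5:0.05:0.95 over the recorded per-GT best overlaps. A minimal sketch of that tail computation on made-up data (the gt_overlaps values and num_pos below are illustrative stand-ins, not real evaluation output):

import torch

# Hypothetical best-IoU value recorded for each matched GT box (stand-in data).
gt_overlaps = torch.tensor([0.92, 0.81, 0.55, 0.43, 0.76, 0.88])
num_pos = 8  # total number of area-filtered GT boxes, including unmatched ones

gt_overlaps, _ = torch.sort(gt_overlaps)
thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
for i, t in enumerate(thresholds):
    # fraction of GT boxes covered by some proposal with IoU >= t
    recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
ar = recalls.mean()
print({"ar": float(ar), "recalls": recalls.tolist()})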
# Copyright (c) Facebook, Inc. and its affiliates. import itertools import json import logging import numpy as np import os from collections import OrderedDict import PIL.Image as Image import pycocotools.mask as mask_util import torch from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.utils.comm import all_gather, is_main_process, synchronize from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator class SemSegEvaluator(DatasetEvaluator): """ Evaluate semantic segmentation metrics. """ def __init__( self, dataset_name, distributed=True, output_dir=None, *, num_classes=None, ignore_label=None, ): """ Args: dataset_name (str): name of the dataset to be evaluated. distributed (bool): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): an output directory to dump results. num_classes, ignore_label: deprecated argument """ self._logger = logging.getLogger(__name__) if num_classes is not None: self._logger.warn( "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." ) if ignore_label is not None: self._logger.warn( "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." ) self._dataset_name = dataset_name self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self.input_file_to_gt_file = { dataset_record["file_name"]: dataset_record["sem_seg_file_name"] for dataset_record in DatasetCatalog.get(dataset_name) } meta = MetadataCatalog.get(dataset_name) # Dict that maps contiguous training ids to COCO category ids try: c2d = meta.stuff_dataset_id_to_contiguous_id self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} except AttributeError: self._contiguous_id_to_dataset_id = None self._class_names = meta.stuff_classes self._num_classes = len(meta.stuff_classes) if num_classes is not None: assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label def reset(self): self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a model. It is a list of dicts. Each dict corresponds to an image and contains keys like "height", "width", "file_name". outputs: the outputs of a model. It is either list of semantic segmentation predictions (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic segmentation prediction in the same format. 
""" for input, output in zip(inputs, outputs): output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) pred = np.array(output, dtype=np.int) with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f: gt = np.array(Image.open(f), dtype=np.int) gt[gt == self._ignore_label] = self._num_classes self._conf_matrix += np.bincount( (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) def evaluate(self): """ Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): * Mean intersection-over-union averaged across classes (mIoU) * Frequency Weighted IoU (fwIoU) * Mean pixel accuracy averaged across classes (mACC) * Pixel Accuracy (pACC) """ if self._distributed: synchronize() conf_matrix_list = all_gather(self._conf_matrix) self._predictions = all_gather(self._predictions) self._predictions = list(itertools.chain(*self._predictions)) if not is_main_process(): return self._conf_matrix = np.zeros_like(self._conf_matrix) for conf_matrix in conf_matrix_list: self._conf_matrix += conf_matrix if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") with PathManager.open(file_path, "w") as f: f.write(json.dumps(self._predictions)) acc = np.full(self._num_classes, np.nan, dtype=np.float) iou = np.full(self._num_classes, np.nan, dtype=np.float) tp = self._conf_matrix.diagonal()[:-1].astype(np.float) pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) class_weights = pos_gt / np.sum(pos_gt) pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) acc_valid = pos_gt > 0 acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] iou_valid = (pos_gt + pos_pred) > 0 union = pos_gt + pos_pred - tp iou[acc_valid] = tp[acc_valid] / union[acc_valid] macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) miou = np.sum(iou[acc_valid]) / np.sum(iou_valid) fiou = np.sum(iou[acc_valid] * class_weights[acc_valid]) pacc = np.sum(tp) / np.sum(pos_gt) res = {} res["mIoU"] = 100 * miou res["fwIoU"] = 100 * fiou for i, name in enumerate(self._class_names): res["IoU-{}".format(name)] = 100 * iou[i] res["mACC"] = 100 * macc res["pACC"] = 100 * pacc for i, name in enumerate(self._class_names): res["ACC-{}".format(name)] = 100 * acc[i] if self._output_dir: file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") with PathManager.open(file_path, "wb") as f: torch.save(res, f) results = OrderedDict({"sem_seg": res}) self._logger.info(results) return results def encode_json_sem_seg(self, sem_seg, input_file_name): """ Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. See http://cocodataset.org/#format-results """ json_list = [] for label in np.unique(sem_seg): if self._contiguous_id_to_dataset_id is not None: assert ( label in self._contiguous_id_to_dataset_id ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) dataset_id = self._contiguous_id_to_dataset_id[label] else: dataset_id = int(label) mask = (sem_seg == label).astype(np.uint8) mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] mask_rle["counts"] = mask_rle["counts"].decode("utf-8") json_list.append( {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} ) return json_list
banmo-main
third_party/detectron2_old/detectron2/evaluation/sem_seg_evaluation.py
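SemSegEvaluator accumulates a single (C+1) x (C+1) confusion matrix with a bincount trick (row index = prediction, column index = ground truth, last row/column = ignored pixels) and derives the IoU metrics from it. A self-contained sketch of that accumulation on a toy two-class example; the arrays are illustrative and the epsilon guard replaces the evaluator's iou_valid masking:

import numpy as np

num_classes = 2  # toy stuff classes; the extra bucket holds ignored pixels
pred = np.array([[0, 0, 1], [1, 1, 0]], dtype=np.int64)
gt = np.array([[0, 1, 1], [1, 1, 2]], dtype=np.int64)  # 2 == ignore bucket here

conf = np.bincount(
    (num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
    minlength=(num_classes + 1) ** 2,
).reshape(num_classes + 1, num_classes + 1)

tp = conf.diagonal()[:-1].astype(np.float64)
pos_gt = conf[:-1, :-1].sum(axis=0).astype(np.float64)   # GT pixels per class
pos_pred = conf[:-1, :-1].sum(axis=1).astype(np.float64)  # predicted pixels per class
iou = tp / np.maximum(pos_gt + pos_pred - tp, 1e-9)       # guard against empty classes
print("per-class IoU:", iou, "mIoU:", iou.mean())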
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import logging import numpy as np import os import tempfile import xml.etree.ElementTree as ET from collections import OrderedDict, defaultdict from functools import lru_cache import torch from detectron2.data import MetadataCatalog from detectron2.utils import comm from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator class PascalVOCDetectionEvaluator(DatasetEvaluator): """ Evaluate Pascal VOC style AP for Pascal VOC dataset. It contains a synchronization, therefore has to be called from all ranks. Note that the concept of AP can be implemented in different ways and may not produce identical results. This class mimics the implementation of the official Pascal VOC Matlab API, and should produce similar but not identical results to the official API. """ def __init__(self, dataset_name): """ Args: dataset_name (str): name of the dataset, e.g., "voc_2007_test" """ self._dataset_name = dataset_name meta = MetadataCatalog.get(dataset_name) # Too many tiny files, download all to local for speed. annotation_dir_local = PathManager.get_local_path( os.path.join(meta.dirname, "Annotations/") ) self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml") self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt") self._class_names = meta.thing_classes assert meta.year in [2007, 2012], meta.year self._is_2007 = meta.year == 2007 self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) def reset(self): self._predictions = defaultdict(list) # class name -> list of prediction strings def process(self, inputs, outputs): for input, output in zip(inputs, outputs): image_id = input["image_id"] instances = output["instances"].to(self._cpu_device) boxes = instances.pred_boxes.tensor.numpy() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() for box, score, cls in zip(boxes, scores, classes): xmin, ymin, xmax, ymax = box # The inverse of data loading logic in `datasets/pascal_voc.py` xmin += 1 ymin += 1 self._predictions[cls].append( f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}" ) def evaluate(self): """ Returns: dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75". """ all_predictions = comm.gather(self._predictions, dst=0) if not comm.is_main_process(): return predictions = defaultdict(list) for predictions_per_rank in all_predictions: for clsid, lines in predictions_per_rank.items(): predictions[clsid].extend(lines) del all_predictions self._logger.info( "Evaluating {} using {} metric. 
" "Note that results do not use the official Matlab API.".format( self._dataset_name, 2007 if self._is_2007 else 2012 ) ) with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname: res_file_template = os.path.join(dirname, "{}.txt") aps = defaultdict(list) # iou -> ap per class for cls_id, cls_name in enumerate(self._class_names): lines = predictions.get(cls_id, [""]) with open(res_file_template.format(cls_name), "w") as f: f.write("\n".join(lines)) for thresh in range(50, 100, 5): rec, prec, ap = voc_eval( res_file_template, self._anno_file_template, self._image_set_path, cls_name, ovthresh=thresh / 100.0, use_07_metric=self._is_2007, ) aps[thresh].append(ap * 100) ret = OrderedDict() mAP = {iou: np.mean(x) for iou, x in aps.items()} ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]} return ret ############################################################################## # # Below code is modified from # https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py # -------------------------------------------------------- # Fast/er R-CNN # Licensed under The MIT License [see LICENSE for details] # Written by Bharath Hariharan # -------------------------------------------------------- """Python implementation of the PASCAL VOC devkit's AP evaluation code.""" @lru_cache(maxsize=None) def parse_rec(filename): """Parse a PASCAL VOC xml file.""" with PathManager.open(filename) as f: tree = ET.parse(f) objects = [] for obj in tree.findall("object"): obj_struct = {} obj_struct["name"] = obj.find("name").text obj_struct["pose"] = obj.find("pose").text obj_struct["truncated"] = int(obj.find("truncated").text) obj_struct["difficult"] = int(obj.find("difficult").text) bbox = obj.find("bndbox") obj_struct["bbox"] = [ int(bbox.find("xmin").text), int(bbox.find("ymin").text), int(bbox.find("xmax").text), int(bbox.find("ymax").text), ] objects.append(obj_struct) return objects def voc_ap(rec, prec, use_07_metric=False): """Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11-point method (default:False). """ if use_07_metric: # 11 point metric ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11.0 else: # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. 
classname: Category name (duh) [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) """ # assumes detections are in detpath.format(classname) # assumes annotations are in annopath.format(imagename) # assumes imagesetfile is a text file with each line an image name # first load gt # read list of images with PathManager.open(imagesetfile, "r") as f: lines = f.readlines() imagenames = [x.strip() for x in lines] # load annots recs = {} for imagename in imagenames: recs[imagename] = parse_rec(annopath.format(imagename)) # extract gt objects for this class class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if obj["name"] == classname] bbox = np.array([x["bbox"] for x in R]) difficult = np.array([x["difficult"] for x in R]).astype(np.bool) # difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT det = [False] * len(R) npos = npos + sum(~difficult) class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} # read dets detfile = detpath.format(classname) with open(detfile, "r") as f: lines = f.readlines() splitlines = [x.strip().split(" ") for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) # sort by confidence sorted_ind = np.argsort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = -np.inf BBGT = R["bbox"].astype(float) if BBGT.size > 0: # compute overlaps # intersection ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum(ixmax - ixmin + 1.0, 0.0) ih = np.maximum(iymax - iymin + 1.0, 0.0) inters = iw * ih # union uni = ( (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters ) overlaps = inters / uni ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if ovmax > ovthresh: if not R["difficult"][jmax]: if not R["det"][jmax]: tp[d] = 1.0 R["det"][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 # compute precision recall fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos) # avoid divide by zero in case the first detection matches a difficult # ground truth prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = voc_ap(rec, prec, use_07_metric) return rec, prec, ap
banmo-main
third_party/detectron2_old/detectron2/evaluation/pascal_voc_evaluation.py
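The non-VOC07 branch of voc_ap integrates the area under the precision envelope of the PR curve. A tiny sketch of that computation on a hand-made recall/precision pair (the numbers are illustrative, not from any real detector):

import numpy as np

rec = np.array([0.1, 0.2, 0.4, 0.4, 0.8])     # cumulative recall per detection
prec = np.array([1.0, 1.0, 0.75, 0.6, 0.62])  # precision per detection

mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# precision envelope: make precision monotonically non-increasing in recall
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# sum (delta recall) * precision at the points where recall changes
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
print("AP:", ap)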
# Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import json import logging import os import pickle from collections import OrderedDict import torch import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .coco_evaluation import instances_to_coco_json from .evaluator import DatasetEvaluator class LVISEvaluator(DatasetEvaluator): """ Evaluate object proposal and instance detection/segmentation outputs using LVIS's metrics and evaluation API. """ def __init__(self, dataset_name, tasks=None, distributed=True, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have the following corresponding metadata: "json_file": the path to the LVIS format annotation tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump results. """ from lvis import LVIS self._logger = logging.getLogger(__name__) if tasks is not None and isinstance(tasks, CfgNode): self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) self._lvis_api = LVIS(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the LVIS evaluation server). self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0 def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a LVIS model. It is a list of dicts with key "instances" that contains :class:`Instances`. 
""" for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) self._predictions.append(prediction) def evaluate(self): if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[LVISEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): for pred in predictions: if "segmentation" in pred: return ("bbox", "segm") return ("bbox",) def _eval_predictions(self, predictions): """ Evaluate predictions. Fill self._results with the metrics of the tasks. Args: predictions (list[dict]): list of outputs from the model """ self._logger.info("Preparing results in the LVIS format ...") lvis_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(lvis_results) # LVIS evaluator can be used to evaluate results for COCO dataset categories. # In this case `_metadata` variable will have a field with COCO-specific category mapping. if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = { v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() } for result in lvis_results: result["category_id"] = reverse_id_mapping[result["category_id"]] else: # unmap the category ids for LVIS (from 0-indexed to 1-indexed) for result in lvis_results: result["category_id"] += 1 if self._output_dir: file_path = os.path.join(self._output_dir, "lvis_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(lvis_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating predictions ...") for task in sorted(tasks): res = _evaluate_predictions_on_lvis( self._lvis_api, lvis_results, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. """ if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. 
bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official LVIS API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) anno = lvis_api.load_anns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 
'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None): """ Args: iou_type (str): kpt_oks_sigmas (list[float]): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], }[iou_type] logger = logging.getLogger(__name__) if len(lvis_results) == 0: # TODO: check if needed logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} if iou_type == "segm": lvis_results = copy.deepcopy(lvis_results) # When evaluating mask AP, if the results contain bbox, LVIS API will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in lvis_results: c.pop("bbox", None) from lvis import LVISEval, LVISResults lvis_results = LVISResults(lvis_gt, lvis_results) lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) lvis_eval.run() lvis_eval.print_results() # Pull the standard metrics from the LVIS results results = lvis_eval.get_results() results = {metric: float(results[metric] * 100) for metric in metrics} logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) return results
banmo-main
third_party/detectron2_old/detectron2/evaluation/lvis_evaluation.py
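LVISEvaluator is normally driven through inference_on_dataset, just like the COCO evaluator. A hedged usage sketch: the dataset name, cfg and model below are placeholders, and it assumes the `lvis` package is installed and "lvis_v1_val" is a registered dataset whose metadata points at an LVIS-format json file:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import LVISEvaluator, inference_on_dataset

def evaluate_lvis(cfg, model, dataset_name="lvis_v1_val", output_dir="./lvis_eval"):
    # Builds the evaluator from dataset metadata and runs the model over the split.
    evaluator = LVISEvaluator(dataset_name, output_dir=output_dir)
    loader = build_detection_test_loader(cfg, dataset_name)
    results = inference_on_dataset(model, loader, evaluator)
    return results  # e.g. {"bbox": {...}, "segm": {...}}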
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import pprint
import sys
from collections.abc import Mapping


def print_csv_format(results):
    """
    Print main metrics in a format similar to Detectron,
    so that they are easy to copypaste into a spreadsheet.

    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score}
            unordered dict can also be printed, but in arbitrary order
    """
    assert isinstance(results, Mapping) or not len(results), results
    logger = logging.getLogger(__name__)
    for task, res in results.items():
        if isinstance(res, Mapping):
            # Don't print "AP-category" metrics since they are usually not tracked.
            important_res = [(k, v) for k, v in res.items() if "-" not in k]
            logger.info("copypaste: Task: {}".format(task))
            logger.info("copypaste: " + ",".join([k[0] for k in important_res]))
            logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res]))
        else:
            logger.info(f"copypaste: {task}={res}")


def verify_results(cfg, results):
    """
    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score}

    Returns:
        bool: whether the verification succeeds or not
    """
    expected_results = cfg.TEST.EXPECTED_RESULTS
    if not len(expected_results):
        return True

    ok = True
    for task, metric, expected, tolerance in expected_results:
        actual = results[task].get(metric, None)
        if actual is None:
            ok = False
            continue
        if not np.isfinite(actual):
            ok = False
            continue
        diff = abs(actual - expected)
        if diff > tolerance:
            ok = False

    logger = logging.getLogger(__name__)
    if not ok:
        logger.error("Result verification failed!")
        logger.error("Expected Results: " + str(expected_results))
        logger.error("Actual Results: " + pprint.pformat(results))
        sys.exit(1)
    else:
        logger.info("Results verification passed.")
    return ok


def flatten_results_dict(results):
    """
    Expand a hierarchical dict of scalars into a flat dict of scalars.
    If results[k1][k2][k3] = v, the returned dict will have the entry
    {"k1/k2/k3": v}.

    Args:
        results (dict):
    """
    r = {}
    for k, v in results.items():
        if isinstance(v, Mapping):
            v = flatten_results_dict(v)
            for kk, vv in v.items():
                r[k + "/" + kk] = vv
        else:
            r[k] = v
    return r
banmo-main
third_party/detectron2_old/detectron2/evaluation/testing.py
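flatten_results_dict turns the nested {task: {metric: score}} structure into "task/metric" keys, which is convenient for writers that only accept flat scalars. A small illustrative call, assuming detectron2 is importable (the numbers are arbitrary):

from detectron2.evaluation.testing import flatten_results_dict, print_csv_format

results = {"bbox": {"AP": 40.1, "AP50": 60.2}, "segm": {"AP": 36.7}}
flat = flatten_results_dict(results)
# {'bbox/AP': 40.1, 'bbox/AP50': 60.2, 'segm/AP': 36.7}
print(flat)
print_csv_format(results)  # emits "copypaste: ..." lines via the module logger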
# Copyright (c) Facebook, Inc. and its affiliates. import datetime import logging import time from collections import OrderedDict, abc from contextlib import ExitStack, contextmanager from typing import List, Union import torch from torch import nn from detectron2.utils.comm import get_world_size, is_main_process from detectron2.utils.logger import log_every_n_seconds class DatasetEvaluator: """ Base class for a dataset evaluator. The function :func:`inference_on_dataset` runs the model over all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. This class will accumulate information of the inputs/outputs (by :meth:`process`), and produce evaluation results in the end (by :meth:`evaluate`). """ def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ pass def process(self, inputs, outputs): """ Process the pair of inputs and outputs. If they contain batches, the pairs can be consumed one-by-one using `zip`: .. code-block:: python for input_, output in zip(inputs, outputs): # do evaluation on single input/output pair ... Args: inputs (list): the inputs that's used to call the model. outputs (list): the return value of `model(inputs)` """ pass def evaluate(self): """ Evaluate/summarize the performance, after processing all input/output pairs. Returns: dict: A new evaluator class can return a dict of arbitrary format as long as the user can process the results. In our train_net.py, we expect the following format: * key: the name of the task (e.g., bbox) * value: a dict of {metric name: score}, e.g.: {"AP50": 80} """ pass class DatasetEvaluators(DatasetEvaluator): """ Wrapper class to combine multiple :class:`DatasetEvaluator` instances. This class dispatches every evaluation call to all of its :class:`DatasetEvaluator`. """ def __init__(self, evaluators): """ Args: evaluators (list): the evaluators to combine. """ super().__init__() self._evaluators = evaluators def reset(self): for evaluator in self._evaluators: evaluator.reset() def process(self, inputs, outputs): for evaluator in self._evaluators: evaluator.process(inputs, outputs) def evaluate(self): results = OrderedDict() for evaluator in self._evaluators: result = evaluator.evaluate() if is_main_process() and result is not None: for k, v in result.items(): assert ( k not in results ), "Different evaluators produce results with the same key {}".format(k) results[k] = v return results def inference_on_dataset( model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] ): """ Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. 
Returns: The return value of `evaluator.evaluate()` """ num_devices = get_world_size() logger = logging.getLogger(__name__) logger.info("Start inference on {} batches".format(len(data_loader))) total = len(data_loader) # inference data loader must have a fixed length if evaluator is None: # create a no-op evaluator evaluator = DatasetEvaluators([]) if isinstance(evaluator, abc.MutableSequence): evaluator = DatasetEvaluators(evaluator) evaluator.reset() num_warmup = min(5, total - 1) start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 with ExitStack() as stack: if isinstance(model, nn.Module): stack.enter_context(inference_context(model)) stack.enter_context(torch.no_grad()) start_data_time = time.perf_counter() for idx, inputs in enumerate(data_loader): total_data_time += time.perf_counter() - start_data_time if idx == num_warmup: start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 start_compute_time = time.perf_counter() outputs = model(inputs) if torch.cuda.is_available(): torch.cuda.synchronize() total_compute_time += time.perf_counter() - start_compute_time start_eval_time = time.perf_counter() evaluator.process(inputs, outputs) total_eval_time += time.perf_counter() - start_eval_time iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) data_seconds_per_iter = total_data_time / iters_after_start compute_seconds_per_iter = total_compute_time / iters_after_start eval_seconds_per_iter = total_eval_time / iters_after_start total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) log_every_n_seconds( logging.INFO, ( f"Inference done {idx + 1}/{total}. " f"Dataloading: {data_seconds_per_iter:.4f} s / iter. " f"Inference: {compute_seconds_per_iter:.4f} s / iter. " f"Eval: {eval_seconds_per_iter:.4f} s / iter. " f"Total: {total_seconds_per_iter:.4f} s / iter. " f"ETA={eta}" ), n=5, ) start_data_time = time.perf_counter() # Measure the time only for this worker (before the synchronization barrier) total_time = time.perf_counter() - start_time total_time_str = str(datetime.timedelta(seconds=total_time)) # NOTE this format is parsed by grep logger.info( "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( total_time_str, total_time / (total - num_warmup), num_devices ) ) total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) logger.info( "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( total_compute_time_str, total_compute_time / (total - num_warmup), num_devices ) ) results = evaluator.evaluate() # An evaluator may return None when not in main process. # Replace it by an empty dict instead to make it easier for downstream code to handle if results is None: results = {} return results @contextmanager def inference_context(model): """ A context where the model is temporarily changed to eval mode, and restored to previous mode afterwards. Args: model: a torch Module """ training_mode = model.training model.eval() yield model.train(training_mode)
banmo-main
third_party/detectron2_old/detectron2/evaluation/evaluator.py
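Any object implementing reset/process/evaluate can be plugged into inference_on_dataset. A minimal sketch of a custom evaluator that just counts predicted instances per image; `model` and `data_loader` in the commented call are placeholders for a real model and test loader:

from detectron2.evaluation import DatasetEvaluator, inference_on_dataset

class InstanceCounter(DatasetEvaluator):
    """Toy evaluator: reports the average number of detections per image."""

    def reset(self):
        self._counts = []

    def process(self, inputs, outputs):
        for output in outputs:
            self._counts.append(len(output["instances"]))

    def evaluate(self):
        mean = sum(self._counts) / max(len(self._counts), 1)
        # follows the expected {task: {metric: score}} format
        return {"counting": {"instances_per_image": mean}}

# results = inference_on_dataset(model, data_loader, InstanceCounter())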
# Copyright (c) Facebook, Inc. and its affiliates. import itertools import json import numpy as np import os import torch from pycocotools.cocoeval import COCOeval, maskUtils from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated from detectron2.utils.file_io import PathManager from .coco_evaluation import COCOEvaluator class RotatedCOCOeval(COCOeval): @staticmethod def is_rotated(box_list): if type(box_list) == np.ndarray: return box_list.shape[1] == 5 elif type(box_list) == list: if box_list == []: # cannot decide the box_dim return False return np.all( np.array( [ (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray)) for obj in box_list ] ) ) return False @staticmethod def boxlist_to_tensor(boxlist, output_box_dim): if type(boxlist) == np.ndarray: box_tensor = torch.from_numpy(boxlist) elif type(boxlist) == list: if boxlist == []: return torch.zeros((0, output_box_dim), dtype=torch.float32) else: box_tensor = torch.FloatTensor(boxlist) else: raise Exception("Unrecognized boxlist type") input_box_dim = box_tensor.shape[1] if input_box_dim != output_box_dim: if input_box_dim == 4 and output_box_dim == 5: box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) else: raise Exception( "Unable to convert from {}-dim box to {}-dim box".format( input_box_dim, output_box_dim ) ) return box_tensor def compute_iou_dt_gt(self, dt, gt, is_crowd): if self.is_rotated(dt) or self.is_rotated(gt): # TODO: take is_crowd into consideration assert all(c == 0 for c in is_crowd) dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5)) gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5)) return pairwise_iou_rotated(dt, gt) else: # This is the same as the classical COCO evaluation return maskUtils.iou(dt, gt, is_crowd) def computeIoU(self, imgId, catId): p = self.params if p.useCats: gt = self._gts[imgId, catId] dt = self._dts[imgId, catId] else: gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] if len(gt) == 0 and len(dt) == 0: return [] inds = np.argsort([-d["score"] for d in dt], kind="mergesort") dt = [dt[i] for i in inds] if len(dt) > p.maxDets[-1]: dt = dt[0 : p.maxDets[-1]] assert p.iouType == "bbox", "unsupported iouType for iou computation" g = [g["bbox"] for g in gt] d = [d["bbox"] for d in dt] # compute iou between each dt and gt region iscrowd = [int(o["iscrowd"]) for o in gt] # Note: this function is copied from cocoeval.py in cocoapi # and the major difference is here. ious = self.compute_iou_dt_gt(d, g, iscrowd) return ious class RotatedCOCOEvaluator(COCOEvaluator): """ Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs, with rotated boxes support. Note: this uses IOU only and does not consider angle differences. """ def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. 
""" for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = self.instances_to_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) self._predictions.append(prediction) def instances_to_json(self, instances, img_id): num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() if boxes.shape[1] == 4: boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } results.append(result) return results def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused """ Evaluate predictions on the given tasks. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = { v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() } for result in coco_results: result["category_id"] = reverse_id_mapping[result["category_id"]] if self._output_dir: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating predictions ...") assert self._tasks is None or set(self._tasks) == { "bbox" }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported" coco_eval = ( self._evaluate_predictions_on_coco(self._coco_api, coco_results) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) task = "bbox" res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res def _evaluate_predictions_on_coco(self, coco_gt, coco_results): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_dt = coco_gt.loadRes(coco_results) # Only bbox is supported for now coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox") coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return coco_eval
banmo-main
third_party/detectron2_old/detectron2/evaluation/rotated_coco_evaluation.py
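RotatedCOCOeval falls back from pycocotools' IoU to pairwise_iou_rotated whenever 5-dim (XYWHA) boxes are detected, converting 4-dim boxes with an implicit angle of 0. A small sketch of that path on hand-made boxes, assuming a detectron2 build with its compiled ops available:

import torch
from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated

# An axis-aligned XYWH detection, converted to XYWHA (angle = 0) as boxlist_to_tensor does.
dt_xywh = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
dt = RotatedBoxes(BoxMode.convert(dt_xywh, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS))
# A genuinely rotated ground-truth box: (cx, cy, w, h, angle in degrees).
gt = RotatedBoxes(torch.tensor([[5.0, 5.0, 10.0, 10.0, 30.0]]))

ious = pairwise_iou_rotated(dt, gt)  # shape (num_dt, num_gt)
print(ious)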
# Copyright (c) Facebook, Inc. and its affiliates. import math import torch import torch.nn.functional as F from detectron2.layers import cat from detectron2.layers.roi_align_rotated import ROIAlignRotated from detectron2.modeling import poolers from detectron2.modeling.proposal_generator import rpn from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference from detectron2.structures import Boxes, ImageList, Instances, Keypoints from .shared import alias, to_device """ This file contains caffe2-compatible implementation of several detectron2 components. """ class Caffe2Boxes(Boxes): """ Representing a list of detectron2.structures.Boxes from minibatch, each box is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector (batch index + 5 coordinates) for RotatedBoxes. """ def __init__(self, tensor): assert isinstance(tensor, torch.Tensor) assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size() # TODO: make tensor immutable when dim is Nx5 for Boxes, # and Nx6 for RotatedBoxes? self.tensor = tensor # TODO clean up this class, maybe just extend Instances class InstancesList(object): """ Tensor representation of a list of Instances object for a batch of images. When dealing with a batch of images with Caffe2 ops, a list of bboxes (instances) are usually represented by single Tensor with size (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is for providing common functions to convert between these two representations. """ def __init__(self, im_info, indices, extra_fields=None): # [N, 3] -> (H, W, Scale) self.im_info = im_info # [N,] -> indice of batch to which the instance belongs self.indices = indices # [N, ...] self.batch_extra_fields = extra_fields or {} self.image_size = self.im_info def get_fields(self): """like `get_fields` in the Instances object, but return each field in tensor representations""" ret = {} for k, v in self.batch_extra_fields.items(): # if isinstance(v, torch.Tensor): # tensor_rep = v # elif isinstance(v, (Boxes, Keypoints)): # tensor_rep = v.tensor # else: # raise ValueError("Can't find tensor representation for: {}".format()) ret[k] = v return ret def has(self, name): return name in self.batch_extra_fields def set(self, name, value): data_len = len(value) if len(self.batch_extra_fields): assert ( len(self) == data_len ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) self.batch_extra_fields[name] = value def __setattr__(self, name, val): if name in ["im_info", "indices", "batch_extra_fields", "image_size"]: super().__setattr__(name, val) else: self.set(name, val) def __getattr__(self, name): if name not in self.batch_extra_fields: raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) return self.batch_extra_fields[name] def __len__(self): return len(self.indices) def flatten(self): ret = [] for _, v in self.batch_extra_fields.items(): if isinstance(v, (Boxes, Keypoints)): ret.append(v.tensor) else: ret.append(v) return ret @staticmethod def to_d2_instances_list(instances_list): """ Convert InstancesList to List[Instances]. The input `instances_list` can also be a List[Instances], in this case this method is a non-op. 
""" if not isinstance(instances_list, InstancesList): assert all(isinstance(x, Instances) for x in instances_list) return instances_list ret = [] for i, info in enumerate(instances_list.im_info): instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())])) ids = instances_list.indices == i for k, v in instances_list.batch_extra_fields.items(): if isinstance(v, torch.Tensor): instances.set(k, v[ids]) continue elif isinstance(v, Boxes): instances.set(k, v[ids, -4:]) continue target_type, tensor_source = v assert isinstance(tensor_source, torch.Tensor) assert tensor_source.shape[0] == instances_list.indices.shape[0] tensor_source = tensor_source[ids] if issubclass(target_type, Boxes): instances.set(k, Boxes(tensor_source[:, -4:])) elif issubclass(target_type, Keypoints): instances.set(k, Keypoints(tensor_source)) elif issubclass(target_type, torch.Tensor): instances.set(k, tensor_source) else: raise ValueError("Can't handle targe type: {}".format(target_type)) ret.append(instances) return ret class Caffe2Compatible(object): """ A model can inherit this class to indicate that it can be traced and deployed with caffe2. """ def _get_tensor_mode(self): return self._tensor_mode def _set_tensor_mode(self, v): self._tensor_mode = v tensor_mode = property(_get_tensor_mode, _set_tensor_mode) """ If true, the model expects C2-style tensor only inputs/outputs format. """ class Caffe2RPN(Caffe2Compatible, rpn.RPN): def _generate_proposals( self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None ): assert isinstance(images, ImageList) if self.tensor_mode: im_info = images.image_sizes else: im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to( images.tensor.device ) assert isinstance(im_info, torch.Tensor) rpn_rois_list = [] rpn_roi_probs_list = [] for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip( objectness_logits_pred, anchor_deltas_pred, iter(self.anchor_generator.cell_anchors), self.anchor_generator.strides, ): scores = scores.detach() bbox_deltas = bbox_deltas.detach() rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals( scores, bbox_deltas, im_info, cell_anchors_tensor, spatial_scale=1.0 / feat_stride, pre_nms_topN=self.pre_nms_topk[self.training], post_nms_topN=self.post_nms_topk[self.training], nms_thresh=self.nms_thresh, min_size=self.min_box_size, # correct_transform_coords=True, # deprecated argument angle_bound_on=True, # Default angle_bound_lo=-180, angle_bound_hi=180, clip_angle_thresh=1.0, # Default legacy_plus_one=False, ) rpn_rois_list.append(rpn_rois) rpn_roi_probs_list.append(rpn_roi_probs) # For FPN in D2, in RPN all proposals from different levels are concated # together, ranked and picked by top post_nms_topk. Then in ROIPooler # it calculates level_assignments and calls the RoIAlign from # the corresponding level. if len(objectness_logits_pred) == 1: rpn_rois = rpn_rois_list[0] rpn_roi_probs = rpn_roi_probs_list[0] else: assert len(rpn_rois_list) == len(rpn_roi_probs_list) rpn_post_nms_topN = self.post_nms_topk[self.training] device = rpn_rois_list[0].device input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)] # TODO remove this after confirming rpn_max_level/rpn_min_level # is not needed in CollectRpnProposals. 
feature_strides = list(self.anchor_generator.strides) rpn_min_level = int(math.log2(feature_strides[0])) rpn_max_level = int(math.log2(feature_strides[-1])) assert (rpn_max_level - rpn_min_level + 1) == len( rpn_rois_list ), "CollectRpnProposals requires continuous levels" rpn_rois = torch.ops._caffe2.CollectRpnProposals( input_list, # NOTE: in current implementation, rpn_max_level and rpn_min_level # are not needed, only the subtraction of two matters and it # can be infer from the number of inputs. Keep them now for # consistency. rpn_max_level=2 + len(rpn_rois_list) - 1, rpn_min_level=2, rpn_post_nms_topN=rpn_post_nms_topN, ) rpn_rois = to_device(rpn_rois, device) rpn_roi_probs = [] proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode) return proposals, {} def forward(self, images, features, gt_instances=None): assert not self.training features = [features[f] for f in self.in_features] objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features) return self._generate_proposals( images, objectness_logits_pred, anchor_deltas_pred, gt_instances, ) @staticmethod def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode): proposals = InstancesList( im_info=im_info, indices=rpn_rois[:, 0], extra_fields={ "proposal_boxes": Caffe2Boxes(rpn_rois), "objectness_logits": (torch.Tensor, rpn_roi_probs), }, ) if not tensor_mode: proposals = InstancesList.to_d2_instances_list(proposals) else: proposals = [proposals] return proposals class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler): @staticmethod def c2_preprocess(box_lists): assert all(isinstance(x, Boxes) for x in box_lists) if all(isinstance(x, Caffe2Boxes) for x in box_lists): # input is pure-tensor based assert len(box_lists) == 1 pooler_fmt_boxes = box_lists[0].tensor else: pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists) return pooler_fmt_boxes def forward(self, x, box_lists): assert not self.training pooler_fmt_boxes = self.c2_preprocess(box_lists) num_level_assignments = len(self.level_poolers) if num_level_assignments == 1: if isinstance(self.level_poolers[0], ROIAlignRotated): c2_roi_align = torch.ops._caffe2.RoIAlignRotated aligned = True else: c2_roi_align = torch.ops._caffe2.RoIAlign aligned = self.level_poolers[0].aligned out = c2_roi_align( x[0], pooler_fmt_boxes, order="NCHW", spatial_scale=float(self.level_poolers[0].spatial_scale), pooled_h=int(self.output_size[0]), pooled_w=int(self.output_size[1]), sampling_ratio=int(self.level_poolers[0].sampling_ratio), aligned=aligned, ) return out device = pooler_fmt_boxes.device assert ( self.max_level - self.min_level + 1 == 4 ), "Currently DistributeFpnProposals only support 4 levels" fpn_outputs = torch.ops._caffe2.DistributeFpnProposals( to_device(pooler_fmt_boxes, "cpu"), roi_canonical_scale=self.canonical_box_size, roi_canonical_level=self.canonical_level, roi_max_level=self.max_level, roi_min_level=self.min_level, legacy_plus_one=False, ) fpn_outputs = [to_device(x, device) for x in fpn_outputs] rois_fpn_list = fpn_outputs[:-1] rois_idx_restore_int32 = fpn_outputs[-1] roi_feat_fpn_list = [] for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers): if isinstance(pooler, ROIAlignRotated): c2_roi_align = torch.ops._caffe2.RoIAlignRotated aligned = True else: c2_roi_align = torch.ops._caffe2.RoIAlign aligned = bool(pooler.aligned) roi_feat_fpn = c2_roi_align( x_level, roi_fpn, order="NCHW", spatial_scale=float(pooler.spatial_scale), pooled_h=int(self.output_size[0]), pooled_w=int(self.output_size[1]), 
sampling_ratio=int(pooler.sampling_ratio), aligned=aligned, ) roi_feat_fpn_list.append(roi_feat_fpn) roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0) assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, ( "Caffe2 export requires tracing with a model checkpoint + input that can produce valid" " detections. But no detections were obtained with the given checkpoint and input!" ) roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32) return roi_feat class Caffe2FastRCNNOutputsInference: def __init__(self, tensor_mode): self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode def __call__(self, box_predictor, predictions, proposals): """equivalent to FastRCNNOutputLayers.inference""" num_classes = box_predictor.num_classes score_thresh = box_predictor.test_score_thresh nms_thresh = box_predictor.test_nms_thresh topk_per_image = box_predictor.test_topk_per_image is_rotated = len(box_predictor.box2box_transform.weights) == 5 if is_rotated: box_dim = 5 assert box_predictor.box2box_transform.weights[4] == 1, ( "The weights for Rotated BBoxTransform in C2 have only 4 dimensions," + " thus enforcing the angle weight to be 1 for now" ) box2box_transform_weights = box_predictor.box2box_transform.weights[:4] else: box_dim = 4 box2box_transform_weights = box_predictor.box2box_transform.weights class_logits, box_regression = predictions if num_classes + 1 == class_logits.shape[1]: class_prob = F.softmax(class_logits, -1) else: assert num_classes == class_logits.shape[1] class_prob = F.sigmoid(class_logits) # BoxWithNMSLimit will infer num_classes from the shape of the class_prob # So append a zero column as placeholder for the background class class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1) assert box_regression.shape[1] % box_dim == 0 cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1 input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1 rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals]) device, dtype = rois.tensor.device, rois.tensor.dtype if input_tensor_mode: im_info = proposals[0].image_size rois = rois.tensor else: im_info = torch.tensor( [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]] ) batch_ids = cat( [ torch.full((b, 1), i, dtype=dtype, device=device) for i, b in enumerate(len(p) for p in proposals) ], dim=0, ) rois = torch.cat([batch_ids, rois.tensor], dim=1) roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform( to_device(rois, "cpu"), to_device(box_regression, "cpu"), to_device(im_info, "cpu"), weights=box2box_transform_weights, apply_scale=True, rotated=is_rotated, angle_bound_on=True, angle_bound_lo=-180, angle_bound_hi=180, clip_angle_thresh=1.0, legacy_plus_one=False, ) roi_pred_bbox = to_device(roi_pred_bbox, device) roi_batch_splits = to_device(roi_batch_splits, device) nms_outputs = torch.ops._caffe2.BoxWithNMSLimit( to_device(class_prob, "cpu"), to_device(roi_pred_bbox, "cpu"), to_device(roi_batch_splits, "cpu"), score_thresh=float(score_thresh), nms=float(nms_thresh), detections_per_im=int(topk_per_image), soft_nms_enabled=False, soft_nms_method="linear", soft_nms_sigma=0.5, soft_nms_min_score_thres=0.001, rotated=is_rotated, cls_agnostic_bbox_reg=cls_agnostic_bbox_reg, input_boxes_include_bg_cls=False, output_classes_include_bg_cls=False, legacy_plus_one=False, ) roi_score_nms = to_device(nms_outputs[0], device) roi_bbox_nms = to_device(nms_outputs[1], device) roi_class_nms = 
to_device(nms_outputs[2], device) roi_batch_splits_nms = to_device(nms_outputs[3], device) roi_keeps_nms = to_device(nms_outputs[4], device) roi_keeps_size_nms = to_device(nms_outputs[5], device) if not self.tensor_mode: roi_class_nms = roi_class_nms.to(torch.int64) roi_batch_ids = cat( [ torch.full((b, 1), i, dtype=dtype, device=device) for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms) ], dim=0, ) roi_class_nms = alias(roi_class_nms, "class_nms") roi_score_nms = alias(roi_score_nms, "score_nms") roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms") roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms") roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms") roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms") results = InstancesList( im_info=im_info, indices=roi_batch_ids[:, 0], extra_fields={ "pred_boxes": Caffe2Boxes(roi_bbox_nms), "scores": roi_score_nms, "pred_classes": roi_class_nms, }, ) if not self.tensor_mode: results = InstancesList.to_d2_instances_list(results) batch_splits = roi_batch_splits_nms.int().tolist() kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits)) else: results = [results] kept_indices = [roi_keeps_nms] return results, kept_indices class Caffe2MaskRCNNInference: def __call__(self, pred_mask_logits, pred_instances): """equivalent to mask_head.mask_rcnn_inference""" if all(isinstance(x, InstancesList) for x in pred_instances): assert len(pred_instances) == 1 mask_probs_pred = pred_mask_logits.sigmoid() mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs") pred_instances[0].pred_masks = mask_probs_pred else: mask_rcnn_inference(pred_mask_logits, pred_instances) class Caffe2KeypointRCNNInference: def __init__(self, use_heatmap_max_keypoint): self.use_heatmap_max_keypoint = use_heatmap_max_keypoint def __call__(self, pred_keypoint_logits, pred_instances): # just return the keypoint heatmap for now, # there will be option to call HeatmapMaxKeypointOp output = alias(pred_keypoint_logits, "kps_score") if all(isinstance(x, InstancesList) for x in pred_instances): assert len(pred_instances) == 1 if self.use_heatmap_max_keypoint: device = output.device output = torch.ops._caffe2.HeatmapMaxKeypoint( to_device(output, "cpu"), pred_instances[0].pred_boxes.tensor, should_output_softmax=True, # worth make it configerable? ) output = to_device(output, device) output = alias(output, "keypoints_out") pred_instances[0].pred_keypoints = output return pred_keypoint_logits
banmo-main
third_party/detectron2_old/detectron2/export/c10.py
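The Caffe2 ops used above (BBoxTransform, BoxWithNMSLimit) expect RoIs with a leading per-image batch-index column, which Caffe2FastRCNNOutputsInference builds by concatenating torch.full columns. A minimal standalone sketch of that pattern, assuming only plain PyTorch; build_batch_ids is a hypothetical helper name, not part of detectron2:

import torch

def build_batch_ids(proposal_counts, dtype=torch.float32, device="cpu"):
    # Image i contributes proposal_counts[i] identical row ids, matching the
    # cat([torch.full((b, 1), i, ...) for i, b in enumerate(...)]) pattern above.
    return torch.cat(
        [
            torch.full((n, 1), i, dtype=dtype, device=device)
            for i, n in enumerate(proposal_counts)
        ],
        dim=0,
    )

# Two images with 3 and 2 proposals -> a (5, 1) column [0, 0, 0, 1, 1]
print(build_batch_ids([3, 2]).flatten().tolist())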
# Copyright (c) Facebook, Inc. and its affiliates. import copy import io import logging import numpy as np from typing import List import onnx import torch from caffe2.proto import caffe2_pb2 from caffe2.python import core from caffe2.python.onnx.backend import Caffe2Backend from tabulate import tabulate from termcolor import colored from torch.onnx import OperatorExportTypes from .shared import ( ScopedWS, construct_init_net_from_params, fuse_alias_placeholder, fuse_copy_between_cpu_and_gpu, get_params_from_init_net, group_norm_replace_aten_with_caffe2, infer_device_type, remove_dead_end_ops, remove_reshape_for_fc, save_graph, ) logger = logging.getLogger(__name__) def export_onnx_model(model, inputs): """ Trace and export a model to onnx format. Args: model (nn.Module): inputs (tuple[args]): the model will be called by `model(*inputs)` Returns: an onnx model """ assert isinstance(model, torch.nn.Module) # make sure all modules are in eval mode, onnx may change the training state # of the module if the states are not consistent def _check_eval(module): assert not module.training model.apply(_check_eval) # Export the model to ONNX with torch.no_grad(): with io.BytesIO() as f: torch.onnx.export( model, inputs, f, operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK, # verbose=True, # NOTE: uncomment this for debugging # export_params=True, ) onnx_model = onnx.load_from_string(f.getvalue()) # Apply ONNX's Optimization all_passes = onnx.optimizer.get_available_passes() passes = ["fuse_bn_into_conv"] assert all(p in all_passes for p in passes) onnx_model = onnx.optimizer.optimize(onnx_model, passes) return onnx_model def _op_stats(net_def): type_count = {} for t in [op.type for op in net_def.op]: type_count[t] = type_count.get(t, 0) + 1 type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list) def _assign_device_option( predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor] ): """ ONNX exported network doesn't have concept of device, assign necessary device option for each op in order to make it runable on GPU runtime. 
""" def _get_device_type(torch_tensor): assert torch_tensor.device.type in ["cpu", "cuda"] assert torch_tensor.device.index == 0 return torch_tensor.device.type def _assign_op_device_option(net_proto, net_ssa, blob_device_types): for op, ssa_i in zip(net_proto.op, net_ssa): if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]: op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) else: devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]] assert all(d == devices[0] for d in devices) if devices[0] == "cuda": op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) # update ops in predict_net predict_net_input_device_types = { (name, 0): _get_device_type(tensor) for name, tensor in zip(predict_net.external_input, tensor_inputs) } predict_net_device_types = infer_device_type( predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch" ) predict_net_ssa, _ = core.get_ssa(predict_net) _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types) # update ops in init_net init_net_ssa, versions = core.get_ssa(init_net) init_net_output_device_types = { (name, versions[name]): predict_net_device_types[(name, 0)] for name in init_net.external_output } init_net_device_types = infer_device_type( init_net, known_status=init_net_output_device_types, device_name_style="pytorch" ) _assign_op_device_option(init_net, init_net_ssa, init_net_device_types) def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]): """ Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX. Arg: model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py tensor_inputs: a list of tensors that caffe2 model takes as input. """ model = copy.deepcopy(model) assert isinstance(model, torch.nn.Module) assert hasattr(model, "encode_additional_info") # Export via ONNX logger.info( "Exporting a {} model via ONNX ...".format(type(model).__name__) + " Some warnings from ONNX are expected and are usually not to worry about." ) onnx_model = export_onnx_model(model, (tensor_inputs,)) # Convert ONNX model to Caffe2 protobuf init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) ops_table = [[op.type, op.input, op.output] for op in predict_net.op] table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe") logger.info( "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan") ) # Apply protobuf optimization fuse_alias_placeholder(predict_net, init_net) if any(t.device.type != "cpu" for t in tensor_inputs): fuse_copy_between_cpu_and_gpu(predict_net) remove_dead_end_ops(init_net) _assign_device_option(predict_net, init_net, tensor_inputs) params, device_options = get_params_from_init_net(init_net) predict_net, params = remove_reshape_for_fc(predict_net, params) init_net = construct_init_net_from_params(params, device_options) group_norm_replace_aten_with_caffe2(predict_net) # Record necessary information for running the pb model in Detectron2 system. model.encode_additional_info(predict_net, init_net) logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net))) logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net))) return predict_net, init_net def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path): """ Run the caffe2 model on given inputs, recording the shape and draw the graph. predict_net/init_net: caffe2 model. 
tensor_inputs: a list of tensors that caffe2 model takes as input. graph_save_path: path for saving graph of exported model. """ logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path)) save_graph(predict_net, graph_save_path, op_only=False) # Run the exported Caffe2 net logger.info("Running ONNX exported model ...") with ScopedWS("__ws_tmp__", True) as ws: ws.RunNetOnce(init_net) initialized_blobs = set(ws.Blobs()) uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs] for name, blob in zip(uninitialized, tensor_inputs): ws.FeedBlob(name, blob) try: ws.RunNetOnce(predict_net) except RuntimeError as e: logger.warning("Encountered RuntimeError: \n{}".format(str(e))) ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()} blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)} logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path)) save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes) return ws_blobs
banmo-main
third_party/detectron2_old/detectron2/export/caffe2_export.py
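export_onnx_model above traces the model and writes the ONNX proto into an in-memory buffer before handing it to Caffe2Backend. A minimal sketch of just that export-to-buffer step; the toy model and input shape are illustrative, not taken from this repo:

import io
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()).eval()
dummy = torch.randn(1, 3, 32, 32)

with torch.no_grad(), io.BytesIO() as f:
    # Same pattern as export_onnx_model: serialize into BytesIO and keep the raw
    # bytes, which can then be parsed with onnx.load_from_string and optimized.
    torch.onnx.export(model, (dummy,), f)
    onnx_bytes = f.getvalue()

print(len(onnx_bytes), "bytes of ONNX proto")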
import collections from dataclasses import dataclass from typing import Callable, List, Optional, Tuple import torch from torch import nn from detectron2.structures import Boxes, Instances, ROIMasks from detectron2.utils.registry import _convert_target_to_string, locate from .torchscript_patch import patch_builtin_len @dataclass class Schema: """ A Schema defines how to flatten a possibly hierarchical object into tuple of primitive objects, so it can be used as inputs/outputs of PyTorch's tracing. PyTorch does not support tracing a function that produces rich output structures (e.g. dict, Instances, Boxes). To trace such a function, we flatten the rich object into tuple of tensors, and return this tuple of tensors instead. Meanwhile, we also need to know how to "rebuild" the original object from the flattened results, so we can evaluate the flattened results. A Schema defines how to flatten an object, and while flattening it, it records necessary schemas so that the object can be rebuilt using the flattened outputs. The flattened object and the schema object is returned by ``.flatten`` classmethod. Then the original object can be rebuilt with the ``__call__`` method of schema. A Schema is a dataclass that can be serialized easily. """ # inspired by FetchMapper in tensorflow/python/client/session.py @classmethod def flatten(cls, obj): raise NotImplementedError def __call__(self, values): raise NotImplementedError @staticmethod def _concat(values): ret = () sizes = [] for v in values: assert isinstance(v, tuple), "Flattened results must be a tuple" ret = ret + v sizes.append(len(v)) return ret, sizes @staticmethod def _split(values, sizes): if len(sizes): expected_len = sum(sizes) assert ( len(values) == expected_len ), f"Values has length {len(values)} but expect length {expected_len}." ret = [] for k in range(len(sizes)): begin, end = sum(sizes[:k]), sum(sizes[: k + 1]) ret.append(values[begin:end]) return ret @dataclass class ListSchema(Schema): schemas: List[Schema] # the schemas that define how to flatten each element in the list sizes: List[int] # the flattened length of each element def __call__(self, values): values = self._split(values, self.sizes) if len(values) != len(self.schemas): raise ValueError( f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!" 
) values = [m(v) for m, v in zip(self.schemas, values)] return list(values) @classmethod def flatten(cls, obj): res = [flatten_to_tuple(k) for k in obj] values, sizes = cls._concat([k[0] for k in res]) return values, cls([k[1] for k in res], sizes) @dataclass class TupleSchema(ListSchema): def __call__(self, values): return tuple(super().__call__(values)) @dataclass class IdentitySchema(Schema): def __call__(self, values): return values[0] @classmethod def flatten(cls, obj): return (obj,), cls() @dataclass class DictSchema(ListSchema): keys: List[str] def __call__(self, values): values = super().__call__(values) return dict(zip(self.keys, values)) @classmethod def flatten(cls, obj): for k in obj.keys(): if not isinstance(k, str): raise KeyError("Only support flattening dictionaries if keys are str.") keys = sorted(obj.keys()) values = [obj[k] for k in keys] ret, schema = ListSchema.flatten(values) return ret, cls(schema.schemas, schema.sizes, keys) @dataclass class InstancesSchema(DictSchema): def __call__(self, values): image_size, fields = values[-1], values[:-1] fields = super().__call__(fields) return Instances(image_size, **fields) @classmethod def flatten(cls, obj): ret, schema = super().flatten(obj.get_fields()) size = obj.image_size if not isinstance(size, torch.Tensor): size = torch.tensor(size) return ret + (size,), schema @dataclass class TensorWrapSchema(Schema): """ For classes that are simple wrapper of tensors, e.g. Boxes, RotatedBoxes, BitMasks """ class_name: str def __call__(self, values): return locate(self.class_name)(values[0]) @classmethod def flatten(cls, obj): return (obj.tensor,), cls(_convert_target_to_string(type(obj))) # if more custom structures needed in the future, can allow # passing in extra schemas for custom types def flatten_to_tuple(obj): """ Flatten an object so it can be used for PyTorch tracing. Also returns how to rebuild the original object from the flattened outputs. Returns: res (tuple): the flattened results that can be used as tracing outputs schema: an object with a ``__call__`` method such that ``schema(res) == obj``. It is a pure dataclass that can be serialized. """ schemas = [ ((str, bytes), IdentitySchema), (list, ListSchema), (tuple, TupleSchema), (collections.abc.Mapping, DictSchema), (Instances, InstancesSchema), ((Boxes, ROIMasks), TensorWrapSchema), ] for klass, schema in schemas: if isinstance(obj, klass): F = schema break else: F = IdentitySchema return F.flatten(obj) class TracingAdapter(nn.Module): """ A model may take rich input/output format (e.g. dict or custom classes), but `torch.jit.trace` requires tuple of tensors as input/output. This adapter flattens input/output format of a model so it becomes traceable. It also records the necessary schema to rebuild model's inputs/outputs from flattened inputs/outputs. Example: :: outputs = model(inputs) # inputs/outputs may be rich structure adapter = TracingAdapter(model, inputs) # can now trace the model, with adapter.flattened_inputs, or another # tuple of tensors with the same length and meaning traced = torch.jit.trace(adapter, adapter.flattened_inputs) # traced model can only produce flattened outputs (tuple of tensors) flattened_outputs = traced(*adapter.flattened_inputs) # adapter knows the schema to convert it back (new_outputs == outputs) new_outputs = adapter.outputs_schema(flattened_outputs) """ flattened_inputs: Tuple[torch.Tensor] = None """ Flattened version of inputs given to this class's constructor. 
""" inputs_schema: Schema = None """ Schema of the inputs given to this class's constructor. """ outputs_schema: Schema = None """ Schema of the output produced by calling the given model with inputs. """ def __init__( self, model: nn.Module, inputs, inference_func: Optional[Callable] = None, allow_non_tensor: bool = False, ): """ Args: model: an nn.Module inputs: An input argument or a tuple of input arguments used to call model. After flattening, it has to only consist of tensors. inference_func: a callable that takes (model, *inputs), calls the model with inputs, and return outputs. By default it is ``lambda model, *inputs: model(*inputs)``. Can be override if you need to call the model differently. allow_non_tensor: allow inputs/outputs to contain non-tensor objects. This option will filter out non-tensor objects to make the model traceable, but ``inputs_schema``/``outputs_schema`` cannot be used anymore because inputs/outputs cannot be rebuilt from pure tensors. This is useful when you're only interested in the single trace of execution (e.g. for flop count), but not interested in generalizing the traced graph to new inputs. """ super().__init__() if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): model = model.module self.model = model if not isinstance(inputs, tuple): inputs = (inputs,) self.inputs = inputs self.allow_non_tensor = allow_non_tensor if inference_func is None: inference_func = lambda model, *inputs: model(*inputs) # noqa self.inference_func = inference_func self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs) if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs): return if self.allow_non_tensor: self.flattened_inputs = tuple( [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)] ) self.inputs_schema = None else: for input in self.flattened_inputs: if not isinstance(input, torch.Tensor): raise ValueError( "Inputs for tracing must only contain tensors. " f"Got a {type(input)} instead." ) def forward(self, *args: torch.Tensor): with torch.no_grad(), patch_builtin_len(): if self.inputs_schema is not None: inputs_orig_format = self.inputs_schema(args) else: if args != self.flattened_inputs: raise ValueError( "TracingAdapter does not contain valid inputs_schema." " So it cannot generalize to other inputs and must be" " traced with `.flattened_inputs`." ) inputs_orig_format = self.inputs outputs = self.inference_func(self.model, *inputs_orig_format) flattened_outputs, schema = flatten_to_tuple(outputs) flattened_output_tensors = tuple( [x for x in flattened_outputs if isinstance(x, torch.Tensor)] ) if len(flattened_output_tensors) < len(flattened_outputs): if self.allow_non_tensor: flattened_outputs = flattened_output_tensors self.outputs_schema = None else: raise ValueError( "Model cannot be traced because some model outputs " "cannot flatten to tensors." ) else: # schema is valid if self.outputs_schema is None: self.outputs_schema = schema else: assert self.outputs_schema == schema, ( "Model should always return outputs with the same " "structure so it can be traced!" ) return flattened_outputs def _create_wrapper(self, traced_model): """ Return a function that has an input/output interface the same as the original model, but it calls the given traced model under the hood. """ def forward(*args): flattened_inputs, _ = flatten_to_tuple(args) flattened_outputs = traced_model(*flattened_inputs) return self.outputs_schema(flattened_outputs) return forward
banmo-main
third_party/detectron2_old/detectron2/export/flatten.py
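flatten_to_tuple and the Schema dataclasses above can be exercised on their own. A small sketch of the flatten/rebuild round trip, assuming this detectron2 copy (or an installed detectron2) is on the Python path:

import torch
from detectron2.export.flatten import flatten_to_tuple

obj = {"scores": torch.rand(4), "boxes": torch.rand(4, 4)}
flat, schema = flatten_to_tuple(obj)  # tuple of tensors + a serializable Schema
rebuilt = schema(flat)                # dict rebuilt from the flattened tuple
assert torch.equal(rebuilt["scores"], obj["scores"])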
# Copyright (c) Facebook, Inc. and its affiliates. import contextlib from unittest import mock import torch from detectron2.modeling import poolers from detectron2.modeling.proposal_generator import rpn from detectron2.modeling.roi_heads import keypoint_head, mask_head from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers from .c10 import ( Caffe2Compatible, Caffe2FastRCNNOutputsInference, Caffe2KeypointRCNNInference, Caffe2MaskRCNNInference, Caffe2ROIPooler, Caffe2RPN, ) class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference( tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers ): with mock.patch.object( box_predictor_type, "inference", autospec=True, side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): with mock.patch( "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): with mock.patch( "{}.keypoint_rcnn_inference".format(patched_module), side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 class ROIHeadsPatcher: def __init__(self, heads, use_heatmap_max_keypoint): self.heads = heads self.use_heatmap_max_keypoint = use_heatmap_max_keypoint @contextlib.contextmanager def mock_roi_heads(self, tensor_mode=True): """ Patching several inference functions inside ROIHeads and its subclasses Args: tensor_mode (bool): whether the inputs/outputs are caffe2's tensor format or not. Default to True. """ # NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference` # are called inside the same file as BaseXxxHead due to using mock.patch. 
kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__ mask_head_mod = mask_head.BaseMaskRCNNHead.__module__ mock_ctx_managers = [ mock_fastrcnn_outputs_inference( tensor_mode=tensor_mode, check=True, box_predictor_type=type(self.heads.box_predictor), ) ] if getattr(self.heads, "keypoint_on", False): mock_ctx_managers += [ mock_keypoint_rcnn_inference( tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint ) ] if getattr(self.heads, "mask_on", False): mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)] with contextlib.ExitStack() as stack: # python 3.3+ for mgr in mock_ctx_managers: stack.enter_context(mgr) yield
banmo-main
third_party/detectron2_old/detectron2/export/caffe2_patch.py
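Caffe2CompatibleConverter.create_from swaps a module's __class__ for an on-the-fly subclass that mixes the replacement class into the original one. A standalone toy sketch of that mechanism; LoggingMixin and the mixed class name are illustrative only:

import torch
import torch.nn as nn

class LoggingMixin:
    def forward(self, x):
        # Runs first because the MRO is (LoggingMixin, nn.ReLU), then defers
        # to the original forward via super().
        print("calling", type(self).__name__)
        return super().forward(x)

m = nn.ReLU()
m.__class__ = type("LoggingMixinMixedWithReLU", (LoggingMixin, nn.ReLU), {})
print(m(torch.tensor([-1.0, 2.0])))  # prints the mixed name, then tensor([0., 2.])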
# Copyright (c) Facebook, Inc. and its affiliates. import functools import io import struct import types import torch from detectron2.modeling import meta_arch from detectron2.modeling.box_regression import Box2BoxTransform from detectron2.modeling.meta_arch.panoptic_fpn import combine_semantic_and_instance_outputs from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K from detectron2.modeling.postprocessing import detector_postprocess, sem_seg_postprocess from detectron2.modeling.roi_heads import keypoint_head from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes from .c10 import Caffe2Compatible from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn from .shared import ( alias, check_set_pb_arg, get_pb_arg_floats, get_pb_arg_valf, get_pb_arg_vali, get_pb_arg_vals, mock_torch_nn_functional_interpolate, ) def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False): """ A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor]) to detectron2's format (i.e. list of Instances instance). This only works when the model follows the Caffe2 detectron's naming convention. Args: image_sizes (List[List[int, int]]): [H, W] of every image. tensor_outputs (Dict[str, Tensor]): external_output to its tensor. force_mask_on (Bool): if true, the it make sure there'll be pred_masks even if the mask is not found from tensor_outputs (usually due to model crash) """ results = [Instances(image_size) for image_size in image_sizes] batch_splits = tensor_outputs.get("batch_splits", None) if batch_splits: raise NotImplementedError() assert len(image_sizes) == 1 result = results[0] bbox_nms = tensor_outputs["bbox_nms"] score_nms = tensor_outputs["score_nms"] class_nms = tensor_outputs["class_nms"] # Detection will always success because Conv support 0-batch assert bbox_nms is not None assert score_nms is not None assert class_nms is not None if bbox_nms.shape[1] == 5: result.pred_boxes = RotatedBoxes(bbox_nms) else: result.pred_boxes = Boxes(bbox_nms) result.scores = score_nms result.pred_classes = class_nms.to(torch.int64) mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None) if mask_fcn_probs is not None: # finish the mask pred mask_probs_pred = mask_fcn_probs num_masks = mask_probs_pred.shape[0] class_pred = result.pred_classes indices = torch.arange(num_masks, device=class_pred.device) mask_probs_pred = mask_probs_pred[indices, class_pred][:, None] result.pred_masks = mask_probs_pred elif force_mask_on: # NOTE: there's no way to know the height/width of mask here, it won't be # used anyway when batch size is 0, so just set them to 0. result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8) keypoints_out = tensor_outputs.get("keypoints_out", None) kps_score = tensor_outputs.get("kps_score", None) if keypoints_out is not None: # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob) keypoints_tensor = keypoints_out # NOTE: it's possible that prob is not calculated if "should_output_softmax" # is set to False in HeatmapMaxKeypoint, so just using raw score, seems # it doesn't affect mAP. TODO: check more carefully. 
keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]] result.pred_keypoints = keypoint_xyp elif kps_score is not None: # keypoint heatmap to sparse data structure pred_keypoint_logits = kps_score keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result]) return results def _cast_to_f32(f64): return struct.unpack("f", struct.pack("f", f64))[0] def set_caffe2_compatible_tensor_mode(model, enable=True): def _fn(m): if isinstance(m, Caffe2Compatible): m.tensor_mode = enable model.apply(_fn) def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device): """ See get_caffe2_inputs() below. """ assert all(isinstance(x, dict) for x in batched_inputs) assert all(x["image"].dim() == 3 for x in batched_inputs) images = [x["image"] for x in batched_inputs] images = ImageList.from_tensors(images, size_divisibility) im_info = [] for input_per_image, image_size in zip(batched_inputs, images.image_sizes): target_height = input_per_image.get("height", image_size[0]) target_width = input_per_image.get("width", image_size[1]) # noqa # NOTE: The scale inside im_info is kept as convention and for providing # post-processing information if further processing is needed. For # current Caffe2 model definitions that don't include post-processing inside # the model, this number is not used. # NOTE: There can be a slight difference between width and height # scales, using a single number can results in numerical difference # compared with D2's post-processing. scale = target_height / image_size[0] im_info.append([image_size[0], image_size[1], scale]) im_info = torch.Tensor(im_info) return images.tensor.to(device), im_info.to(device) class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module): """ Base class for caffe2-compatible implementation of a meta architecture. The forward is traceable and its traced graph can be converted to caffe2 graph through ONNX. """ def __init__(self, cfg, torch_model): """ Args: cfg (CfgNode): torch_model (nn.Module): the detectron2 model (meta_arch) to be converted. """ super().__init__() self._wrapped_model = torch_model self.eval() set_caffe2_compatible_tensor_mode(self, True) def get_caffe2_inputs(self, batched_inputs): """ Convert pytorch-style structured inputs to caffe2-style inputs that are tuples of tensors. Args: batched_inputs (list[dict]): inputs to a detectron2 model in its standard format. Each dict has "image" (CHW tensor), and optionally "height" and "width". Returns: tuple[Tensor]: tuple of tensors that will be the inputs to the :meth:`forward` method. For existing models, the first is an NCHW tensor (padded and batched); the second is a im_info Nx3 tensor, where the rows are (height, width, unused legacy parameter) """ return convert_batched_inputs_to_c2_format( batched_inputs, self._wrapped_model.backbone.size_divisibility, self._wrapped_model.device, ) def encode_additional_info(self, predict_net, init_net): """ Save extra metadata that will be used by inference in the output protobuf. """ pass def forward(self, inputs): """ Run the forward in caffe2-style. It has to use caffe2-compatible ops and the method will be used for tracing. Args: inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`. They will be the inputs of the converted caffe2 graph. Returns: tuple[Tensor]: output tensors. They will be the outputs of the converted caffe2 graph. """ raise NotImplementedError def _caffe2_preprocess_image(self, inputs): """ Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward. 
It normalizes the input images, and the final caffe2 graph assumes the inputs have been batched already. """ data, im_info = inputs data = alias(data, "data") im_info = alias(im_info, "im_info") mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std normalized_data = (data - mean) / std normalized_data = alias(normalized_data, "normalized_data") # Pack (data, im_info) into ImageList which is recognized by self.inference. images = ImageList(tensor=normalized_data, image_sizes=im_info) return images @staticmethod def get_outputs_converter(predict_net, init_net): """ Creates a function that converts outputs of the caffe2 model to detectron2's standard format. The function uses information in `predict_net` and `init_net` that are available at inferene time. Therefore the function logic can be used in inference. The returned function has the following signature: def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs Where * batched_inputs (list[dict]): the original input format of the meta arch * c2_inputs (tuple[Tensor]): the caffe2 inputs. * c2_results (dict[str, Tensor]): the caffe2 output format, corresponding to the outputs of the :meth:`forward` function. * detectron2_outputs: the original output format of the meta arch. This function can be used to compare the outputs of the original meta arch and the converted caffe2 graph. Returns: callable: a callable of the above signature. """ raise NotImplementedError class Caffe2GeneralizedRCNN(Caffe2MetaArch): def __init__(self, cfg, torch_model): assert isinstance(torch_model, meta_arch.GeneralizedRCNN) torch_model = patch_generalized_rcnn(torch_model) super().__init__(cfg, torch_model) self.roi_heads_patcher = ROIHeadsPatcher( self._wrapped_model.roi_heads, cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT ) def encode_additional_info(self, predict_net, init_net): size_divisibility = self._wrapped_model.backbone.size_divisibility check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) check_set_pb_arg( predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") ) check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN") @mock_torch_nn_functional_interpolate() def forward(self, inputs): if not self.tensor_mode: return self._wrapped_model.inference(inputs) images = self._caffe2_preprocess_image(inputs) features = self._wrapped_model.backbone(images.tensor) proposals, _ = self._wrapped_model.proposal_generator(images, features) with self.roi_heads_patcher.mock_roi_heads(): detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) return tuple(detector_results[0].flatten()) @staticmethod def get_outputs_converter(predict_net, init_net): def f(batched_inputs, c2_inputs, c2_results): _, im_info = c2_inputs image_sizes = [[int(im[0]), int(im[1])] for im in im_info] results = assemble_rcnn_outputs_by_name(image_sizes, c2_results) return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) return f class Caffe2PanopticFPN(Caffe2MetaArch): def __init__(self, cfg, torch_model): assert isinstance(torch_model, meta_arch.PanopticFPN) torch_model = patch_generalized_rcnn(torch_model) super().__init__(cfg, torch_model) self.roi_heads_patcher = ROIHeadsPatcher( self._wrapped_model.roi_heads, cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT ) @mock_torch_nn_functional_interpolate() def forward(self, inputs): assert self.tensor_mode images = self._caffe2_preprocess_image(inputs) features = self._wrapped_model.backbone(images.tensor) 
sem_seg_results, _ = self._wrapped_model.sem_seg_head(features) sem_seg_results = alias(sem_seg_results, "sem_seg") proposals, _ = self._wrapped_model.proposal_generator(images, features) with self.roi_heads_patcher.mock_roi_heads(self.tensor_mode): detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) return tuple(detector_results[0].flatten()) + (sem_seg_results,) def encode_additional_info(self, predict_net, init_net): size_divisibility = self._wrapped_model.backbone.size_divisibility check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) check_set_pb_arg( predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") ) check_set_pb_arg(predict_net, "meta_architecture", "s", b"PanopticFPN") # Inference parameters: check_set_pb_arg( predict_net, "combine_overlap_threshold", "f", _cast_to_f32(self._wrapped_model.combine_overlap_thresh), ) check_set_pb_arg( predict_net, "combine_stuff_area_limit", "i", self._wrapped_model.combine_stuff_area_thresh, ) check_set_pb_arg( predict_net, "combine_instances_confidence_threshold", "f", _cast_to_f32(self._wrapped_model.combine_instances_score_thresh), ) @staticmethod def get_outputs_converter(predict_net, init_net): combine_overlap_threshold = get_pb_arg_valf(predict_net, "combine_overlap_threshold", None) combine_stuff_area_limit = get_pb_arg_vali(predict_net, "combine_stuff_area_limit", None) combine_instances_confidence_threshold = get_pb_arg_valf( predict_net, "combine_instances_confidence_threshold", None ) def f(batched_inputs, c2_inputs, c2_results): _, im_info = c2_inputs image_sizes = [[int(im[0]), int(im[1])] for im in im_info] detector_results = assemble_rcnn_outputs_by_name( image_sizes, c2_results, force_mask_on=True ) sem_seg_results = c2_results["sem_seg"] # copied from meta_arch/panoptic_fpn.py ... 
processed_results = [] for sem_seg_result, detector_result, input_per_image, image_size in zip( sem_seg_results, detector_results, batched_inputs, image_sizes ): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) detector_r = detector_postprocess(detector_result, height, width) processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) panoptic_r = combine_semantic_and_instance_outputs( detector_r, sem_seg_r.argmax(dim=0), combine_overlap_threshold, combine_stuff_area_limit, combine_instances_confidence_threshold, ) processed_results[-1]["panoptic_seg"] = panoptic_r return processed_results return f class Caffe2RetinaNet(Caffe2MetaArch): def __init__(self, cfg, torch_model): assert isinstance(torch_model, meta_arch.RetinaNet) super().__init__(cfg, torch_model) @mock_torch_nn_functional_interpolate() def forward(self, inputs): assert self.tensor_mode images = self._caffe2_preprocess_image(inputs) # explicitly return the images sizes to avoid removing "im_info" by ONNX # since it's not used in the forward path return_tensors = [images.image_sizes] features = self._wrapped_model.backbone(images.tensor) features = [features[f] for f in self._wrapped_model.head_in_features] for i, feature_i in enumerate(features): features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True) return_tensors.append(features[i]) pred_logits, pred_anchor_deltas = self._wrapped_model.head(features) for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)): return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i))) return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i))) return tuple(return_tensors) def encode_additional_info(self, predict_net, init_net): size_divisibility = self._wrapped_model.backbone.size_divisibility check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) check_set_pb_arg( predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") ) check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet") # Inference parameters: check_set_pb_arg( predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh) ) check_set_pb_arg( predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates ) check_set_pb_arg( predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh) ) check_set_pb_arg( predict_net, "max_detections_per_image", "i", self._wrapped_model.max_detections_per_image, ) check_set_pb_arg( predict_net, "bbox_reg_weights", "floats", [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights], ) self._encode_anchor_generator_cfg(predict_net) def _encode_anchor_generator_cfg(self, predict_net): # serialize anchor_generator for future use serialized_anchor_generator = io.BytesIO() torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator) # Ideally we can put anchor generating inside the model, then we don't # need to store this information. 
bytes = serialized_anchor_generator.getvalue() check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes) @staticmethod def get_outputs_converter(predict_net, init_net): self = types.SimpleNamespace() serialized_anchor_generator = io.BytesIO( get_pb_arg_vals(predict_net, "serialized_anchor_generator", None) ) self.anchor_generator = torch.load(serialized_anchor_generator) bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None) self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights)) self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None) self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None) self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None) self.max_detections_per_image = get_pb_arg_vali( predict_net, "max_detections_per_image", None ) # hack to reuse inference code from RetinaNet self.inference = functools.partial(meta_arch.RetinaNet.inference, self) self.inference_single_image = functools.partial( meta_arch.RetinaNet.inference_single_image, self ) def f(batched_inputs, c2_inputs, c2_results): _, im_info = c2_inputs image_sizes = [[int(im[0]), int(im[1])] for im in im_info] num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")]) pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)] pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)] # For each feature level, feature should have the same batch size and # spatial dimension as the box_cls and box_delta. dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits] anchors = self.anchor_generator(dummy_features) # self.num_classess can be inferred self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4) pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas] results = self.inference(anchors, pred_logits, pred_anchor_deltas, image_sizes) return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) return f META_ARCH_CAFFE2_EXPORT_TYPE_MAP = { "GeneralizedRCNN": Caffe2GeneralizedRCNN, "PanopticFPN": Caffe2PanopticFPN, "RetinaNet": Caffe2RetinaNet, }
banmo-main
third_party/detectron2_old/detectron2/export/caffe2_modeling.py
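convert_batched_inputs_to_c2_format reduces detectron2's list-of-dict inputs to a padded NCHW tensor plus an Nx3 im_info tensor of (height, width, scale) rows. A small sketch of just that batching step, with made-up image sizes and a fixed scale of 1.0:

import torch
from detectron2.structures import ImageList

imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
images = ImageList.from_tensors(imgs, 32)  # pad both images to a /32-divisible size
im_info = torch.tensor([[h, w, 1.0] for h, w in images.image_sizes])

print(images.tensor.shape)  # torch.Size([2, 3, 512, 640])
print(im_info)              # one (height, width, scale) row per image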
# -*- coding: utf-8 -*-
from .api import *
from .flatten import TracingAdapter
from .torchscript import scripting_with_instances, dump_torchscript_IR


__all__ = [k for k in globals().keys() if not k.startswith("_")]
banmo-main
third_party/detectron2_old/detectron2/export/__init__.py
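Since this __init__ re-exports TracingAdapter, the tracing workflow documented in flatten.py is reachable from the top-level export package. A minimal usage sketch with a toy model, assuming detectron2 is importable:

import torch
from detectron2.export import TracingAdapter

model = torch.nn.Linear(4, 2).eval()
adapter = TracingAdapter(model, (torch.rand(1, 4),))
traced = torch.jit.trace(adapter, adapter.flattened_inputs)
flat_out = traced(*adapter.flattened_inputs)  # flattened (tuple-of-tensor) outputs
# adapter.outputs_schema can rebuild the model's original output structure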
# Copyright (c) Facebook, Inc. and its affiliates. import collections import contextlib import copy import functools import logging import numpy as np import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union from unittest import mock import caffe2.python.utils as putils import torch import torch.nn.functional as F from caffe2.proto import caffe2_pb2 from caffe2.python import core, net_drawer, workspace from torch.nn.functional import interpolate as interp logger = logging.getLogger(__name__) # ==== torch/utils_toffee/cast.py ======================================= def to_device(t, device_str): """ This function is a replacement of .to(another_device) such that it allows the casting to be traced properly by explicitly calling the underlying copy ops. It also avoids introducing unncessary op when casting to the same device. """ src = t.device dst = torch.device(device_str) if src == dst: return t elif src.type == "cuda" and dst.type == "cpu": return torch.ops._caffe2.CopyGPUToCPU(t) elif src.type == "cpu" and dst.type == "cuda": return torch.ops._caffe2.CopyCPUToGPU(t) else: raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst)) # ==== torch/utils_toffee/interpolate.py ======================================= # Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py def BilinearInterpolation(tensor_in, up_scale): assert up_scale % 2 == 0, "Scale should be even" def upsample_filt(size): factor = (size + 1) // 2 if size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:size, :size] return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) kernel_size = int(up_scale) * 2 bil_filt = upsample_filt(kernel_size) dim = int(tensor_in.shape[1]) kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32) kernel[range(dim), range(dim), :, :] = bil_filt tensor_out = F.conv_transpose2d( tensor_in, weight=to_device(torch.Tensor(kernel), tensor_in.device), bias=None, stride=int(up_scale), padding=int(up_scale / 2), ) return tensor_out # NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if # using dynamic `scale_factor` rather than static `size`. (T43166860) # NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly. def onnx_compatibale_interpolate( input, size=None, scale_factor=None, mode="nearest", align_corners=None ): # NOTE: The input dimensions are interpreted in the form: # `mini-batch x channels x [optional depth] x [optional height] x width`. if size is None and scale_factor is not None: if input.dim() == 4: if isinstance(scale_factor, (int, float)): height_scale, width_scale = (scale_factor, scale_factor) else: assert isinstance(scale_factor, (tuple, list)) assert len(scale_factor) == 2 height_scale, width_scale = scale_factor assert not align_corners, "No matching C2 op for align_corners == True" if mode == "nearest": return torch.ops._caffe2.ResizeNearest( input, order="NCHW", width_scale=width_scale, height_scale=height_scale ) elif mode == "bilinear": logger.warning( "Use F.conv_transpose2d for bilinear interpolate" " because there's no such C2 op, this may cause significant" " slowdown and the boundary pixels won't be as same as" " using F.interpolate due to padding." 
) assert height_scale == width_scale return BilinearInterpolation(input, up_scale=height_scale) logger.warning("Output size is not static, it might cause ONNX conversion issue") return interp(input, size, scale_factor, mode, align_corners) @contextlib.contextmanager def mock_torch_nn_functional_interpolate(): if torch.onnx.is_in_onnx_export(): with mock.patch( "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate ): yield else: yield # ==== torch/utils_caffe2/ws_utils.py ========================================== class ScopedWS(object): def __init__(self, ws_name, is_reset, is_cleanup=False): self.ws_name = ws_name self.is_reset = is_reset self.is_cleanup = is_cleanup self.org_ws = "" def __enter__(self): self.org_ws = workspace.CurrentWorkspace() if self.ws_name is not None: workspace.SwitchWorkspace(self.ws_name, True) if self.is_reset: workspace.ResetWorkspace() return workspace def __exit__(self, *args): if self.is_cleanup: workspace.ResetWorkspace() if self.ws_name is not None: workspace.SwitchWorkspace(self.org_ws) def fetch_any_blob(name): bb = None try: bb = workspace.FetchBlob(name) except TypeError: bb = workspace.FetchInt8Blob(name) except Exception as e: logger.error("Get blob {} error: {}".format(name, e)) return bb # ==== torch/utils_caffe2/protobuf.py ========================================== def get_pb_arg(pb, arg_name): for x in pb.arg: if x.name == arg_name: return x return None def get_pb_arg_valf(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return arg.f if arg is not None else default_val def get_pb_arg_floats(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return list(map(float, arg.floats)) if arg is not None else default_val def get_pb_arg_ints(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return list(map(int, arg.ints)) if arg is not None else default_val def get_pb_arg_vali(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return arg.i if arg is not None else default_val def get_pb_arg_vals(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return arg.s if arg is not None else default_val def get_pb_arg_valstrings(pb, arg_name, default_val): arg = get_pb_arg(pb, arg_name) return list(arg.strings) if arg is not None else default_val def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False): arg = get_pb_arg(pb, arg_name) if arg is None: arg = putils.MakeArgument(arg_name, arg_value) assert hasattr(arg, arg_attr) pb.arg.extend([arg]) if allow_override and getattr(arg, arg_attr) != arg_value: logger.warning( "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value) ) setattr(arg, arg_attr, arg_value) else: assert arg is not None assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format( getattr(arg, arg_attr), arg_value ) def _create_const_fill_op_from_numpy(name, tensor, device_option=None): assert type(tensor) == np.ndarray kTypeNameMapper = { np.dtype("float32"): "GivenTensorFill", np.dtype("int32"): "GivenTensorIntFill", np.dtype("int64"): "GivenTensorInt64Fill", np.dtype("uint8"): "GivenTensorStringFill", } args_dict = {} if tensor.dtype == np.dtype("uint8"): args_dict.update({"values": [str(tensor.data)], "shape": [1]}) else: args_dict.update({"values": tensor, "shape": tensor.shape}) if device_option is not None: args_dict["device_option"] = device_option return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict) def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor): assert 
type(int8_tensor) == workspace.Int8Tensor kTypeNameMapper = { np.dtype("int32"): "Int8GivenIntTensorFill", np.dtype("uint8"): "Int8GivenTensorFill", } tensor = int8_tensor.data assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")] values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor return core.CreateOperator( kTypeNameMapper[tensor.dtype], [], [name], values=values, shape=tensor.shape, Y_scale=int8_tensor.scale, Y_zero_point=int8_tensor.zero_point, ) def create_const_fill_op( name: str, blob: Union[np.ndarray, workspace.Int8Tensor], device_option: Optional[caffe2_pb2.DeviceOption] = None, ) -> caffe2_pb2.OperatorDef: """ Given a blob object, return the Caffe2 operator that creates this blob as constant. Currently support NumPy tensor and Caffe2 Int8Tensor. """ tensor_type = type(blob) assert tensor_type in [ np.ndarray, workspace.Int8Tensor, ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format( name, type(blob) ) if tensor_type == np.ndarray: return _create_const_fill_op_from_numpy(name, blob, device_option) elif tensor_type == workspace.Int8Tensor: assert device_option is None return _create_const_fill_op_from_c2_int8_tensor(name, blob) def construct_init_net_from_params( params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None ) -> caffe2_pb2.NetDef: """ Construct the init_net from params dictionary """ init_net = caffe2_pb2.NetDef() device_options = device_options or {} for name, blob in params.items(): if isinstance(blob, str): logger.warning( ( "Blob {} with type {} is not supported in generating init net," " skipped.".format(name, type(blob)) ) ) continue init_net.op.extend( [create_const_fill_op(name, blob, device_option=device_options.get(name, None))] ) init_net.external_output.append(name) return init_net def get_producer_map(ssa): """ Return dict from versioned blob to (i, j), where i is index of producer op, j is the index of output of that op. """ producer_map = {} for i in range(len(ssa)): outputs = ssa[i][1] for j, outp in enumerate(outputs): producer_map[outp] = (i, j) return producer_map def get_consumer_map(ssa): """ Return dict from versioned blob to list of (i, j), where i is index of consumer op, j is the index of input of that op. """ consumer_map = collections.defaultdict(list) for i in range(len(ssa)): inputs = ssa[i][0] for j, inp in enumerate(inputs): consumer_map[inp].append((i, j)) return consumer_map def get_params_from_init_net( init_net: caffe2_pb2.NetDef, ) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]: """ Take the output blobs from init_net by running it. Outputs: params: dict from blob name to numpy array device_options: dict from blob name to the device option of its creating op """ # NOTE: this assumes that the params is determined by producer op with the # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor. 
def _get_device_option(producer_op): if producer_op.type == "CopyGPUToCPU": return caffe2_pb2.DeviceOption() else: return producer_op.device_option with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws: ws.RunNetOnce(init_net) params = {b: fetch_any_blob(b) for b in init_net.external_output} ssa, versions = core.get_ssa(init_net) producer_map = get_producer_map(ssa) device_options = { b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]]) for b in init_net.external_output } return params, device_options def _updater_raise(op, input_types, output_types): raise RuntimeError( "Failed to apply updater for op {} given input_types {} and" " output_types {}".format(op, input_types, output_types) ) def _generic_status_identifier( predict_net: caffe2_pb2.NetDef, status_updater: Callable, known_status: Dict[Tuple[str, int], Any], ) -> Dict[Tuple[str, int], Any]: """ Statically infer the status of each blob, the status can be such as device type (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here is versioned blob (Tuple[str, int]) in the format compatible with ssa. Inputs: predict_net: the caffe2 network status_updater: a callable, given an op and the status of its input/output, it returns the updated status of input/output. `None` is used for representing unknown status. known_status: a dict containing known status, used as initialization. Outputs: A dict mapping from versioned blob to its status """ ssa, versions = core.get_ssa(predict_net) versioned_ext_input = [(b, 0) for b in predict_net.external_input] versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output] all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa]) allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output) assert all(k in allowed_vbs for k in known_status) assert all(v is not None for v in known_status.values()) _known_status = copy.deepcopy(known_status) def _check_and_update(key, value): assert value is not None if key in _known_status: if not _known_status[key] == value: raise RuntimeError( "Confilict status for {}, existing status {}, new status {}".format( key, _known_status[key], value ) ) _known_status[key] = value def _update_i(op, ssa_i): versioned_inputs = ssa_i[0] versioned_outputs = ssa_i[1] inputs_status = [_known_status.get(b, None) for b in versioned_inputs] outputs_status = [_known_status.get(b, None) for b in versioned_outputs] new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status) for versioned_blob, status in zip( versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status ): if status is not None: _check_and_update(versioned_blob, status) for op, ssa_i in zip(predict_net.op, ssa): _update_i(op, ssa_i) for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)): _update_i(op, ssa_i) # NOTE: This strictly checks all the blob from predict_net must be assgined # a known status. However sometimes it's impossible (eg. having deadend op), # we may relax this constraint if for k in all_versioned_blobs: if k not in _known_status: raise NotImplementedError( "Can not infer the status for {}. 
Currently only support the case where" " a single forward and backward pass can identify status for all blobs.".format(k) ) return _known_status def infer_device_type( predict_net: caffe2_pb2.NetDef, known_status: Dict[Tuple[str, int], Any], device_name_style: str = "caffe2", ) -> Dict[Tuple[str, int], str]: """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob""" assert device_name_style in ["caffe2", "pytorch"] _CPU_STR = "cpu" _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda" def _copy_cpu_to_gpu_updater(op, input_types, output_types): if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR: _updater_raise(op, input_types, output_types) return ([_CPU_STR], [_GPU_STR]) def _copy_gpu_to_cpu_updater(op, input_types, output_types): if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR: _updater_raise(op, input_types, output_types) return ([_GPU_STR], [_CPU_STR]) def _other_ops_updater(op, input_types, output_types): non_none_types = [x for x in input_types + output_types if x is not None] if len(non_none_types) > 0: the_type = non_none_types[0] if not all(x == the_type for x in non_none_types): _updater_raise(op, input_types, output_types) else: the_type = None return ([the_type for _ in op.input], [the_type for _ in op.output]) def _device_updater(op, *args, **kwargs): return { "CopyCPUToGPU": _copy_cpu_to_gpu_updater, "CopyGPUToCPU": _copy_gpu_to_cpu_updater, }.get(op.type, _other_ops_updater)(op, *args, **kwargs) return _generic_status_identifier(predict_net, _device_updater, known_status) # ==== torch/utils_caffe2/vis.py =============================================== def _modify_blob_names(ops, blob_rename_f): ret = [] def _replace_list(blob_list, replaced_list): del blob_list[:] blob_list.extend(replaced_list) for x in ops: cur = copy.deepcopy(x) _replace_list(cur.input, list(map(blob_rename_f, cur.input))) _replace_list(cur.output, list(map(blob_rename_f, cur.output))) ret.append(cur) return ret def _rename_blob(name, blob_sizes, blob_ranges): def _list_to_str(bsize): ret = ", ".join([str(x) for x in bsize]) ret = "[" + ret + "]" return ret ret = name if blob_sizes is not None and name in blob_sizes: ret += "\n" + _list_to_str(blob_sizes[name]) if blob_ranges is not None and name in blob_ranges: ret += "\n" + _list_to_str(blob_ranges[name]) return ret # graph_name could not contain word 'graph' def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None): blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges) return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f) def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None): graph = None ops = net.op if blob_rename_func is not None: ops = _modify_blob_names(ops, blob_rename_func) if not op_only: graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB") else: graph = net_drawer.GetPydotGraphMinimal( ops, graph_name, rankdir="TB", minimal_dependency=True ) try: par_dir = os.path.dirname(file_name) if not os.path.exists(par_dir): os.makedirs(par_dir) format = os.path.splitext(os.path.basename(file_name))[-1] if format == ".png": graph.write_png(file_name) elif format == ".pdf": graph.write_pdf(file_name) elif format == ".svg": graph.write_svg(file_name) else: print("Incorrect format {}".format(format)) except Exception as e: print("Error when writing graph to image {}".format(e)) return graph # ==== torch/utils_toffee/aten_to_caffe2.py 
==================================== def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef): """ For ONNX exported model, GroupNorm will be represented as ATen op, this can be a drop in replacement from ATen to GroupNorm """ count = 0 for op in predict_net.op: if op.type == "ATen": op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3 if op_name and op_name.decode() == "group_norm": op.arg.remove(get_pb_arg(op, "operator")) if get_pb_arg_vali(op, "cudnn_enabled", None): op.arg.remove(get_pb_arg(op, "cudnn_enabled")) num_groups = get_pb_arg_vali(op, "num_groups", None) if num_groups is not None: op.arg.remove(get_pb_arg(op, "num_groups")) check_set_pb_arg(op, "group", "i", num_groups) op.type = "GroupNorm" count += 1 if count > 1: logger.info("Replaced {} ATen operator to GroupNormOp".format(count)) # ==== torch/utils_toffee/alias.py ============================================= def alias(x, name, is_backward=False): if not torch.onnx.is_in_onnx_export(): return x assert isinstance(x, torch.Tensor) return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward) def fuse_alias_placeholder(predict_net, init_net): """Remove AliasWithName placeholder and rename the input/output of it""" # First we finish all the re-naming for i, op in enumerate(predict_net.op): if op.type == "AliasWithName": assert len(op.input) == 1 assert len(op.output) == 1 name = get_pb_arg_vals(op, "name", None).decode() is_backward = bool(get_pb_arg_vali(op, "is_backward", 0)) rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward) rename_op_output(predict_net, i, 0, name) # Remove AliasWithName, should be very safe since it's a non-op new_ops = [] for op in predict_net.op: if op.type != "AliasWithName": new_ops.append(op) else: # safety check assert op.input == op.output assert op.input[0] == op.arg[0].s.decode() del predict_net.op[:] predict_net.op.extend(new_ops) # ==== torch/utils_caffe2/graph_transform.py =================================== class IllegalGraphTransformError(ValueError): """When a graph transform function call can't be executed.""" def _rename_versioned_blob_in_proto( proto: caffe2_pb2.NetDef, old_name: str, new_name: str, version: int, ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]], start_versions: Dict[str, int], end_versions: Dict[str, int], ): """In given proto, rename all blobs with matched version""" # Operater list for op, i_th_ssa in zip(proto.op, ssa): versioned_inputs, versioned_outputs = i_th_ssa for i in range(len(op.input)): if versioned_inputs[i] == (old_name, version): op.input[i] = new_name for i in range(len(op.output)): if versioned_outputs[i] == (old_name, version): op.output[i] = new_name # external_input if start_versions.get(old_name, 0) == version: for i in range(len(proto.external_input)): if proto.external_input[i] == old_name: proto.external_input[i] = new_name # external_output if end_versions.get(old_name, 0) == version: for i in range(len(proto.external_output)): if proto.external_output[i] == old_name: proto.external_output[i] = new_name def rename_op_input( predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, op_id: int, input_id: int, new_name: str, from_producer: bool = False, ): """ Rename the op_id-th operator in predict_net, change it's input_id-th input's name to the new_name. It also does automatic re-route and change external_input and init_net if necessary. - It requires the input is only consumed by this op. - This function modifies predict_net and init_net in-place. 
- When from_producer is enabled, this also updates other operators that consume the same input. Be cautious because this may trigger unintended behavior. """ assert isinstance(predict_net, caffe2_pb2.NetDef) assert isinstance(init_net, caffe2_pb2.NetDef) init_net_ssa, init_net_versions = core.get_ssa(init_net) predict_net_ssa, predict_net_versions = core.get_ssa( predict_net, copy.deepcopy(init_net_versions) ) versioned_inputs, versioned_outputs = predict_net_ssa[op_id] old_name, version = versioned_inputs[input_id] if from_producer: producer_map = get_producer_map(predict_net_ssa) if (old_name, version) not in producer_map: raise NotImplementedError( "Can't find producer, the input {} is probably from" " init_net, this is not supported yet.".format(old_name) ) producer = producer_map[(old_name, version)] rename_op_output(predict_net, producer[0], producer[1], new_name) return def contain_targets(op_ssa): return (old_name, version) in op_ssa[0] is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa] if sum(is_consumer) > 1: raise IllegalGraphTransformError( ( "Input '{}' of operator(#{}) is consumed by other ops, please use" + " rename_op_output on the producer instead. Offending op: \n{}" ).format(old_name, op_id, predict_net.op[op_id]) ) # update init_net _rename_versioned_blob_in_proto( init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions ) # update predict_net _rename_versioned_blob_in_proto( predict_net, old_name, new_name, version, predict_net_ssa, init_net_versions, predict_net_versions, ) def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str): """ Rename the op_id-th operator in predict_net, change its output_id-th output's name to the new_name. It also does automatic re-routing and changes external_output if necessary. - It allows multiple consumers of its output. - This function modifies predict_net in-place, doesn't need init_net. """ assert isinstance(predict_net, caffe2_pb2.NetDef) ssa, blob_versions = core.get_ssa(predict_net) versioned_inputs, versioned_outputs = ssa[op_id] old_name, version = versioned_outputs[output_id] # update predict_net _rename_versioned_blob_in_proto( predict_net, old_name, new_name, version, ssa, {}, blob_versions ) def get_sub_graph_external_input_output( predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int] ) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]: """ Return the list of external input/output of sub-graph, each element is a tuple of the name and the corresponding version in predict_net. External input/output are defined the same way as in caffe2 NetDef.
""" ssa, versions = core.get_ssa(predict_net) all_inputs = [] all_outputs = [] for op_id in sub_graph_op_indices: all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs] all_outputs += list(ssa[op_id][1]) # ssa output won't repeat # for versioned blobs, external inputs are just those blob in all_inputs # but not in all_outputs ext_inputs = [inp for inp in all_inputs if inp not in all_outputs] # external outputs are essentially outputs of this subgraph that are used # outside of this sub-graph (including predict_net.external_output) all_other_inputs = sum( (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices), [(outp, versions[outp]) for outp in predict_net.external_output], ) ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)] return ext_inputs, ext_outputs class DiGraph: """A DAG representation of caffe2 graph, each vertice is a versioned blob.""" def __init__(self): self.vertices = set() self.graph = collections.defaultdict(list) def add_edge(self, u, v): self.graph[u].append(v) self.vertices.add(u) self.vertices.add(v) # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/ def get_all_paths(self, s, d): visited = {k: False for k in self.vertices} path = [] all_paths = [] def _get_all_paths_util(graph, u, d, visited, path): visited[u] = True path.append(u) if u == d: all_paths.append(copy.deepcopy(path)) else: for i in graph[u]: if not visited[i]: _get_all_paths_util(graph, i, d, visited, path) path.pop() visited[u] = False _get_all_paths_util(self.graph, s, d, visited, path) return all_paths @staticmethod def from_ssa(ssa): graph = DiGraph() for op_id in range(len(ssa)): for inp in ssa[op_id][0]: for outp in ssa[op_id][1]: graph.add_edge(inp, outp) return graph def _get_dependency_chain(ssa, versioned_target, versioned_source): """ Return the index list of relevant operator to produce target blob from source blob, if there's no dependency, return empty list. """ # finding all paths between nodes can be O(N!), thus we can only search # in the subgraph using the op starting from the first consumer of source blob # to the producer of the target blob. consumer_map = get_consumer_map(ssa) producer_map = get_producer_map(ssa) start_op = min(x[0] for x in consumer_map[versioned_source]) - 15 end_op = ( producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op ) sub_graph_ssa = ssa[start_op : end_op + 1] if len(sub_graph_ssa) > 30: logger.warning( "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it" " might take non-trival time to find all paths between them.".format( versioned_source, versioned_target, start_op, end_op ) ) dag = DiGraph.from_ssa(sub_graph_ssa) paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths] return sorted(set().union(*[set(ops) for ops in ops_in_paths])) def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]: """ Idenfity the reshape sub-graph in a protobuf. The reshape sub-graph is defined as matching the following pattern: (input_blob) -> Op_1 -> ... 
-> Op_N -> (new_shape) -─┐ └-------------------------------------------> Reshape -> (output_blob) Return: List of sub-graphs, each sub-graph is represented as a list of indices of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape] """ ssa, _ = core.get_ssa(predict_net) ret = [] for i, op in enumerate(predict_net.op): if op.type == "Reshape": assert len(op.input) == 2 input_ssa = ssa[i][0] data_source = input_ssa[0] shape_source = input_ssa[1] op_indices = _get_dependency_chain(ssa, shape_source, data_source) ret.append(op_indices + [i]) return ret def remove_reshape_for_fc(predict_net, params): """ In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping doesn't work well with ONNX and Int8 tools, and cause using extra ops (eg. ExpandDims) that might not be available on mobile. Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape after exporting ONNX model. """ from caffe2.python import core # find all reshape sub-graph that can be removed, which is now all Reshape # sub-graph whose output is only consumed by FC. # TODO: to make it safer, we may need the actually value to better determine # if a Reshape before FC is removable. reshape_sub_graphs = identify_reshape_sub_graph(predict_net) sub_graphs_to_remove = [] for reshape_sub_graph in reshape_sub_graphs: reshape_op_id = reshape_sub_graph[-1] assert predict_net.op[reshape_op_id].type == "Reshape" ssa, _ = core.get_ssa(predict_net) reshape_output = ssa[reshape_op_id][1][0] consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]] if all(predict_net.op[consumer].type == "FC" for consumer in consumers): # safety check if the sub-graph is isolated, for this reshape sub-graph, # it means it has one non-param external input and one external output. ext_inputs, ext_outputs = get_sub_graph_external_input_output( predict_net, reshape_sub_graph ) non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1: sub_graphs_to_remove.append(reshape_sub_graph) # perform removing subgraph by: # 1: rename the Reshape's output to its input, then the graph can be # seen as in-place itentify, meaning whose external input/output are the same. # 2: simply remove those ops. 
remove_op_ids = [] params_to_remove = [] for sub_graph in sub_graphs_to_remove: logger.info( "Remove Reshape sub-graph:\n{}".format( "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph]) ) ) reshape_op_id = sub_graph[-1] new_reshap_output = predict_net.op[reshape_op_id].input[0] rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output) ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph) non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0] assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1 assert ext_outputs[0][0] == non_params_ext_inputs[0][0] assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1 remove_op_ids.extend(sub_graph) params_to_remove.extend(params_ext_inputs) predict_net = copy.deepcopy(predict_net) new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids] del predict_net.op[:] predict_net.op.extend(new_ops) for versioned_params in params_to_remove: name = versioned_params[0] logger.info("Remove params: {} from init_net and predict_net.external_input".format(name)) del params[name] predict_net.external_input.remove(name) return predict_net, params def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef): """ In-place fuse extra copy ops between cpu/gpu for the following case: a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1 -CopyBToA> c2 -NextOp2-> d2 The fused network will look like: a -NextOp1-> d1 -NextOp2-> d2 """ _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"] def _fuse_once(predict_net): ssa, blob_versions = core.get_ssa(predict_net) consumer_map = get_consumer_map(ssa) versioned_external_output = [ (name, blob_versions[name]) for name in predict_net.external_output ] for op_id, op in enumerate(predict_net.op): if op.type in _COPY_OPS: fw_copy_versioned_output = ssa[op_id][1][0] consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]] reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)] is_fusable = ( len(consumer_ids) > 0 and fw_copy_versioned_output not in versioned_external_output and all( predict_net.op[_op_id].type == reverse_op_type and ssa[_op_id][1][0] not in versioned_external_output for _op_id in consumer_ids ) ) if is_fusable: for rv_copy_op_id in consumer_ids: # making each NextOp uses "a" directly and removing Copy ops rs_copy_versioned_output = ssa[rv_copy_op_id][1][0] next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0] predict_net.op[next_op_id].input[inp_id] = op.input[0] # remove CopyOps new_ops = [ op for i, op in enumerate(predict_net.op) if i != op_id and i not in consumer_ids ] del predict_net.op[:] predict_net.op.extend(new_ops) return True return False # _fuse_once returns False is nothing can be fused while _fuse_once(predict_net): pass def remove_dead_end_ops(net_def: caffe2_pb2.NetDef): """remove ops if its output is not used or not in external_output""" ssa, versions = core.get_ssa(net_def) versioned_external_output = [(name, versions[name]) for name in net_def.external_output] consumer_map = get_consumer_map(ssa) removed_op_ids = set() def _is_dead_end(versioned_blob): return not ( versioned_blob in versioned_external_output or ( len(consumer_map[versioned_blob]) > 0 and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob]) ) ) for i, ssa_i in reversed(list(enumerate(ssa))): versioned_outputs = ssa_i[1] if all(_is_dead_end(outp) for outp in versioned_outputs): removed_op_ids.add(i) # simply removing those deadend ops 
should have no effect on external_output new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids] del net_def.op[:] net_def.op.extend(new_ops)
banmo-main
third_party/detectron2_old/detectron2/export/shared.py
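# A minimal usage sketch for the in-place NetDef transforms above, assuming
# caffe2 is installed and this module is importable as detectron2.export.shared:
# build a tiny net with an unused branch and let remove_dead_end_ops() prune it.
from caffe2.proto import caffe2_pb2
from caffe2.python import core

from detectron2.export.shared import remove_dead_end_ops

net = caffe2_pb2.NetDef()
net.op.extend(
    [
        core.CreateOperator("Relu", ["x"], ["y"]),          # feeds the external output
        core.CreateOperator("Sigmoid", ["x"], ["unused"]),  # output consumed by nothing
    ]
)
net.external_input.append("x")
net.external_output.append("y")

remove_dead_end_ops(net)  # in-place: drops the Sigmoid op whose output is a dead end
assert [op.type for op in net.op] == ["Relu"]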
# Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import os import torch from caffe2.proto import caffe2_pb2 from torch import nn from detectron2.config import CfgNode from detectron2.utils.file_io import PathManager from .caffe2_inference import ProtobufDetectionModel from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph __all__ = [ "add_export_config", "export_caffe2_model", "Caffe2Model", "export_onnx_model", "Caffe2Tracer", ] def add_export_config(cfg): """ Add options needed by caffe2 export. Args: cfg (CfgNode): a detectron2 config Returns: CfgNode: an updated config with new options that will be used by :class:`Caffe2Tracer`. """ is_frozen = cfg.is_frozen() cfg.defrost() cfg.EXPORT_CAFFE2 = CfgNode() cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False if is_frozen: cfg.freeze() return cfg class Caffe2Tracer: """ Make a detectron2 model traceable with Caffe2 operators. This class creates a traceable version of a detectron2 model which: 1. Rewrite parts of the model using ops in Caffe2. Note that some ops do not have GPU implementation in Caffe2. 2. Remove post-processing and only produce raw layer outputs After making a traceable model, the class provide methods to export such a model to different deployment formats. Exported graph produced by this class take two input tensors: 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]). (H, W) often has to be padded to multiple of 32 (depend on the model architecture). 2. 1x3 float "im_info", each row of which is (height, width, 1.0). Height and width are true image shapes before padding. The class currently only supports models using builtin meta architectures. Batch inference is not supported, and contributions are welcome. """ def __init__(self, cfg: CfgNode, model: nn.Module, inputs): """ Args: cfg (CfgNode): a detectron2 config, with extra export-related options added by :func:`add_export_config`. It's used to construct caffe2-compatible model. model (nn.Module): An original pytorch model. Must be among a few official models in detectron2 that can be converted to become caffe2-compatible automatically. Weights have to be already loaded to this model. inputs: sample inputs that the given model takes for inference. Will be used to trace the model. For most models, random inputs with no detected objects will not work as they lead to wrong traces. """ assert isinstance(cfg, CfgNode), cfg assert isinstance(model, torch.nn.Module), type(model) if "EXPORT_CAFFE2" not in cfg: cfg = add_export_config(cfg) # will just the defaults # TODO make it support custom models, by passing in c2 model directly C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE] self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model)) self.inputs = inputs self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs) def export_caffe2(self): """ Export the model to Caffe2's protobuf format. The returned object can be saved with its :meth:`.save_protobuf()` method. The result can be loaded and executed using Caffe2 runtime. Returns: :class:`Caffe2Model` """ from .caffe2_export import export_caffe2_detection_model predict_net, init_net = export_caffe2_detection_model( self.traceable_model, self.traceable_inputs ) return Caffe2Model(predict_net, init_net) def export_onnx(self): """ Export the model to ONNX format. 
Note that the exported model contains custom ops only available in caffe2, therefore it cannot be directly executed by other runtime (such as onnxruntime or TensorRT). Post-processing or transformation passes may be applied on the model to accommodate different runtimes, but we currently do not provide support for them. Returns: onnx.ModelProto: an onnx model. """ from .caffe2_export import export_onnx_model as export_onnx_model_impl return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,)) def export_torchscript(self): """ Export the model to a ``torch.jit.TracedModule`` by tracing. The returned object can be saved to a file by ``.save()``. Returns: torch.jit.TracedModule: a torch TracedModule """ logger = logging.getLogger(__name__) logger.info("Tracing the model with torch.jit.trace ...") with torch.no_grad(): return torch.jit.trace(self.traceable_model, (self.traceable_inputs,)) class Caffe2Model(nn.Module): """ A wrapper around the traced model in Caffe2's protobuf format. The exported graph has different inputs/outputs from the original Pytorch model, as explained in :class:`Caffe2Tracer`. This class wraps around the exported graph to simulate the same interface as the original Pytorch model. It also provides functions to save/load models in Caffe2's format.' Examples: :: c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2() inputs = [{"image": img_tensor_CHW}] outputs = c2_model(inputs) orig_outputs = torch_model(inputs) """ def __init__(self, predict_net, init_net): super().__init__() self.eval() # always in eval mode self._predict_net = predict_net self._init_net = init_net self._predictor = None __init__.__HIDE_SPHINX_DOC__ = True @property def predict_net(self): """ caffe2.core.Net: the underlying caffe2 predict net """ return self._predict_net @property def init_net(self): """ caffe2.core.Net: the underlying caffe2 init net """ return self._init_net def save_protobuf(self, output_dir): """ Save the model as caffe2's protobuf format. It saves the following files: * "model.pb": definition of the graph. Can be visualized with tools like `netron <https://github.com/lutzroeder/netron>`_. * "model_init.pb": model parameters * "model.pbtxt": human-readable definition of the graph. Not needed for deployment. Args: output_dir (str): the output directory to save protobuf files. """ logger = logging.getLogger(__name__) logger.info("Saving model to {} ...".format(output_dir)) if not PathManager.exists(output_dir): PathManager.mkdirs(output_dir) with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f: f.write(self._predict_net.SerializeToString()) with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f: f.write(str(self._predict_net)) with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f: f.write(self._init_net.SerializeToString()) def save_graph(self, output_file, inputs=None): """ Save the graph as SVG format. Args: output_file (str): a SVG file inputs: optional inputs given to the model. If given, the inputs will be used to run the graph to record shape of every tensor. The shape information will be saved together with the graph. 
""" from .caffe2_export import run_and_save_graph if inputs is None: save_graph(self._predict_net, output_file, op_only=False) else: size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0) device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii") inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device) inputs = [x.cpu().numpy() for x in inputs] run_and_save_graph(self._predict_net, self._init_net, inputs, output_file) @staticmethod def load_protobuf(dir): """ Args: dir (str): a directory used to save Caffe2Model with :meth:`save_protobuf`. The files "model.pb" and "model_init.pb" are needed. Returns: Caffe2Model: the caffe2 model loaded from this directory. """ predict_net = caffe2_pb2.NetDef() with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f: predict_net.ParseFromString(f.read()) init_net = caffe2_pb2.NetDef() with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f: init_net.ParseFromString(f.read()) return Caffe2Model(predict_net, init_net) def __call__(self, inputs): """ An interface that wraps around a Caffe2 model and mimics detectron2's models' input/output format. See details about the format at :doc:`/tutorials/models`. This is used to compare the outputs of caffe2 model with its original torch model. Due to the extra conversion between Pytorch/Caffe2, this method is not meant for benchmark. Because of the conversion, this method also has dependency on detectron2 in order to convert to detectron2's output format. """ if self._predictor is None: self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net) return self._predictor(inputs) def export_caffe2_model(cfg, model, inputs): logger = logging.getLogger(__name__) logger.warning( "export_caffe2_model() is deprecated. Please use `Caffe2Tracer().export_caffe2() instead." ) return Caffe2Tracer(cfg, model, inputs).export_caffe2() def export_onnx_model(cfg, model, inputs): logger = logging.getLogger(__name__) logger.warning( "export_caffe2_model() is deprecated. Please use `Caffe2Tracer().export_onnx() instead." ) return Caffe2Tracer(cfg, model, inputs).export_onnx()
banmo-main
third_party/detectron2_old/detectron2/export/api.py
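# A minimal sketch of the export workflow offered by Caffe2Tracer above;
# `cfg`, `torch_model` and `sample_inputs` are placeholders for a detectron2
# config, a weight-loaded builtin model, and a representative input batch.
from detectron2.export import Caffe2Tracer, add_export_config

cfg = add_export_config(cfg)               # add the EXPORT_CAFFE2 options if missing
tracer = Caffe2Tracer(cfg, torch_model, sample_inputs)

c2_model = tracer.export_caffe2()          # Caffe2Model wrapper around predict/init nets
c2_model.save_protobuf("./caffe2_export")  # writes model.pb / model_init.pb / model.pbtxt

onnx_model = tracer.export_onnx()          # onnx.ModelProto containing caffe2 custom ops
ts_model = tracer.export_torchscript()     # torch.jit.TracedModule
ts_model.save("./model_traced.pt")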
# Copyright (c) Facebook, Inc. and its affiliates. import logging import numpy as np from itertools import count import torch from caffe2.proto import caffe2_pb2 from caffe2.python import core from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type logger = logging.getLogger(__name__) # ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ====== class ProtobufModel(torch.nn.Module): """ Wrapper of a caffe2's protobuf model. It works just like nn.Module, but running caffe2 under the hood. Input/Output are tuple[tensor] that match the caffe2 net's external_input/output. """ _ids = count(0) def __init__(self, predict_net, init_net): logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...") super().__init__() assert isinstance(predict_net, caffe2_pb2.NetDef) assert isinstance(init_net, caffe2_pb2.NetDef) # create unique temporary workspace for each instance self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids)) self.net = core.Net(predict_net) logger.info("Running init_net once to fill the parameters ...") with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: ws.RunNetOnce(init_net) uninitialized_external_input = [] for blob in self.net.Proto().external_input: if blob not in ws.Blobs(): uninitialized_external_input.append(blob) ws.CreateBlob(blob) ws.CreateNet(self.net) self._error_msgs = set() self._input_blobs = uninitialized_external_input def _infer_output_devices(self, inputs): """ Returns: list[str]: list of device for each external output """ def _get_device_type(torch_tensor): assert torch_tensor.device.type in ["cpu", "cuda"] assert torch_tensor.device.index == 0 return torch_tensor.device.type predict_net = self.net.Proto() input_device_types = { (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs) } device_type_map = infer_device_type( predict_net, known_status=input_device_types, device_name_style="pytorch" ) ssa, versions = core.get_ssa(predict_net) versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] output_devices = [device_type_map[outp] for outp in versioned_outputs] return output_devices def forward(self, inputs): """ Args: inputs (tuple[torch.Tensor]) Returns: tuple[torch.Tensor] """ assert len(inputs) == len(self._input_blobs), ( f"Length of inputs ({len(inputs)}) " f"doesn't match the required input blobs: {self._input_blobs}" ) with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: for b, tensor in zip(self._input_blobs, inputs): ws.FeedBlob(b, tensor) try: ws.RunNet(self.net.Proto().name) except RuntimeError as e: if not str(e) in self._error_msgs: self._error_msgs.add(str(e)) logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) logger.warning("Catch the error and use partial results.") c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output] # Remove outputs of current run, this is necessary in order to # prevent fetching the result from previous run if the model fails # in the middle. for b in self.net.Proto().external_output: # Needs to create uninitialized blob to make the net runable. # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), # but there'no such API. 
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).") # Cast output to torch.Tensor on the desired device output_devices = ( self._infer_output_devices(inputs) if any(t.device.type != "cpu" for t in inputs) else ["cpu" for _ in self.net.Proto().external_output] ) outputs = [] for name, c2_output, device in zip( self.net.Proto().external_output, c2_outputs, output_devices ): if not isinstance(c2_output, np.ndarray): raise RuntimeError( "Invalid output for blob {}, received: {}".format(name, c2_output) ) outputs.append(torch.tensor(c2_output).to(device=device)) return tuple(outputs) class ProtobufDetectionModel(torch.nn.Module): """ A class works just like a pytorch meta arch in terms of inference, but running caffe2 model under the hood. """ def __init__(self, predict_net, init_net, *, convert_outputs=None): """ Args: predict_net, init_net (core.Net): caffe2 nets convert_outptus (callable): a function that converts caffe2 outputs to the same format of the original pytorch model. By default, use the one defined in the caffe2 meta_arch. """ super().__init__() self.protobuf_model = ProtobufModel(predict_net, init_net) self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") if convert_outputs is None: meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) else: self._convert_outputs = convert_outputs def _convert_inputs(self, batched_inputs): # currently all models convert inputs in the same way return convert_batched_inputs_to_c2_format( batched_inputs, self.size_divisibility, self.device ) def forward(self, batched_inputs): c2_inputs = self._convert_inputs(batched_inputs) c2_results = self.protobuf_model(c2_inputs) c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results)) return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
banmo-main
third_party/detectron2_old/detectron2/export/caffe2_inference.py
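# A small sketch of running inference through the protobuf wrappers above,
# assuming a model was previously saved with Caffe2Model.save_protobuf();
# the directory path and the random image tensor are placeholders.
import torch
from detectron2.export import Caffe2Model

c2_model = Caffe2Model.load_protobuf("./caffe2_export")    # builds ProtobufDetectionModel lazily
image = (torch.rand(3, 480, 640) * 255).to(torch.float32)  # CHW image in [0, 255]
outputs = c2_model([{"image": image, "height": 480, "width": 640}])
# for GeneralizedRCNN-style exports the converter returns detectron2 Instances
print(outputs[0]["instances"])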
# Copyright (c) Facebook, Inc. and its affiliates. import os import sys import tempfile from contextlib import ExitStack, contextmanager from copy import deepcopy from unittest import mock import torch from torch import nn # need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964 import detectron2 # noqa F401 from detectron2.structures import Boxes, Instances from detectron2.utils.env import _import_file _counter = 0 def _clear_jit_cache(): from torch.jit._recursive import concrete_type_store from torch.jit._state import _jit_caching_layer concrete_type_store.type_store.clear() # for modules _jit_caching_layer.clear() # for free functions def _add_instances_conversion_methods(newInstances): """ Add from_instances methods to the scripted Instances class. """ cls_name = newInstances.__name__ @torch.jit.unused def from_instances(instances: Instances): """ Create scripted Instances from original Instances """ fields = instances.get_fields() image_size = instances.image_size ret = newInstances(image_size) for name, val in fields.items(): assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}" setattr(ret, name, deepcopy(val)) return ret newInstances.from_instances = from_instances @contextmanager def patch_instances(fields): """ A contextmanager, under which the Instances class in detectron2 is replaced by a statically-typed scriptable class, defined by `fields`. See more in `scripting_with_instances`. """ with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile( mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False ) as f: try: # Objects that use Instances should not reuse previously-compiled # results in cache, because `Instances` could be a new class each time. _clear_jit_cache() cls_name, s = _gen_instance_module(fields) f.write(s) f.flush() f.close() module = _import(f.name) new_instances = getattr(module, cls_name) _ = torch.jit.script(new_instances) # let torchscript think Instances was scripted already Instances.__torch_script_class__ = True # let torchscript find new_instances when looking for the jit type of Instances Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances) _add_instances_conversion_methods(new_instances) yield new_instances finally: try: del Instances.__torch_script_class__ del Instances._jit_override_qualname except AttributeError: pass sys.modules.pop(module.__name__) def _gen_instance_class(fields): """ Args: fields (dict[name: type]) """ class _FieldType: def __init__(self, name, type_): assert isinstance(name, str), f"Field name must be str, got {name}" self.name = name self.type_ = type_ self.annotation = f"{type_.__module__}.{type_.__name__}" fields = [_FieldType(k, v) for k, v in fields.items()] def indent(level, s): return " " * 4 * level + s lines = [] global _counter _counter += 1 cls_name = "ScriptedInstances{}".format(_counter) field_names = tuple(x.name for x in fields) lines.append( f""" class {cls_name}: def __init__(self, image_size: Tuple[int, int]): self.image_size = image_size self._field_names = {field_names} """ ) for f in fields: lines.append( indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], None)") ) for f in fields: lines.append( f""" @property def {f.name}(self) -> {f.annotation}: # has to use a local for type refinement # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement t = self._{f.name} assert t is not None return t @{f.name}.setter def {f.name}(self, value: 
{f.annotation}) -> None: self._{f.name} = value """ ) # support method `__len__` lines.append( """ def __len__(self) -> int: """ ) for f in fields: lines.append( f""" t = self._{f.name} if t is not None: return len(t) """ ) lines.append( """ raise NotImplementedError("Empty Instances does not support __len__!") """ ) # support method `has` lines.append( """ def has(self, name: str) -> bool: """ ) for f in fields: lines.append( f""" if name == "{f.name}": return self._{f.name} is not None """ ) lines.append( """ return False """ ) # support method `to` lines.append( f""" def to(self, device: torch.device) -> "{cls_name}": ret = {cls_name}(self.image_size) """ ) for f in fields: if hasattr(f.type_, "to"): lines.append( f""" t = self._{f.name} if t is not None: ret._{f.name} = t.to(device) """ ) else: # For now, ignore fields that cannot be moved to devices. # Maybe can support other tensor-like classes (e.g. __torch_function__) pass lines.append( """ return ret """ ) # support method `getitem` lines.append( f""" def __getitem__(self, item) -> "{cls_name}": ret = {cls_name}(self.image_size) """ ) for f in fields: lines.append( f""" t = self._{f.name} if t is not None: ret._{f.name} = t[item] """ ) lines.append( """ return ret """ ) # support method `get_fields()` lines.append( """ def get_fields(self) -> Dict[str, Tensor]: ret = {} """ ) for f in fields: if f.type_ == Boxes: stmt = "t.tensor" elif f.type_ == torch.Tensor: stmt = "t" else: stmt = f'assert False, "unsupported type {str(f.type_)}"' lines.append( f""" t = self._{f.name} if t is not None: ret["{f.name}"] = {stmt} """ ) lines.append( """ return ret""" ) return cls_name, os.linesep.join(lines) def _gen_instance_module(fields): # TODO: find a more automatic way to enable import of other classes s = """ from copy import deepcopy import torch from torch import Tensor import typing from typing import * import detectron2 from detectron2.structures import Boxes, Instances """ cls_name, cls_def = _gen_instance_class(fields) s += cls_def return cls_name, s def _import(path): return _import_file( "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True ) @contextmanager def patch_builtin_len(modules=()): """ Patch the builtin len() function of a few detectron2 modules to use __len__ instead, because __len__ does not convert values to integers and therefore is friendly to tracing. Args: modules (list[stsr]): names of extra modules to patch len(), in addition to those in detectron2. """ def _new_len(obj): return obj.__len__() with ExitStack() as stack: MODULES = [ "detectron2.modeling.roi_heads.fast_rcnn", "detectron2.modeling.roi_heads.mask_head", "detectron2.modeling.roi_heads.keypoint_head", ] + list(modules) ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES] for m in ctxs: m.side_effect = _new_len yield def patch_nonscriptable_classes(): """ Apply patches on a few nonscriptable detectron2 classes. Should not have side-effects on eager usage. """ # __prepare_scriptable__ can also be added to models for easier maintenance. # But it complicates the clean model code. from detectron2.modeling.backbone import ResNet, FPN # Due to https://github.com/pytorch/pytorch/issues/36061, # we change backbone to use ModuleList for scripting. 
# (note: this changes param names in state_dict) def prepare_resnet(self): ret = deepcopy(self) ret.stages = nn.ModuleList(ret.stages) for k in self.stage_names: delattr(ret, k) return ret ResNet.__prepare_scriptable__ = prepare_resnet def prepare_fpn(self): ret = deepcopy(self) ret.lateral_convs = nn.ModuleList(ret.lateral_convs) ret.output_convs = nn.ModuleList(ret.output_convs) for name, _ in self.named_children(): if name.startswith("fpn_"): delattr(ret, name) return ret FPN.__prepare_scriptable__ = prepare_fpn # Annotate some attributes to be constants for the purpose of scripting, # even though they are not constants in eager mode. from detectron2.modeling.roi_heads import StandardROIHeads if hasattr(StandardROIHeads, "__annotations__"): # copy first to avoid editing annotations of base class StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__) StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool] StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool] # These patches are not supposed to have side-effects. patch_nonscriptable_classes() @contextmanager def freeze_training_mode(model): """ A context manager that annotates the "training" attribute of every submodule to constant, so that the training codepath in these modules can be meta-compiled away. Upon exiting, the annotations are reverted. """ classes = {type(x) for x in model.modules()} # __constants__ is the old way to annotate constants and not compatible # with __annotations__ . classes = {x for x in classes if not hasattr(x, "__constants__")} for cls in classes: cls.__annotations__["training"] = torch.jit.Final[bool] yield for cls in classes: cls.__annotations__["training"] = bool
banmo-main
third_party/detectron2_old/detectron2/export/torchscript_patch.py
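# A rough sketch of how patch_instances() above is meant to be used (the public
# wrapper scripting_with_instances() follows the same pattern); the toy module
# and the two field names are illustrative only.
import torch
from detectron2.structures import Boxes, Instances
from detectron2.export.torchscript_patch import patch_instances

class ReadsInstances(torch.nn.Module):
    def forward(self, x: Instances) -> torch.Tensor:
        # attribute access is statically typed thanks to the patched class
        return x.proposal_boxes.tensor.sum() + x.objectness_logits.sum()

fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
with patch_instances(fields):
    scripted = torch.jit.script(ReadsInstances())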
# Copyright (c) Facebook, Inc. and its affiliates. import os import torch from detectron2.utils.env import TORCH_VERSION from detectron2.utils.file_io import PathManager from .torchscript_patch import freeze_training_mode, patch_instances __all__ = ["scripting_with_instances", "dump_torchscript_IR"] def scripting_with_instances(model, fields): """ Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult for scripting to support it out of the box. This function is made to support scripting a model that uses :class:`Instances`. It does the following: 1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``, but with all attributes been "static". The attributes need to be statically declared in the ``fields`` argument. 2. Register ``new_Instances``, and force scripting compiler to use it when trying to compile ``Instances``. After this function, the process will be reverted. User should be able to script another model using different fields. Example: Assume that ``Instances`` in the model consist of two attributes named ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and :class:`Tensor` respectively during inference. You can call this function like: :: fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor} torchscipt_model = scripting_with_instances(model, fields) Note: It only support models in evaluation mode. Args: model (nn.Module): The input model to be exported by scripting. fields (Dict[str, type]): Attribute names and corresponding type that ``Instances`` will use in the model. Note that all attributes used in ``Instances`` need to be added, regardless of whether they are inputs/outputs of the model. Data type not defined in detectron2 is not supported for now. Returns: torch.jit.ScriptModule: the model in torchscript format """ assert TORCH_VERSION >= (1, 8), "This feature is not available in PyTorch < 1.8" assert ( not model.training ), "Currently we only support exporting models in evaluation mode to torchscript" with freeze_training_mode(model), patch_instances(fields): scripted_model = torch.jit.script(model) return scripted_model # alias for old name export_torchscript_with_instances = scripting_with_instances def dump_torchscript_IR(model, dir): """ Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph, inlined graph). Useful for debugging. Args: model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module dir (str): output directory to dump files. """ PathManager.mkdirs(dir) def _get_script_mod(mod): if isinstance(mod, torch.jit.TracedModule): return mod._actual_script_module return mod # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f: def get_code(mod): # Try a few ways to get code using private attributes. try: # This contains more information than just `mod.code` return _get_script_mod(mod)._c.code except AttributeError: pass try: return mod.code except AttributeError: return None def dump_code(prefix, mod): code = get_code(mod) name = prefix or "root model" if code is None: f.write(f"Could not found code for {name} (type={mod.original_name})\n") f.write("\n") else: f.write(f"\nCode for {name}, type={mod.original_name}:\n") f.write(code) f.write("\n") f.write("-" * 80) for name, m in mod.named_children(): dump_code(prefix + "." 
+ name, m) if isinstance(model, torch.jit.ScriptFunction): f.write(get_code(model)) else: dump_code("", model) def _get_graph(model): try: # Recursively dump IR of all modules return _get_script_mod(model)._c.dump_to_str(True, False, False) except AttributeError: return model.graph.str() with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f: f.write(_get_graph(model)) # Dump IR of the entire graph (all submodules inlined) with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f: f.write(str(model.inlined_graph)) if not isinstance(model, torch.jit.ScriptFunction): # Dump the model structure in pytorch style with PathManager.open(os.path.join(dir, "model.txt"), "w") as f: f.write(str(model))
banmo-main
third_party/detectron2_old/detectron2/export/torchscript.py
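# A minimal sketch of dump_torchscript_IR() on a small traced module; the toy
# module and the output directory are placeholders.
import torch
from detectron2.export import dump_torchscript_IR

class TinyNet(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1

traced = torch.jit.trace(TinyNet(), torch.randn(4))
dump_torchscript_IR(traced, "./ts_dump")  # writes model_ts_code.txt, model_ts_IR.txt, ...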
# Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import types from collections import UserDict from typing import List from detectron2.utils.logger import log_first_n __all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"] class _DatasetCatalog(UserDict): """ A global dictionary that stores information about the datasets and how to obtain them. It contains a mapping from strings (which are names that identify a dataset, e.g. "coco_2014_train") to a function which parses the dataset and returns the samples in the format of `list[dict]`. The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details) if used with the data loader functionalities in `data/build.py,data/detection_transform.py`. The purpose of having this catalog is to make it easy to choose different datasets, by just using the strings in the config. """ def register(self, name, func): """ Args: name (str): the name that identifies a dataset, e.g. "coco_2014_train". func (callable): a callable which takes no arguments and returns a list of dicts. It must return the same results if called multiple times. """ assert callable(func), "You must register a function with `DatasetCatalog.register`!" assert name not in self, "Dataset '{}' is already registered!".format(name) self[name] = func def get(self, name): """ Call the registered function and return its results. Args: name (str): the name that identifies a dataset, e.g. "coco_2014_train". Returns: list[dict]: dataset annotations. """ try: f = self[name] except KeyError as e: raise KeyError( "Dataset '{}' is not registered! Available datasets are: {}".format( name, ", ".join(list(self.keys())) ) ) from e return f() def list(self) -> List[str]: """ List all registered datasets. Returns: list[str] """ return list(self.keys()) def remove(self, name): """ Alias of ``pop``. """ self.pop(name) def __str__(self): return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys())) __repr__ = __str__ DatasetCatalog = _DatasetCatalog() DatasetCatalog.__doc__ = ( _DatasetCatalog.__doc__ + """ .. automethod:: detectron2.data.catalog.DatasetCatalog.register .. automethod:: detectron2.data.catalog.DatasetCatalog.get """ ) class Metadata(types.SimpleNamespace): """ A class that supports simple attribute setter/getter. It is intended for storing metadata of a dataset and make it accessible globally. Examples: :: # somewhere when you load the data: MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"] # somewhere when you print statistics or visualize: classes = MetadataCatalog.get("mydataset").thing_classes """ # the name of the dataset # set default to N/A so that `self.name` in the errors will not trigger getattr again name: str = "N/A" _RENAMED = { "class_names": "thing_classes", "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id", "stuff_class_names": "stuff_classes", } def __getattr__(self, key): if key in self._RENAMED: log_first_n( logging.WARNING, "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), n=10, ) return getattr(self, self._RENAMED[key]) # "name" exists in every metadata if len(self.__dict__) > 1: raise AttributeError( "Attribute '{}' does not exist in the metadata of dataset '{}'. Available " "keys are {}.".format(key, self.name, str(self.__dict__.keys())) ) else: raise AttributeError( f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': " "metadata is empty." 
) def __setattr__(self, key, val): if key in self._RENAMED: log_first_n( logging.WARNING, "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), n=10, ) setattr(self, self._RENAMED[key], val) # Ensure that metadata of the same name stays consistent try: oldval = getattr(self, key) assert oldval == val, ( "Attribute '{}' in the metadata of '{}' cannot be set " "to a different value!\n{} != {}".format(key, self.name, oldval, val) ) except AttributeError: super().__setattr__(key, val) def as_dict(self): """ Returns all the metadata as a dict. Note that modifications to the returned dict will not reflect on the Metadata object. """ return copy.copy(self.__dict__) def set(self, **kwargs): """ Set multiple metadata with kwargs. """ for k, v in kwargs.items(): setattr(self, k, v) return self def get(self, key, default=None): """ Access an attribute and return its value if exists. Otherwise return default. """ try: return getattr(self, key) except AttributeError: return default class _MetadataCatalog(UserDict): """ MetadataCatalog is a global dictionary that provides access to :class:`Metadata` of a given dataset. The metadata associated with a certain name is a singleton: once created, the metadata will stay alive and will be returned by future calls to ``get(name)``. It's like global variables, so don't abuse it. It's meant for storing knowledge that's constant and shared across the execution of the program, e.g.: the class names in COCO. """ def get(self, name): """ Args: name (str): name of a dataset (e.g. coco_2014_train). Returns: Metadata: The :class:`Metadata` instance associated with this name, or create an empty one if none is available. """ assert len(name) r = super().get(name, None) if r is None: r = self[name] = Metadata(name=name) return r def list(self): """ List all registered metadata. Returns: list[str]: keys (names of datasets) of all registered metadata """ return list(self.keys()) def remove(self, name): """ Alias of ``pop``. """ self.pop(name) def __str__(self): return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys())) __repr__ = __str__ MetadataCatalog = _MetadataCatalog() MetadataCatalog.__doc__ = ( _MetadataCatalog.__doc__ + """ .. automethod:: detectron2.data.catalog.MetadataCatalog.get """ )
banmo-main
third_party/detectron2_old/detectron2/data/catalog.py
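# A minimal sketch of the catalog API documented above; the dataset name,
# classes and annotations are made up for illustration.
from detectron2.data import DatasetCatalog, MetadataCatalog

def get_toy_dicts():
    # must return the same list[dict] every time it is called
    return [
        {"file_name": "img0.jpg", "image_id": 0, "height": 480, "width": 640, "annotations": []}
    ]

DatasetCatalog.register("toy_train", get_toy_dicts)
MetadataCatalog.get("toy_train").thing_classes = ["person", "dog"]

dataset_dicts = DatasetCatalog.get("toy_train")               # calls get_toy_dicts()
class_names = MetadataCatalog.get("toy_train").thing_classes  # ["person", "dog"]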
# Copyright (c) Facebook, Inc. and its affiliates. import itertools import logging import numpy as np import operator import pickle import torch.utils.data from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. 
""" logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=np.int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts(names, filter_empty=True, min_keypoints=0, proposal_files=None): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. Returns: list[dict]: a list of dicts following the standard dataset dict format. 
""" if isinstance(names, str): names = [names] assert len(names), names dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0 ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed. sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices total_batch_size, aspect_ratio_grouping, num_workers): see :func:`build_detection_train_loader`. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """ world_size = get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( total_batch_size, world_size ) batch_size = total_batch_size // world_size if aspect_ratio_grouping: data_loader = torch.utils.data.DataLoader( dataset, sampler=sampler, num_workers=num_workers, batch_sampler=None, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, ) # yield individual mapped dict return AspectRatioGroupedDataset(data_loader, batch_size) else: batch_sampler = torch.utils.data.sampler.BatchSampler( sampler, batch_size, drop_last=True ) # drop_last so the batch always have the same size return torch.utils.data.DataLoader( dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator, worker_init_fn=worker_init_reset_seed, ) def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." 
+ cfg.DATASETS.TRAIN[0]) if mapper is None: mapper = DatasetMapper(cfg, True) if sampler is None: sampler_name = cfg.DATALOADER.SAMPLER_TRAIN logger = logging.getLogger(__name__) logger.info("Using training sampler {}".format(sampler_name)) if sampler_name == "TrainingSampler": sampler = TrainingSampler(len(dataset)) elif sampler_name == "RepeatFactorTrainingSampler": repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset, cfg.DATALOADER.REPEAT_THRESHOLD ) sampler = RepeatFactorTrainingSampler(repeat_factors) else: raise ValueError("Unknown training sampler: {}".format(sampler_name)) return { "dataset": dataset, "sampler": sampler, "mapper": mapper, "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, "num_workers": cfg.DATALOADER.NUM_WORKERS, } # TODO can allow dataset as an iterable or IterableDataset to make this function more general @configurable(from_config=_train_loader_from_config) def build_detection_train_loader( dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0 ): """ Build a dataloader for object detection with some default features. This interface is experimental. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a map-style pytorch dataset. They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. Default to :class:`TrainingSampler`, which coordinates an infinite random shuffle sequence across all workers. total_batch_size (int): total batch size across all workers. Batching simply puts data into a list. aspect_ratio_grouping (bool): whether to group images with similar aspect ratio for efficiency. When enabled, it requires each element in dataset be a dict with keys "width" and "height". num_workers (int): number of parallel data loading workers Returns: torch.utils.data.DataLoader: a dataloader. Each output from it is a ``list[mapped_element]`` of length ``total_batch_size / num_workers``, where ``mapped_element`` is produced by the ``mapper``. """ if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if sampler is None: sampler = TrainingSampler(len(dataset)) assert isinstance(sampler, torch.utils.data.sampler.Sampler) return build_batch_data_loader( dataset, sampler, total_batch_size, aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, ) def _test_loader_from_config(cfg, dataset_name, mapper=None): """ Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them). 
""" dataset = get_detection_dataset_dicts( [dataset_name], filter_empty=False, proposal_files=[ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)] ] if cfg.MODEL.LOAD_PROPOSALS else None, ) if mapper is None: mapper = DatasetMapper(cfg, False) return {"dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS} @configurable(from_config=_test_loader_from_config) def build_detection_test_loader(dataset, *, mapper, sampler=None, num_workers=0): """ Similar to `build_detection_train_loader`, but uses a batch size of 1, and :class:`InferenceSampler`. This sampler coordinates all workers to produce the exact set of all samples. This interface is experimental. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a map-style pytorch dataset. They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, which splits the dataset across all workers. num_workers (int): number of parallel data loading workers Returns: DataLoader: a torch DataLoader, that loads the given detection dataset, with test-time transformation and batching. Examples: :: data_loader = build_detection_test_loader( DatasetRegistry.get("my_test"), mapper=DatasetMapper(...)) # or, instantiate with a CfgNode: data_loader = build_detection_test_loader(cfg, "my_test") """ if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if sampler is None: sampler = InferenceSampler(len(dataset)) # Always use 1 image per worker during inference since this is the # standard when reporting inference time in papers. batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False) data_loader = torch.utils.data.DataLoader( dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator, ) return data_loader def trivial_batch_collator(batch): """ A batch collator that does nothing. """ return batch def worker_init_reset_seed(worker_id): initial_seed = torch.initial_seed() % 2 ** 31 seed_all_rng(initial_seed + worker_id)
banmo-main
third_party/detectron2_old/detectron2/data/build.py
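The @configurable decorator gives the loader builders in build.py two call styles: cfg-driven (arguments filled in by _train_loader_from_config / _test_loader_from_config) and fully explicit. A hedged sketch of both follows; it assumes the builtin COCO datasets are registered and available on disk, and the docstring's DatasetRegistry.get corresponds to the DatasetCatalog / get_detection_dataset_dicts exported by this package.

from detectron2.config import get_cfg
from detectron2.data import (
    DatasetMapper,
    build_detection_test_loader,
    build_detection_train_loader,
    get_detection_dataset_dicts,
)

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("coco_2017_train",)
cfg.DATASETS.TEST = ("coco_2017_val",)

# 1) cfg-driven: sampler, mapper, batch size, etc. come from the config.
train_loader = build_detection_train_loader(cfg)
test_loader = build_detection_test_loader(cfg, "coco_2017_val")

# 2) explicit: build the dataset dicts and mapper yourself, bypass the cfg plumbing.
dicts = get_detection_dataset_dicts(["coco_2017_val"], filter_empty=False)
test_loader = build_detection_test_loader(dicts, mapper=DatasetMapper(cfg, False))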
# Copyright (c) Facebook, Inc. and its affiliates. from . import transforms # isort:skip from .build import ( build_batch_data_loader, build_detection_test_loader, build_detection_train_loader, get_detection_dataset_dicts, load_proposals_into_dataset, print_instances_class_histogram, ) from .catalog import DatasetCatalog, MetadataCatalog, Metadata from .common import DatasetFromList, MapDataset from .dataset_mapper import DatasetMapper # ensure the builtin datasets are registered from . import datasets, samplers # isort:skip __all__ = [k for k in globals().keys() if not k.startswith("_")]
banmo-main
third_party/detectron2_old/detectron2/data/__init__.py
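The names re-exported by this __init__.py are enough to register a custom dataset and feed it into the loader pipeline. A minimal, hypothetical sketch: the dataset name "my_tiny_train", the file name, and the single annotation below are made up for illustration.

from detectron2.data import DatasetCatalog, MetadataCatalog, get_detection_dataset_dicts
from detectron2.structures import BoxMode

def _load_my_tiny_train():
    # Return dataset dicts in Detectron2's standard format.
    return [
        {
            "file_name": "images/0001.jpg",
            "image_id": 0,
            "height": 480,
            "width": 640,
            "annotations": [
                {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 0}
            ],
        }
    ]

DatasetCatalog.register("my_tiny_train", _load_my_tiny_train)
MetadataCatalog.get("my_tiny_train").thing_classes = ["widget"]

# Filters empty images and prints a per-class instance histogram along the way.
dicts = get_detection_dataset_dicts(["my_tiny_train"], filter_empty=True)
print(len(dicts), dicts[0]["file_name"])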
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. """ Common data processing utilities that are used in a typical object detection data pipeline. """ import logging import numpy as np from typing import List, Union import pycocotools.mask as mask_util import torch from PIL import Image from detectron2.structures import ( BitMasks, Boxes, BoxMode, Instances, Keypoints, PolygonMasks, RotatedBoxes, polygons_to_bitmask, ) from detectron2.utils.file_io import PathManager from . import transforms as T from .catalog import MetadataCatalog __all__ = [ "SizeMismatchError", "convert_image_to_rgb", "check_image_size", "transform_proposals", "transform_instance_annotations", "annotations_to_instances", "annotations_to_instances_rotated", "build_augmentation", "build_transform_gen", "create_keypoint_hflip_indices", "filter_empty_instances", "read_image", ] class SizeMismatchError(ValueError): """ When loaded image has difference width/height compared with annotation. """ # https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601 _M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]] _M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]] # https://www.exiv2.org/tags.html _EXIF_ORIENT = 274 # exif 'Orientation' tag def convert_PIL_to_numpy(image, format): """ Convert PIL image to numpy array of target format. Args: image (PIL.Image): a PIL image format (str): the format of output image Returns: (np.ndarray): also see `read_image` """ if format is not None: # PIL only supports RGB, so convert to RGB and flip channels over below conversion_format = format if format in ["BGR", "YUV-BT.601"]: conversion_format = "RGB" image = image.convert(conversion_format) image = np.asarray(image) # PIL squeezes out the channel dimension for "L", so make it HWC if format == "L": image = np.expand_dims(image, -1) # handle formats not supported by PIL elif format == "BGR": # flip channels if needed image = image[:, :, ::-1] elif format == "YUV-BT.601": image = image / 255.0 image = np.dot(image, np.array(_M_RGB2YUV).T) return image def convert_image_to_rgb(image, format): """ Convert an image from given format to RGB. Args: image (np.ndarray or Tensor): an HWC image format (str): the format of input image, also see `read_image` Returns: (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8 """ if isinstance(image, torch.Tensor): image = image.cpu().numpy() if format == "BGR": image = image[:, :, [2, 1, 0]] elif format == "YUV-BT.601": image = np.dot(image, np.array(_M_YUV2RGB).T) image = image * 255.0 else: if format == "L": image = image[:, :, 0] image = image.astype(np.uint8) image = np.asarray(Image.fromarray(image, mode=format).convert("RGB")) return image def _apply_exif_orientation(image): """ Applies the exif orientation correctly. This code exists per the bug: https://github.com/python-pillow/Pillow/issues/3973 with the function `ImageOps.exif_transpose`. 
The Pillow source raises errors with various methods, especially `tobytes` Function based on: https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 Args: image (PIL.Image): a PIL image Returns: (PIL.Image): the PIL image with exif orientation applied, if applicable """ if not hasattr(image, "getexif"): return image try: exif = image.getexif() except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 exif = None if exif is None: return image orientation = exif.get(_EXIF_ORIENT) method = { 2: Image.FLIP_LEFT_RIGHT, 3: Image.ROTATE_180, 4: Image.FLIP_TOP_BOTTOM, 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, 8: Image.ROTATE_90, }.get(orientation) if method is not None: return image.transpose(method) return image def read_image(file_name, format=None): """ Read an image into the given format. Will apply rotation and flipping if the image has such exif information. Args: file_name (str): image file path format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601". Returns: image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601. """ with PathManager.open(file_name, "rb") as f: image = Image.open(f) # work around this bug: https://github.com/python-pillow/Pillow/issues/3973 image = _apply_exif_orientation(image) return convert_PIL_to_numpy(image, format) def check_image_size(dataset_dict, image): """ Raise an error if the image does not match the size specified in the dict. """ if "width" in dataset_dict or "height" in dataset_dict: image_wh = (image.shape[1], image.shape[0]) expected_wh = (dataset_dict["width"], dataset_dict["height"]) if not image_wh == expected_wh: raise SizeMismatchError( "Mismatched image shape{}, got {}, expect {}.".format( " for image " + dataset_dict["file_name"] if "file_name" in dataset_dict else "", image_wh, expected_wh, ) + " Please check the width/height in your annotation." ) # To ensure bbox always remap to original image size if "width" not in dataset_dict: dataset_dict["width"] = image.shape[1] if "height" not in dataset_dict: dataset_dict["height"] = image.shape[0] def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0): """ Apply transformations to the proposals in dataset_dict, if any. Args: dataset_dict (dict): a dict read from the dataset, possibly contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode" image_shape (tuple): height, width transforms (TransformList): proposal_topk (int): only keep top-K scoring proposals min_box_size (int): proposals with either side smaller than this threshold are removed The input dict is modified in-place, with abovementioned keys removed. A new key "proposals" will be added. Its value is an `Instances` object which contains the transformed proposals in its field "proposal_boxes" and "objectness_logits". 
""" if "proposal_boxes" in dataset_dict: # Transform proposal boxes boxes = transforms.apply_box( BoxMode.convert( dataset_dict.pop("proposal_boxes"), dataset_dict.pop("proposal_bbox_mode"), BoxMode.XYXY_ABS, ) ) boxes = Boxes(boxes) objectness_logits = torch.as_tensor( dataset_dict.pop("proposal_objectness_logits").astype("float32") ) boxes.clip(image_shape) keep = boxes.nonempty(threshold=min_box_size) boxes = boxes[keep] objectness_logits = objectness_logits[keep] proposals = Instances(image_shape) proposals.proposal_boxes = boxes[:proposal_topk] proposals.objectness_logits = objectness_logits[:proposal_topk] dataset_dict["proposals"] = proposals def transform_instance_annotations( annotation, transforms, image_size, *, keypoint_hflip_indices=None ): """ Apply transforms to box, segmentation and keypoints annotations of a single instance. It will use `transforms.apply_box` for the box, and `transforms.apply_coords` for segmentation polygons & keypoints. If you need anything more specially designed for each data structure, you'll need to implement your own version of this function or the transforms. Args: annotation (dict): dict of instance annotations for a single instance. It will be modified in-place. transforms (TransformList or list[Transform]): image_size (tuple): the height, width of the transformed image keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. Returns: dict: the same input dict with fields "bbox", "segmentation", "keypoints" transformed according to `transforms`. The "bbox_mode" field will be set to XYXY_ABS. """ if isinstance(transforms, (tuple, list)): transforms = T.TransformList(transforms) # bbox is 1d (per-instance bounding box) bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) # clip transformed bbox to image size bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0) annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1]) annotation["bbox_mode"] = BoxMode.XYXY_ABS if "segmentation" in annotation: # each instance contains 1 or more polygons segm = annotation["segmentation"] if isinstance(segm, list): # polygons polygons = [np.asarray(p).reshape(-1, 2) for p in segm] annotation["segmentation"] = [ p.reshape(-1) for p in transforms.apply_polygons(polygons) ] elif isinstance(segm, dict): # RLE mask = mask_util.decode(segm) mask = transforms.apply_segmentation(mask) assert tuple(mask.shape[:2]) == image_size annotation["segmentation"] = mask else: raise ValueError( "Cannot transform segmentation of type '{}'!" "Supported types are: polygons as list[list[float] or ndarray]," " COCO-style RLE as a dict.".format(type(segm)) ) if "keypoints" in annotation: keypoints = transform_keypoint_annotations( annotation["keypoints"], transforms, image_size, keypoint_hflip_indices ) annotation["keypoints"] = keypoints return annotation def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None): """ Transform keypoint annotations of an image. If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0) Args: keypoints (list[float]): Nx3 float in Detectron2's Dataset format. Each point is represented by (x, y, visibility). transforms (TransformList): image_size (tuple): the height, width of the transformed image keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. When `transforms` includes horizontal flip, will use the index mapping to flip keypoints. 
""" # (N*3,) -> (N, 3) keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3) keypoints_xy = transforms.apply_coords(keypoints[:, :2]) # Set all out-of-boundary points to "unlabeled" inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1])) inside = inside.all(axis=1) keypoints[:, :2] = keypoints_xy keypoints[:, 2][~inside] = 0 # This assumes that HorizFlipTransform is the only one that does flip do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 # Alternative way: check if probe points was horizontally flipped. # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]]) # probe_aug = transforms.apply_coords(probe.copy()) # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa # If flipped, swap each keypoint with its opposite-handed equivalent if do_hflip: assert keypoint_hflip_indices is not None keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :] # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0 keypoints[keypoints[:, 2] == 0] = 0 return keypoints def annotations_to_instances(annos, image_size, mask_format="polygon"): """ Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Args: annos (list[dict]): a list of instance annotations in one image, each element for one instance. image_size (tuple): height, width Returns: Instances: It will contain fields "gt_boxes", "gt_classes", "gt_masks", "gt_keypoints", if they can be obtained from `annos`. This is the format that builtin models expect. """ boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] target = Instances(image_size) target.gt_boxes = Boxes(boxes) classes = [int(obj["category_id"]) for obj in annos] classes = torch.tensor(classes, dtype=torch.int64) target.gt_classes = classes if len(annos) and "segmentation" in annos[0]: segms = [obj["segmentation"] for obj in annos] if mask_format == "polygon": try: masks = PolygonMasks(segms) except ValueError as e: raise ValueError( "Failed to use mask_format=='polygon' from the given annotations!" ) from e else: assert mask_format == "bitmask", mask_format masks = [] for segm in segms: if isinstance(segm, list): # polygon masks.append(polygons_to_bitmask(segm, *image_size)) elif isinstance(segm, dict): # COCO RLE masks.append(mask_util.decode(segm)) elif isinstance(segm, np.ndarray): assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format( segm.ndim ) # mask array masks.append(segm) else: raise ValueError( "Cannot convert segmentation of type '{}' to BitMasks!" "Supported types are: polygons as list[list[float] or ndarray]," " COCO-style RLE as a dict, or a binary segmentation mask " " in a 2D numpy array of shape HxW.".format(type(segm)) ) # torch.from_numpy does not support array with negative stride. masks = BitMasks( torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks]) ) target.gt_masks = masks if len(annos) and "keypoints" in annos[0]: kpts = [obj.get("keypoints", []) for obj in annos] target.gt_keypoints = Keypoints(kpts) return target def annotations_to_instances_rotated(annos, image_size): """ Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Compared to `annotations_to_instances`, this function is for rotated boxes only Args: annos (list[dict]): a list of instance annotations in one image, each element for one instance. 
image_size (tuple): height, width Returns: Instances: Containing fields "gt_boxes", "gt_classes", if they can be obtained from `annos`. This is the format that builtin models expect. """ boxes = [obj["bbox"] for obj in annos] target = Instances(image_size) boxes = target.gt_boxes = RotatedBoxes(boxes) boxes.clip(image_size) classes = [obj["category_id"] for obj in annos] classes = torch.tensor(classes, dtype=torch.int64) target.gt_classes = classes return target def filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-5): """ Filter out empty instances in an `Instances` object. Args: instances (Instances): by_box (bool): whether to filter out instances with empty boxes by_mask (bool): whether to filter out instances with empty masks box_threshold (float): minimum width and height to be considered non-empty Returns: Instances: the filtered instances. """ assert by_box or by_mask r = [] if by_box: r.append(instances.gt_boxes.nonempty(threshold=box_threshold)) if instances.has("gt_masks") and by_mask: r.append(instances.gt_masks.nonempty()) # TODO: can also filter visible keypoints if not r: return instances m = r[0] for x in r[1:]: m = m & x return instances[m] def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]: """ Args: dataset_names: list of dataset names Returns: list[int]: a list of size=#keypoints, storing the horizontally-flipped keypoint indices. """ if isinstance(dataset_names, str): dataset_names = [dataset_names] check_metadata_consistency("keypoint_names", dataset_names) check_metadata_consistency("keypoint_flip_map", dataset_names) meta = MetadataCatalog.get(dataset_names[0]) names = meta.keypoint_names # TODO flip -> hflip flip_map = dict(meta.keypoint_flip_map) flip_map.update({v: k for k, v in flip_map.items()}) flipped_names = [i if i not in flip_map else flip_map[i] for i in names] flip_indices = [names.index(i) for i in flipped_names] return flip_indices def gen_crop_transform_with_instance(crop_size, image_size, instance): """ Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format. """ crop_size = np.asarray(crop_size, dtype=np.int32) bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS) center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 assert ( image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1] ), "The annotation bounding box is outside of the image!" assert ( image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1] ), "Crop size is larger than image size!" min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) y0 = np.random.randint(min_yx[0], max_yx[0] + 1) x0 = np.random.randint(min_yx[1], max_yx[1] + 1) return T.CropTransform(x0, y0, crop_size[1], crop_size[0]) def check_metadata_consistency(key, dataset_names): """ Check that the datasets have consistent metadata. 
Args: key (str): a metadata key dataset_names (list[str]): a list of dataset names Raises: AttributeError: if the key does not exist in the metadata ValueError: if the given datasets do not have the same metadata values defined by key """ if len(dataset_names) == 0: return logger = logging.getLogger(__name__) entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names] for idx, entry in enumerate(entries_per_dataset): if entry != entries_per_dataset[0]: logger.error( "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry)) ) logger.error( "Metadata '{}' for dataset '{}' is '{}'".format( key, dataset_names[0], str(entries_per_dataset[0]) ) ) raise ValueError("Datasets have different metadata '{}'!".format(key)) def build_augmentation(cfg, is_train): """ Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation] """ if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST sample_style = "choice" augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] if is_train and cfg.INPUT.RANDOM_FLIP != "none": augmentation.append( T.RandomFlip( horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", vertical=cfg.INPUT.RANDOM_FLIP == "vertical", ) ) return augmentation build_transform_gen = build_augmentation """ Alias for backward-compatibility. """
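The functions in detection_utils.py compose into the same per-image flow that DatasetMapper uses: augment the image, replay the recorded transforms on the annotations, then build an Instances object. A small sketch with a synthetic image and one made-up polygon annotation:

import numpy as np
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BoxMode

# Synthetic image and a single polygon-annotated instance (values are arbitrary).
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
anno = {
    "bbox": [100.0, 120.0, 300.0, 240.0],
    "bbox_mode": BoxMode.XYXY_ABS,
    "category_id": 0,
    "segmentation": [[100.0, 120.0, 300.0, 120.0, 300.0, 240.0, 100.0, 240.0]],
}

# Run augmentations on the image, then replay the recorded transforms on the annotation.
augs = T.AugmentationList(
    [T.ResizeShortestEdge([320], max_size=640, sample_style="choice"), T.RandomFlip(horizontal=True)]
)
aug_input = T.AugInput(image)
transforms = augs(aug_input)              # mutates aug_input.image, returns the applied transforms
image_shape = aug_input.image.shape[:2]   # (h, w) after augmentation

anno = utils.transform_instance_annotations(anno, transforms, image_shape)
instances = utils.annotations_to_instances([anno], image_shape, mask_format="polygon")
instances = utils.filter_empty_instances(instances)
print(instances.gt_boxes.tensor, instances.gt_classes)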
banmo-main
third_party/detectron2_old/detectron2/data/detection_utils.py
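The image I/O helpers in detection_utils.py (read_image, convert_image_to_rgb, check_image_size) can be exercised end to end on a throwaway file. A sketch; the temporary path and the pure-red test image are fabricated for illustration.

import os
import tempfile
import numpy as np
from PIL import Image
from detectron2.data.detection_utils import check_image_size, convert_image_to_rgb, read_image

# Write a small pure-red RGB image to a temporary path.
path = os.path.join(tempfile.gettempdir(), "detection_utils_demo.png")
arr = np.zeros((32, 48, 3), dtype=np.uint8)
arr[..., 0] = 255  # red channel in RGB order
Image.fromarray(arr).save(path)

bgr = read_image(path, format="BGR")      # HWC uint8, EXIF-corrected, channels flipped to BGR
assert bgr[0, 0, 2] == 255                # red now lives in the last (R-of-BGR) channel
rgb = convert_image_to_rgb(bgr, "BGR")    # flip back to RGB for visualization
assert rgb[0, 0, 0] == 255

# check_image_size fills in width/height when missing, and raises SizeMismatchError
# if the dict disagrees with the loaded image.
record = {"file_name": path}
check_image_size(record, bgr)
print(record["width"], record["height"])  # 48 32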
# Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import numpy as np from typing import List, Optional, Union import torch from detectron2.config import configurable from . import detection_utils as utils from . import transforms as T """ This file contains the default mapping that's applied to "dataset dicts". """ __all__ = ["DatasetMapper"] class DatasetMapper: """ A callable which takes a dataset dict in Detectron2 Dataset format, and map it into a format used by the model. This is the default callable to be used to map your dataset dict into training data. You may need to follow it to implement your own one for customized logic, such as a different way to read or transform images. See :doc:`/tutorials/data_loading` for details. The callable currently does the following: 1. Read the image from "file_name" 2. Applies cropping/geometric transforms to the image and annotations 3. Prepare data and annotations to Tensor and :class:`Instances` """ @configurable def __init__( self, is_train: bool, *, augmentations: List[Union[T.Augmentation, T.Transform]], image_format: str, use_instance_mask: bool = False, use_keypoint: bool = False, instance_mask_format: str = "polygon", keypoint_hflip_indices: Optional[np.ndarray] = None, precomputed_proposal_topk: Optional[int] = None, recompute_boxes: bool = False, ): """ NOTE: this interface is experimental. Args: is_train: whether it's used in training or inference augmentations: a list of augmentations or deterministic transforms to apply image_format: an image format supported by :func:`detection_utils.read_image`. use_instance_mask: whether to process instance segmentation annotations, if available use_keypoint: whether to process keypoint annotations if available instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation masks into this format. keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` precomputed_proposal_topk: if given, will load pre-computed proposals from dataset_dict and keep the top k proposals for each image. recompute_boxes: whether to overwrite bounding box annotations by computing tight bounding boxes from instance mask annotations. 
""" if recompute_boxes: assert use_instance_mask, "recompute_boxes requires instance masks" # fmt: off self.is_train = is_train self.augmentations = T.AugmentationList(augmentations) self.image_format = image_format self.use_instance_mask = use_instance_mask self.instance_mask_format = instance_mask_format self.use_keypoint = use_keypoint self.keypoint_hflip_indices = keypoint_hflip_indices self.proposal_topk = precomputed_proposal_topk self.recompute_boxes = recompute_boxes # fmt: on logger = logging.getLogger(__name__) mode = "training" if is_train else "inference" logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") @classmethod def from_config(cls, cfg, is_train: bool = True): augs = utils.build_augmentation(cfg, is_train) if cfg.INPUT.CROP.ENABLED and is_train: augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) recompute_boxes = cfg.MODEL.MASK_ON else: recompute_boxes = False ret = { "is_train": is_train, "augmentations": augs, "image_format": cfg.INPUT.FORMAT, "use_instance_mask": cfg.MODEL.MASK_ON, "instance_mask_format": cfg.INPUT.MASK_FORMAT, "use_keypoint": cfg.MODEL.KEYPOINT_ON, "recompute_boxes": recompute_boxes, } if cfg.MODEL.KEYPOINT_ON: ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) if cfg.MODEL.LOAD_PROPOSALS: ret["precomputed_proposal_topk"] = ( cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST ) return ret def __call__(self, dataset_dict): """ Args: dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Returns: dict: a format that builtin models in detectron2 accept """ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below # USER: Write your own image loading if it's not from a file image = utils.read_image(dataset_dict["file_name"], format=self.image_format) utils.check_image_size(dataset_dict, image) # USER: Remove if you don't do semantic/panoptic segmentation. if "sem_seg_file_name" in dataset_dict: sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) else: sem_seg_gt = None aug_input = T.AugInput(image, sem_seg=sem_seg_gt) transforms = self.augmentations(aug_input) image, sem_seg_gt = aug_input.image, aug_input.sem_seg image_shape = image.shape[:2] # h, w # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, # but not efficient on large generic data structures due to the use of pickle & mp.Queue. # Therefore it's important to use torch.Tensor. dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) if sem_seg_gt is not None: dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) # USER: Remove if you don't use pre-computed proposals. # Most users would not need this feature. if self.proposal_topk is not None: utils.transform_proposals( dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk ) if not self.is_train: # USER: Modify this if you want to keep them for some reason. dataset_dict.pop("annotations", None) dataset_dict.pop("sem_seg_file_name", None) return dataset_dict if "annotations" in dataset_dict: # USER: Modify this if you want to keep them for some reason. 
for anno in dataset_dict["annotations"]: if not self.use_instance_mask: anno.pop("segmentation", None) if not self.use_keypoint: anno.pop("keypoints", None) # USER: Implement additional transformations if you have other types of data annos = [ utils.transform_instance_annotations( obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices ) for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances( annos, image_shape, mask_format=self.instance_mask_format ) # After transforms such as cropping are applied, the bounding box may no longer # tightly bound the object. As an example, imagine a triangle object # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to # the intersection of original bounding box and the cropping box. if self.recompute_boxes: instances.gt_boxes = instances.gt_masks.get_bounding_boxes() dataset_dict["instances"] = utils.filter_empty_instances(instances) return dataset_dict
banmo-main
third_party/detectron2_old/detectron2/data/dataset_mapper.py
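DatasetMapper can be built either from a cfg (DatasetMapper(cfg, is_train=True), as build.py does) or with explicit arguments. Below is a hedged sketch of the explicit route on a fabricated on-disk image and annotation dict; every value is made up for illustration.

import os
import tempfile
import numpy as np
from PIL import Image
from detectron2.data import DatasetMapper
from detectron2.data import transforms as T
from detectron2.structures import BoxMode

# Fabricate an image on disk plus a matching dataset dict.
path = os.path.join(tempfile.gettempdir(), "mapper_demo.png")
Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)).save(path)
record = {
    "file_name": path,
    "image_id": 0,
    "height": 480,
    "width": 640,
    "annotations": [
        {
            "bbox": [100.0, 120.0, 300.0, 240.0],
            "bbox_mode": BoxMode.XYXY_ABS,
            "category_id": 0,
            "segmentation": [[100.0, 120.0, 300.0, 120.0, 300.0, 240.0, 100.0, 240.0]],
        }
    ],
}

# Explicit construction: reads the image, applies the augmentations, and converts
# the annotations into an Instances object with gt_boxes / gt_classes / gt_masks.
mapper = DatasetMapper(
    is_train=True,
    augmentations=[T.ResizeShortestEdge([320], max_size=640, sample_style="choice")],
    image_format="BGR",
    use_instance_mask=True,
    instance_mask_format="polygon",
)
out = mapper(record)
print(out["image"].shape, out["instances"])  # CHW uint8 tensor + Instances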