python_code | repo_name | file_path
---|---|---|
| banmo-main | third_party/detectron2_old/tests/layers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
from copy import copy
import cv2
import torch
from fvcore.common.benchmark import benchmark
from torch.nn import functional as F
from detectron2.layers.roi_align import ROIAlign, roi_align
class ROIAlignTest(unittest.TestCase):
def test_forward_output(self):
input = np.arange(25).reshape(5, 5).astype("float32")
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False)
output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True)
# without correction:
old_results = [
[7.5, 8, 8.5, 9],
[10, 10.5, 11, 11.5],
[12.5, 13, 13.5, 14],
[15, 15.5, 16, 16.5],
]
# with 0.5 correction:
correct_results = [
[4.5, 5.0, 5.5, 6.0],
[7.0, 7.5, 8.0, 8.5],
[9.5, 10.0, 10.5, 11.0],
[12.0, 12.5, 13.0, 13.5],
]
# This is an upsampled version of [[6, 7], [11, 12]]
self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten()))
self.assertTrue(
np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten())
)
# Also see similar issues in tensorflow at
# https://github.com/tensorflow/tensorflow/issues/26278
def test_resize(self):
H, W = 30, 30
input = np.random.rand(H, W).astype("float32") * 100
box = [10, 10, 20, 20]
output = self._simple_roialign(input, box, (5, 5), aligned=True)
input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR)
box2x = [x / 2 for x in box]
output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True)
diff = np.abs(output2x - output)
self.assertTrue(diff.max() < 1e-4)
def test_grid_sample_equivalence(self):
H, W = 30, 30
input = np.random.rand(H, W).astype("float32") * 100
box = [10, 10, 20, 20]
for ratio in [1, 2, 3]:
output = self._simple_roialign(input, box, (5, 5), sampling_ratio=ratio)
output_grid_sample = grid_sample_roi_align(
torch.from_numpy(input[None, None, :, :]).float(),
torch.as_tensor(box).float()[None, :],
5,
1.0,
ratio,
)
self.assertTrue(torch.allclose(output, output_grid_sample))
def _simple_roialign(self, img, box, resolution, sampling_ratio=0, aligned=True):
"""
RoiAlign with scale 1.0.
"""
if isinstance(resolution, int):
resolution = (resolution, resolution)
op = ROIAlign(resolution, 1.0, sampling_ratio, aligned=aligned)
input = torch.from_numpy(img[None, None, :, :].astype("float32"))
rois = [0] + list(box)
rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
output = op.forward(input, rois)
if torch.cuda.is_available():
output_cuda = op.forward(input.cuda(), rois.cuda()).cpu()
self.assertTrue(torch.allclose(output, output_cuda))
return output[0, 0]
def _simple_roialign_with_grad(self, img, box, resolution, device):
if isinstance(resolution, int):
resolution = (resolution, resolution)
op = ROIAlign(resolution, 1.0, 0, aligned=True)
input = torch.from_numpy(img[None, None, :, :].astype("float32"))
rois = [0] + list(box)
rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
input = input.to(device=device)
rois = rois.to(device=device)
input.requires_grad = True
output = op.forward(input, rois)
return input, output
def test_empty_box(self):
img = np.random.rand(5, 5)
box = [3, 4, 5, 4]
o = self._simple_roialign(img, box, 7)
self.assertTrue(o.shape == (7, 7))
self.assertTrue((o == 0).all())
        for dev in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
input, output = self._simple_roialign_with_grad(img, box, 7, torch.device(dev))
output.sum().backward()
self.assertTrue(torch.allclose(input.grad, torch.zeros_like(input)))
def test_empty_batch(self):
input = torch.zeros(0, 3, 10, 10, dtype=torch.float32)
rois = torch.zeros(0, 5, dtype=torch.float32)
op = ROIAlign((7, 7), 1.0, 0, aligned=True)
output = op.forward(input, rois)
self.assertTrue(output.shape == (0, 3, 7, 7))
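
# A hand-computed check (illustrative helpers, not part of the original suite):
# with aligned=True, ROIAlign samples at true continuous coordinates, treating
# pixel (r, c) as centered at (c + 0.5, r + 0.5). For the [1, 1, 3, 3] box split
# into 4x4 bins of size 0.5, the center of the top-left bin is (1.25, 1.25), and
# bilinear interpolation there gives 4.5, the first entry of `correct_results`
# in ROIAlignTest.test_forward_output above.
def _bilinear_at_pixel_centers(img, x, y):
    u, v = x - 0.5, y - 0.5
    c0, r0 = int(np.floor(u)), int(np.floor(v))
    fx, fy = u - c0, v - r0
    return (
        img[r0, c0] * (1 - fx) * (1 - fy)
        + img[r0, c0 + 1] * fx * (1 - fy)
        + img[r0 + 1, c0] * (1 - fx) * fy
        + img[r0 + 1, c0 + 1] * fx * fy
    )


def _demo_aligned_first_bin_value():
    img = np.arange(25).reshape(5, 5).astype("float32")
    value = _bilinear_at_pixel_centers(img, 1.25, 1.25)
    assert abs(value - 4.5) < 1e-6
    return value
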
def grid_sample_roi_align(input, boxes, output_size, scale, sampling_ratio):
# unlike true roi_align, this does not support different batch_idx
from detectron2.projects.point_rend.point_features import (
generate_regular_grid_point_coords,
get_point_coords_wrt_image,
point_sample,
)
N, _, H, W = input.shape
R = len(boxes)
assert N == 1
boxes = boxes * scale
grid = generate_regular_grid_point_coords(R, output_size * sampling_ratio, device=boxes.device)
coords = get_point_coords_wrt_image(boxes, grid)
coords = coords / torch.as_tensor([W, H], device=coords.device) # R, s^2, 2
res = point_sample(input, coords.unsqueeze(0), align_corners=False) # 1,C, R,s^2
res = (
res.squeeze(0)
.permute(1, 0, 2)
.reshape(R, -1, output_size * sampling_ratio, output_size * sampling_ratio)
)
res = F.avg_pool2d(res, sampling_ratio)
return res
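
# An illustrative usage sketch (single-image batch, unit scale; it mirrors
# test_grid_sample_equivalence above, and the helper name is ours):
# grid_sample_roi_align samples a dense (output_size * sampling_ratio)^2 point
# grid per box with grid_sample, then average-pools each sampling_ratio block,
# which reproduces roi_align's per-bin averaging for sampling_ratio > 0.
def _demo_grid_sample_vs_roi_align():
    img = torch.arange(25, dtype=torch.float32).reshape(1, 1, 5, 5)
    boxes = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
    out = grid_sample_roi_align(img, boxes, 5, 1.0, 2)
    # roi_align expects rois with a leading batch-index column.
    rois = torch.tensor([[0.0, 1.0, 1.0, 3.0, 3.0]])
    out_ref = roi_align(img, rois, 5, 1.0, 2, True)
    return torch.allclose(out, out_ref)  # expected: True
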
def benchmark_roi_align():
def random_boxes(mean_box, stdev, N, maxsize):
ret = torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float)
ret.clamp_(min=0, max=maxsize)
return ret
def func(shape, nboxes_per_img, sampling_ratio, device, box_size="large"):
N, _, H, _ = shape
input = torch.rand(*shape)
boxes = []
batch_idx = []
for k in range(N):
if box_size == "large":
b = random_boxes([80, 80, 130, 130], 24, nboxes_per_img, H)
else:
b = random_boxes([100, 100, 110, 110], 4, nboxes_per_img, H)
boxes.append(b)
batch_idx.append(torch.zeros(nboxes_per_img, 1, dtype=torch.float32) + k)
boxes = torch.cat(boxes, axis=0)
batch_idx = torch.cat(batch_idx, axis=0)
boxes = torch.cat([batch_idx, boxes], axis=1)
input = input.to(device=device)
boxes = boxes.to(device=device)
def bench():
if False and sampling_ratio > 0 and N == 1:
# enable to benchmark grid_sample (slower)
grid_sample_roi_align(input, boxes[:, 1:], 7, 1.0, sampling_ratio)
else:
roi_align(input, boxes, 7, 1.0, sampling_ratio, True)
if device == "cuda":
torch.cuda.synchronize()
return bench
def gen_args(arg):
args = []
for size in ["small", "large"]:
for ratio in [0, 2]:
args.append(copy(arg))
args[-1]["sampling_ratio"] = ratio
args[-1]["box_size"] = size
return args
arg = dict(shape=(1, 512, 256, 256), nboxes_per_img=512, device="cuda")
benchmark(func, "cuda_roialign", gen_args(arg), num_iters=20, warmup_iters=1)
arg.update({"device": "cpu", "shape": (1, 256, 128, 128)})
benchmark(func, "cpu_roialign", gen_args(arg), num_iters=5, warmup_iters=1)
if __name__ == "__main__":
if torch.cuda.is_available():
benchmark_roi_align()
unittest.main()
| banmo-main | third_party/detectron2_old/tests/layers/test_roi_align.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
import torch
from detectron2.layers import DeformConv, ModulatedDeformConv
class DeformableTest(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_forward_output(self):
device = torch.device("cuda")
N, C, H, W = shape = 1, 1, 5, 5
kernel_size = 3
padding = 1
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
# Test DCN v1
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
output = deform(inputs, offset)
output = output.detach().cpu().numpy()
deform_results = np.array(
[
[30, 41.25, 48.75, 45, 28.75],
[62.25, 81, 90, 80.25, 50.25],
[99.75, 126, 135, 117.75, 72.75],
[105, 131.25, 138.75, 120, 73.75],
[71.75, 89.25, 93.75, 80.75, 49.5],
]
)
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
# Test DCN v2
mask_channels = kernel_size * kernel_size
mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
device
)
modulate_deform.weight = deform.weight
output = modulate_deform(inputs, offset, mask)
output = output.detach().cpu().numpy()
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5))
def test_forward_output_on_cpu(self):
device = torch.device("cpu")
N, C, H, W = shape = 1, 1, 5, 5
kernel_size = 3
padding = 1
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
# Test DCN v1 on cpu
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
output = deform(inputs, offset)
output = output.detach().cpu().numpy()
deform_results = np.array(
[
[30, 41.25, 48.75, 45, 28.75],
[62.25, 81, 90, 80.25, 50.25],
[99.75, 126, 135, 117.75, 72.75],
[105, 131.25, 138.75, 120, 73.75],
[71.75, 89.25, 93.75, 80.75, 49.5],
]
)
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
@unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access")
def test_forward_output_on_cpu_equals_output_on_gpu(self):
N, C, H, W = shape = 2, 4, 10, 10
kernel_size = 3
padding = 1
for groups in [1, 2]:
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape)
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32)
deform_gpu = DeformConv(
C, C, kernel_size=kernel_size, padding=padding, groups=groups
).to("cuda")
deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight))
output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy()
deform_cpu = DeformConv(
C, C, kernel_size=kernel_size, padding=padding, groups=groups
).to("cpu")
deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight))
output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy()
self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten()))
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_small_input(self):
device = torch.device("cuda")
for kernel_size in [3, 5]:
padding = kernel_size // 2
N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1)
inputs = torch.rand(shape).to(device) # input size is smaller than kernel size
offset_channels = kernel_size * kernel_size * 2
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
output = deform(inputs, offset)
self.assertTrue(output.shape == inputs.shape)
mask_channels = kernel_size * kernel_size
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(
C, C, kernel_size, padding=padding, bias=False
).to(device)
output = modulate_deform(inputs, offset, mask)
self.assertTrue(output.shape == inputs.shape)
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_raise_exception(self):
device = torch.device("cuda")
N, C, H, W = shape = 1, 1, 3, 3
kernel_size = 3
padding = 1
inputs = torch.rand(shape, dtype=torch.float32).to(device)
offset_channels = kernel_size * kernel_size # This is wrong channels for offset
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
self.assertRaises(RuntimeError, deform, inputs, offset)
offset_channels = kernel_size * kernel_size * 2
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
device
)
self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask)
def test_repr(self):
module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
correct_string = (
"DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
"stride=(1, 1), padding=(1, 1), dilation=(1, 1), "
"groups=1, deformable_groups=2, bias=False)"
)
self.assertEqual(repr(module), correct_string)
module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
correct_string = (
"ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
"stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)"
)
self.assertEqual(repr(module), correct_string)
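
# A minimal shape sketch (CPU forward, as exercised by test_forward_output_on_cpu
# above; the helper name is ours): for a k x k kernel, DeformConv expects an
# offset tensor with 2 * k * k channels (two coordinate offsets per kernel tap),
# while ModulatedDeformConv additionally takes a k * k-channel mask.
def _demo_deform_conv_shapes(kernel_size=3):
    N, C, H, W = 1, 1, 5, 5
    inputs = torch.arange(N * C * H * W, dtype=torch.float32).reshape(N, C, H, W)
    offset = torch.zeros(N, 2 * kernel_size * kernel_size, H, W)
    deform = DeformConv(C, C, kernel_size=kernel_size, padding=kernel_size // 2)
    return deform(inputs, offset).shape  # torch.Size([1, 1, 5, 5])
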
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/layers/test_deformable.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import numpy as np
import unittest
from collections import defaultdict
import torch
import tqdm
from fvcore.common.benchmark import benchmark
from pycocotools.coco import COCO
from tabulate import tabulate
from torch.nn import functional as F
from detectron2.data import MetadataCatalog
from detectron2.layers.mask_ops import (
pad_masks,
paste_mask_in_image_old,
paste_masks_in_image,
scale_boxes,
)
from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks
from detectron2.structures.masks import polygons_to_bitmask
from detectron2.utils.file_io import PathManager
from detectron2.utils.testing import random_boxes
def iou_between_full_image_bit_masks(a, b):
intersect = (a & b).sum()
union = (a | b).sum()
return intersect / union
def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5):
x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
img_h, img_w = full_image_bit_mask.shape
mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5]
mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5]
mask_y = mask_y / mask_size * (y1 - y0) + y0
mask_x = mask_x / mask_size * (x1 - x0) + x0
mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1
mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1
gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x))
ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32)
full_image_bit_mask = torch.from_numpy(full_image_bit_mask)
mask = F.grid_sample(
full_image_bit_mask[None, None, :, :].to(dtype=torch.float32),
ind[None, :, :, :],
align_corners=True,
)
return mask[0, 0] >= threshold
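
# A small arithmetic check (illustrative only): with align_corners=True,
# grid_sample treats -1 as the center of the first pixel and +1 as the center of
# the last one, so the normalization above maps image x-coordinate 0.5 (first
# pixel center) to -1 and W - 0.5 (last pixel center) to +1.
def _demo_grid_sample_coord_normalization(img_w=30):
    first = (0.5 - 0.5) / (img_w - 1) * 2 + -1          # -1.0
    last = (img_w - 0.5 - 0.5) / (img_w - 1) * 2 + -1   # +1.0
    return first, last
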
class TestMaskCropPaste(unittest.TestCase):
def setUp(self):
json_file = MetadataCatalog.get("coco_2017_val_100").json_file
if not PathManager.isfile(json_file):
raise unittest.SkipTest("{} not found".format(json_file))
with contextlib.redirect_stdout(io.StringIO()):
json_file = PathManager.get_local_path(json_file)
self.coco = COCO(json_file)
def test_crop_paste_consistency(self):
"""
rasterize_polygons_within_box (used in training)
and
paste_masks_in_image (used in inference)
should be inverse operations to each other.
        This function runs several implementations of the above two operations and prints
the reconstruction error.
"""
anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations
selected_anns = anns[:100]
ious = []
for ann in tqdm.tqdm(selected_anns):
results = self.process_annotation(ann)
ious.append([k[2] for k in results])
ious = np.array(ious)
mean_ious = ious.mean(axis=0)
table = []
res_dic = defaultdict(dict)
for row, iou in zip(results, mean_ious):
table.append((row[0], row[1], iou))
res_dic[row[0]][row[1]] = iou
print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple"))
# assert that the reconstruction is good:
self.assertTrue(res_dic["polygon"]["aligned"] > 0.94)
self.assertTrue(res_dic["roialign"]["aligned"] > 0.95)
def process_annotation(self, ann, mask_side_len=28):
# Parse annotation data
img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0]
height, width = img_info["height"], img_info["width"]
gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]]
gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width)
# Run rasterize ..
torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4)
box_bitmasks = {
"polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0],
"gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len),
"roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize(
torch_gt_bbox, mask_side_len
)[0],
}
# Run paste ..
results = defaultdict(dict)
for k, box_bitmask in box_bitmasks.items():
padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1)
scaled_boxes = scale_boxes(torch_gt_bbox, scale)
r = results[k]
r["old"] = paste_mask_in_image_old(
padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5
)
r["aligned"] = paste_masks_in_image(
box_bitmask[None, :, :], Boxes(torch_gt_bbox), (height, width)
)[0]
table = []
for rasterize_method, r in results.items():
for paste_method, mask in r.items():
mask = np.asarray(mask)
iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask)
table.append((rasterize_method, paste_method, iou))
return table
def test_polygon_area(self):
# Draw polygon boxes
for d in [5.0, 10.0, 1000.0]:
polygon = PolygonMasks([[[0, 0, 0, d, d, d, d, 0]]])
area = polygon.area()[0]
target = d ** 2
self.assertEqual(area, target)
# Draw polygon triangles
for d in [5.0, 10.0, 1000.0]:
polygon = PolygonMasks([[[0, 0, 0, d, d, d]]])
area = polygon.area()[0]
target = d ** 2 / 2
self.assertEqual(area, target)
def test_paste_mask_scriptable(self):
scripted_f = torch.jit.script(paste_masks_in_image)
N = 10
masks = torch.rand(N, 28, 28)
boxes = Boxes(random_boxes(N, 100))
image_shape = (150, 150)
out = paste_masks_in_image(masks, boxes, image_shape)
scripted_out = scripted_f(masks, boxes, image_shape)
self.assertTrue(torch.equal(out, scripted_out))
def benchmark_paste():
S = 800
H, W = image_shape = (S, S)
N = 64
torch.manual_seed(42)
masks = torch.rand(N, 28, 28)
center = torch.rand(N, 2) * 600 + 100
wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50)
x0y0 = torch.clamp(center - wh * 0.5, min=0.0)
x1y1 = torch.clamp(center + wh * 0.5, max=S)
boxes = Boxes(torch.cat([x0y0, x1y1], axis=1))
def func(device, n=3):
m = masks.to(device=device)
b = boxes.to(device=device)
def bench():
for _ in range(n):
paste_masks_in_image(m, b, image_shape)
if device.type == "cuda":
torch.cuda.synchronize()
return bench
specs = [{"device": torch.device("cpu"), "n": 3}]
if torch.cuda.is_available():
specs.append({"device": torch.device("cuda"), "n": 3})
benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2)
if __name__ == "__main__":
benchmark_paste()
unittest.main()
| banmo-main | third_party/detectron2_old/tests/layers/test_mask_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import cv2
import torch
from torch.autograd import Variable, gradcheck
from detectron2.layers.roi_align import ROIAlign
from detectron2.layers.roi_align_rotated import ROIAlignRotated
logger = logging.getLogger(__name__)
class ROIAlignRotatedTest(unittest.TestCase):
def _box_to_rotated_box(self, box, angle):
return [
(box[0] + box[2]) / 2.0,
(box[1] + box[3]) / 2.0,
box[2] - box[0],
box[3] - box[1],
angle,
]
def _rot90(self, img, num):
num = num % 4 # note: -1 % 4 == 3
for _ in range(num):
img = img.transpose(0, 1).flip(0)
return img
def test_forward_output_0_90_180_270(self):
for i in range(4):
# i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees
img = torch.arange(25, dtype=torch.float32).reshape(5, 5)
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
box = [1, 1, 3, 3]
rotated_box = self._box_to_rotated_box(box=box, angle=90 * i)
result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4))
# Here's an explanation for 0 degree case:
# point 0 in the original input lies at [0.5, 0.5]
# (the center of bin [0, 1] x [0, 1])
# point 1 in the original input lies at [1.5, 0.5], etc.
# since the resolution is (4, 4) that divides [1, 3] x [1, 3]
# into 4 x 4 equal bins,
# the top-left bin is [1, 1.5] x [1, 1.5], and its center
# (1.25, 1.25) lies at the 3/4 position
# between point 0 and point 1, point 5 and point 6,
# point 0 and point 5, point 1 and point 6, so it can be calculated as
# 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5
result_expected = torch.tensor(
[
[4.5, 5.0, 5.5, 6.0],
[7.0, 7.5, 8.0, 8.5],
[9.5, 10.0, 10.5, 11.0],
[12.0, 12.5, 13.0, 13.5],
]
)
# This is also an upsampled version of [[6, 7], [11, 12]]
# When the box is rotated by 90 degrees CCW,
# the result would be rotated by 90 degrees CW, thus it's -i here
result_expected = self._rot90(result_expected, -i)
assert torch.allclose(result, result_expected)
def test_resize(self):
H, W = 30, 30
input = torch.rand(H, W) * 100
box = [10, 10, 20, 20]
rotated_box = self._box_to_rotated_box(box, angle=0)
output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5))
input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR)
input2x = torch.from_numpy(input2x)
box2x = [x / 2 for x in box]
rotated_box2x = self._box_to_rotated_box(box2x, angle=0)
output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5))
assert torch.allclose(output2x, output)
def _simple_roi_align_rotated(self, img, box, resolution):
"""
RoiAlignRotated with scale 1.0 and 0 sample ratio.
"""
op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0)
input = img[None, None, :, :]
rois = [0] + list(box)
rois = torch.tensor(rois, dtype=torch.float32)[None, :]
result_cpu = op.forward(input, rois)
if torch.cuda.is_available():
result_cuda = op.forward(input.cuda(), rois.cuda())
assert torch.allclose(result_cpu, result_cuda.cpu())
return result_cpu[0, 0]
def test_empty_box(self):
img = torch.rand(5, 5)
out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7))
self.assertTrue((out == 0).all())
def test_roi_align_rotated_gradcheck_cpu(self):
dtype = torch.float64
device = torch.device("cpu")
roi_align_rotated_op = ROIAlignRotated(
output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1
).to(dtype=dtype, device=device)
x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True)
# roi format is (batch index, x_center, y_center, width, height, angle)
rois = torch.tensor(
[[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]],
dtype=dtype,
device=device,
)
def func(input):
return roi_align_rotated_op(input, rois)
assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU"
assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU"
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_roi_align_rotated_gradient_cuda(self):
"""
Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU,
and compare the result with ROIAlign
"""
# torch.manual_seed(123)
dtype = torch.float64
device = torch.device("cuda")
pool_h, pool_w = (5, 5)
roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to(
device=device
)
roi_align_rotated = ROIAlignRotated(
output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2
).to(device=device)
x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True)
# x_rotated = x.clone() won't work (will lead to grad_fun=CloneBackward)!
x_rotated = Variable(x.data.clone(), requires_grad=True)
# roi_rotated format is (batch index, x_center, y_center, width, height, angle)
rois_rotated = torch.tensor(
[[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]],
dtype=dtype,
device=device,
)
y_rotated = roi_align_rotated(x_rotated, rois_rotated)
s_rotated = y_rotated.sum()
s_rotated.backward()
# roi format is (batch index, x1, y1, x2, y2)
rois = torch.tensor(
[[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device
)
y = roi_align(x, rois)
s = y.sum()
s.backward()
assert torch.allclose(
x.grad, x_rotated.grad
), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA"
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/layers/test_roi_align_rotated.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import unittest
from copy import deepcopy
import torch
from torchvision import ops
from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
def nms_edit_distance(keep1, keep2):
"""
    Compare the "keep" results of two nms calls.
    They are allowed to differ slightly, measured by edit distance,
    due to floating point precision issues, e.g.,
    if a box happens to have an IoU of exactly 0.5 with another box,
    one implementation may choose to keep it while another may discard it.
"""
keep1, keep2 = keep1.cpu(), keep2.cpu()
if torch.equal(keep1, keep2):
# they should be equal most of the time
return 0
keep1, keep2 = tuple(keep1), tuple(keep2)
m, n = len(keep1), len(keep2)
# edit distance with DP
f = [np.arange(n + 1), np.arange(n + 1)]
for i in range(m):
cur_row = i % 2
other_row = (i + 1) % 2
f[other_row][0] = i + 1
for j in range(n):
f[other_row][j + 1] = (
f[cur_row][j]
if keep1[i] == keep2[j]
else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1
)
return f[m % 2][n]
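
# A tiny usage sketch (toy indices, not from the tests below): two "keep" lists
# that differ only by one dropped index have an edit distance of 1, which is
# within the tolerance the assertions below allow.
def _demo_nms_edit_distance():
    keep1 = torch.tensor([0, 2, 3, 5])
    keep2 = torch.tensor([0, 2, 5])
    return nms_edit_distance(keep1, keep2)  # 1
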
class TestNMSRotated(unittest.TestCase):
def reference_horizontal_nms(self, boxes, scores, iou_threshold):
"""
Args:
            boxes (N, 4): boxes in (x1, y1, x2, y2) corner form.
            scores (N,): per-box probabilities.
            iou_threshold: intersection over union threshold.
Returns:
picked: a list of indexes of the kept boxes
"""
picked = []
_, indexes = scores.sort(descending=True)
while len(indexes) > 0:
current = indexes[0]
picked.append(current.item())
if len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[1:]
rest_boxes = boxes[indexes, :]
iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
indexes = indexes[iou <= iou_threshold]
return torch.as_tensor(picked)
def _create_tensors(self, N, device="cpu"):
boxes = random_boxes(N, 200, device=device)
scores = torch.rand(N, device=device)
return boxes, scores
def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"):
N = 2000
num_classes = 50
boxes, scores = self._create_tensors(N, device=device)
idxs = torch.randint(0, num_classes, (N,))
rotated_boxes = torch.zeros(N, 5, device=device)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
for iou in [0.2, 0.5, 0.8]:
backup = boxes.clone()
keep_ref = batched_nms(boxes, scores, idxs, iou)
assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
backup = rotated_boxes.clone()
keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou)
assert torch.allclose(
rotated_boxes, backup
), "rotated_boxes modified by batched_nms_rotated"
# Occasionally the gap can be large if there are many IOU on the threshold boundary
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batched_nms_rotated_0_degree_cuda(self):
self.test_batched_nms_rotated_0_degree_cpu(device="cuda")
def test_nms_rotated_0_degree_cpu(self, device="cpu"):
N = 1000
boxes, scores = self._create_tensors(N, device=device)
rotated_boxes = torch.zeros(N, 5, device=device)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_nms_rotated_0_degree_cuda(self):
self.test_nms_rotated_0_degree_cpu(device="cuda")
def test_nms_rotated_90_degrees_cpu(self):
N = 1000
boxes, scores = self._create_tensors(N)
rotated_boxes = torch.zeros(N, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
# Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]:
# widths and heights are intentionally swapped here for 90 degrees case
# so that the reference horizontal nms could be used
rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1]
rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 4] = torch.ones(N) * 90
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
def test_nms_rotated_180_degrees_cpu(self):
N = 1000
boxes, scores = self._create_tensors(N)
rotated_boxes = torch.zeros(N, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
rotated_boxes[:, 4] = torch.ones(N) * 180
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
class TestScriptable(unittest.TestCase):
def setUp(self):
class TestingModule(torch.nn.Module):
def forward(self, boxes, scores, threshold):
return nms_rotated(boxes, scores, threshold)
self.module = TestingModule()
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptable_cpu(self):
m = deepcopy(self.module).cpu()
_ = torch.jit.script(m)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptable_cuda(self):
m = deepcopy(self.module).cuda()
_ = torch.jit.script(m)
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/layers/test_nms_rotated.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List, Sequence, Tuple
import torch
from detectron2.structures import ImageList
from detectron2.utils.env import TORCH_VERSION
class TestImageList(unittest.TestCase):
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_padding_tracing(self):
# test that the trace does not contain hard-coded constant sizes
def to_imagelist(tensors: Sequence[torch.Tensor]):
image_list = ImageList.from_tensors(tensors, 4)
return image_list.tensor, image_list.image_sizes
def _tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
# test CHW (inputs needs padding vs. no padding)
for shape in [(3, 10, 10), (3, 12, 12)]:
func = torch.jit.trace(to_imagelist, ([_tensor(*shape)],))
tensor, image_sizes = func([_tensor(3, 15, 20)])
self.assertEqual(tensor.shape, (1, 3, 16, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0])
# test HW
func = torch.jit.trace(to_imagelist, ([_tensor(10, 10)],))
tensor, image_sizes = func([_tensor(15, 20)])
self.assertEqual(tensor.shape, (1, 16, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0])
# test 2x CHW
func = torch.jit.trace(
to_imagelist,
([_tensor(3, 16, 10), _tensor(3, 13, 11)],),
)
tensor, image_sizes = func([_tensor(3, 25, 20), _tensor(3, 10, 10)])
self.assertEqual(tensor.shape, (2, 3, 28, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [25, 20], image_sizes[0])
self.assertEqual(image_sizes[1].tolist(), [10, 10], image_sizes[1])
# support calling with different spatial sizes, but not with different #images
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_scriptability(self):
image_nums = 2
image_tensor = torch.randn((image_nums, 10, 20), dtype=torch.float32)
image_shape = [(10, 20)] * image_nums
def f(image_tensor, image_shape: List[Tuple[int, int]]):
return ImageList(image_tensor, image_shape)
ret = f(image_tensor, image_shape)
ret_script = torch.jit.script(f)(image_tensor, image_shape)
self.assertEqual(len(ret), len(ret_script))
for i in range(image_nums):
self.assertTrue(torch.equal(ret[i], ret_script[i]))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_from_tensors_scriptability(self):
image_tensor_0 = torch.randn(10, 20, dtype=torch.float32)
image_tensor_1 = torch.randn(12, 22, dtype=torch.float32)
inputs = [image_tensor_0, image_tensor_1]
def f(image_tensor: List[torch.Tensor]):
return ImageList.from_tensors(image_tensor, 10)
ret = f(inputs)
ret_script = torch.jit.script(f)(inputs)
self.assertEqual(len(ret), len(ret_script))
self.assertTrue(torch.equal(ret.tensor, ret_script.tensor))
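
# A short eager-mode sketch (it mirrors to_imagelist in the tracing test above;
# the helper name is ours): from_tensors pads the batch to the per-dimension
# maximum, rounded up to the given size divisibility, so one 3x15x20 image with
# divisibility 4 produces a (1, 3, 16, 20) tensor while image_sizes keeps (15, 20).
def _demo_imagelist_padding():
    image = torch.ones(3, 15, 20, dtype=torch.float32)
    image_list = ImageList.from_tensors([image], 4)
    return image_list.tensor.shape, image_list.image_sizes[0]
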
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/structures/test_imagelist.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask
class TestBitMask(unittest.TestCase):
def test_get_bounding_box(self):
masks = torch.tensor(
[
[
[False, False, False, True],
[False, False, True, True],
[False, True, True, False],
[False, True, True, False],
],
[
[False, False, False, False],
[False, False, True, False],
[False, True, True, False],
[False, True, True, False],
],
torch.zeros(4, 4),
]
)
bitmask = BitMasks(masks)
box_true = torch.tensor([[1, 0, 4, 4], [1, 1, 3, 4], [0, 0, 0, 0]], dtype=torch.float32)
box = bitmask.get_bounding_boxes()
self.assertTrue(torch.all(box.tensor == box_true).item())
for box in box_true:
poly = box[[0, 1, 2, 1, 2, 3, 0, 3]].numpy()
mask = polygons_to_bitmask([poly], 4, 4)
reconstruct_box = BitMasks(mask[None, :, :]).get_bounding_boxes()[0].tensor
self.assertTrue(torch.all(box == reconstruct_box).item())
reconstruct_box = PolygonMasks([[poly]]).get_bounding_boxes()[0].tensor
self.assertTrue(torch.all(box == reconstruct_box).item())
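
# A small sketch (toy values, mirroring the loop in test_get_bounding_box above):
# a box [x0, y0, x1, y1] can be written as the flat polygon
# [x0, y0, x1, y0, x1, y1, x0, y1]; rasterizing it with polygons_to_bitmask
# yields a mask whose bounding box recovers the original coordinates.
def _demo_box_as_flat_polygon():
    box = torch.tensor([1.0, 0.0, 4.0, 4.0])
    poly = box[[0, 1, 2, 1, 2, 3, 0, 3]].numpy()  # x0, y0, x1, y0, x1, y1, x0, y1
    return polygons_to_bitmask([poly], 4, 4)
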
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/structures/test_masks.py |
| banmo-main | third_party/detectron2_old/tests/structures/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import random
import unittest
import torch
from fvcore.common.benchmark import benchmark
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from detectron2.structures.boxes import Boxes
from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import reload_script_model
logger = logging.getLogger(__name__)
class TestRotatedBoxesLayer(unittest.TestCase):
def test_iou_0_dim_cpu(self):
boxes1 = torch.rand(0, 5, dtype=torch.float32)
boxes2 = torch.rand(10, 5, dtype=torch.float32)
expected_ious = torch.zeros(0, 10, dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
boxes1 = torch.rand(10, 5, dtype=torch.float32)
boxes2 = torch.rand(0, 5, dtype=torch.float32)
expected_ious = torch.zeros(10, 0, dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_0_dim_cuda(self):
boxes1 = torch.rand(0, 5, dtype=torch.float32)
boxes2 = torch.rand(10, 5, dtype=torch.float32)
expected_ious = torch.zeros(0, 10, dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
boxes1 = torch.rand(10, 5, dtype=torch.float32)
boxes2 = torch.rand(0, 5, dtype=torch.float32)
expected_ious = torch.zeros(10, 0, dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
def test_iou_half_overlap_cpu(self):
boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_half_overlap_cuda(self):
boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
def test_iou_precision(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device)
boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device)
iou = 8.3 / 10.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_too_many_boxes_cuda(self):
s1, s2 = 5, 1289035
boxes1 = torch.zeros(s1, 5)
boxes2 = torch.zeros(s2, 5)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2))
def test_iou_extreme(self):
# Cause floating point issues in cuda kernels (#1266)
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
boxes2 = torch.tensor(
[
[
-1.117407639806935e17,
1.3858420478349148e18,
1000.0000610351562,
1000.0000610351562,
1612.0,
]
],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(ious.min() >= 0, ious)
def test_iou_issue_2154(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[
296.6620178222656,
458.73883056640625,
23.515729904174805,
47.677001953125,
0.08795166015625,
]
],
device=device,
)
boxes2 = torch.tensor(
[[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
def test_iou_issue_2167(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[
2563.74462890625000000000,
1436.79016113281250000000,
2174.70336914062500000000,
214.09500122070312500000,
115.11834716796875000000,
]
],
device=device,
)
boxes2 = torch.tensor(
[
[
2563.74462890625000000000,
1436.79028320312500000000,
2174.70288085937500000000,
214.09495544433593750000,
115.11835479736328125000,
]
],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
class TestRotatedBoxesStructure(unittest.TestCase):
def test_clip_area_0_degree(self):
for _ in range(50):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
# Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2)
boxes_4d = torch.zeros(num_boxes, 4)
boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0
boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0
boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0
boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0
image_size = (500, 600)
test_boxes_4d = Boxes(boxes_4d)
test_boxes_5d = RotatedBoxes(boxes_5d)
# Before clip
areas_4d = test_boxes_4d.area()
areas_5d = test_boxes_5d.area()
self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
# After clip
test_boxes_4d.clip(image_size)
test_boxes_5d.clip(image_size)
areas_4d = test_boxes_4d.area()
areas_5d = test_boxes_5d.area()
self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
def test_clip_area_arbitrary_angle(self):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
clip_angle_threshold = random.uniform(0, 180)
image_size = (500, 600)
test_boxes_5d = RotatedBoxes(boxes_5d)
# Before clip
areas_before = test_boxes_5d.area()
# After clip
test_boxes_5d.clip(image_size, clip_angle_threshold)
areas_diff = test_boxes_5d.area() - areas_before
# the areas should only decrease after clipping
self.assertTrue(torch.all(areas_diff <= 0))
# whenever the box is clipped (thus the area shrinks),
# the angle for the box must be within the clip_angle_threshold
# Note that the clip function will normalize the angle range
# to be within (-180, 180]
self.assertTrue(
torch.all(torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold)
)
def test_normalize_angles(self):
# torch.manual_seed(0)
for _ in range(50):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
normalized_boxes = rotated_boxes.clone()
normalized_boxes.normalize_angles()
self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180))
self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180))
# x, y, w, h should not change
self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4]))
# the cos/sin values of the angles should stay the same
self.assertTrue(
torch.allclose(
torch.cos(boxes_5d[:, 4] * math.pi / 180),
torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180),
atol=1e-5,
)
)
self.assertTrue(
torch.allclose(
torch.sin(boxes_5d[:, 4] * math.pi / 180),
torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180),
atol=1e-5,
)
)
def test_pairwise_iou_0_degree(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor(
[
[0.5, 0.5, 1.0, 1.0, 0.0],
[0.25, 0.5, 0.5, 1.0, 0.0],
[0.5, 0.25, 1.0, 0.5, 0.0],
[0.25, 0.25, 0.5, 0.5, 0.0],
[0.75, 0.75, 0.5, 0.5, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
dtype=torch.float32,
device=device,
)
expected_ious = torch.tensor(
[
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
],
dtype=torch.float32,
device=device,
)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_45_degrees(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[1, 1, math.sqrt(2), math.sqrt(2), 45],
[1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45],
],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device)
expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_orthogonal(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device)
boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device)
iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0)
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_large_close_boxes(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor(
[[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]],
dtype=torch.float32,
device=device,
)
iou = 364.259155 / 364.259186
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_many_boxes(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
num_boxes1 = 100
num_boxes2 = 200
boxes1 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 10, 0],
dtype=torch.float32,
device=device,
)
for i in range(num_boxes1)
]
)
boxes2 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
dtype=torch.float32,
device=device,
)
for i in range(num_boxes2)
]
)
expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device)
for i in range(min(num_boxes1, num_boxes2)):
expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_issue1207_simplified(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
# Simplified test case of D2-issue-1207
boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device)
boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device)
iou = 0.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_issue1207(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
# The original test case in D2-issue-1207
boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device)
iou = 0.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_empty_cat(self):
x = RotatedBoxes.cat([])
        self.assertEqual(x.tensor.shape, (0, 5))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_scriptability(self):
def func(x):
boxes = RotatedBoxes(x)
test = boxes.to(torch.device("cpu")).tensor
return boxes.area(), test
f = torch.jit.script(func)
f = reload_script_model(f)
f(torch.rand((3, 5)))
data = torch.rand((3, 5))
def func_cat(x: torch.Tensor):
boxes1 = RotatedBoxes(x)
boxes2 = RotatedBoxes(x)
# this is not supported by torchscript for now.
# boxes3 = RotatedBoxes.cat([boxes1, boxes2])
boxes3 = boxes1.cat([boxes1, boxes2])
return boxes3
f = torch.jit.script(func_cat)
script_box = f(data)
self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))
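
# A small geometric sketch (toy values, not taken from the tests above): rotating
# a (w, h) box by 90 degrees about its center covers the same region as the
# axis-aligned (h, w) box, so their rotated IoU is, up to floating point, 1.0.
def _demo_rotated_90_degree_iou():
    box_a = torch.tensor([[5.0, 5.0, 10.0, 4.0, 0.0]])  # (cx, cy, w, h, angle)
    box_b = torch.tensor([[5.0, 5.0, 4.0, 10.0, 90.0]])
    return pairwise_iou(RotatedBoxes(box_a), RotatedBoxes(box_b))  # ~[[1.0]]
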
def benchmark_rotated_iou():
num_boxes1 = 200
num_boxes2 = 500
boxes1 = torch.stack(
[
torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32)
for i in range(num_boxes1)
]
)
boxes2 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
dtype=torch.float32,
)
for i in range(num_boxes2)
]
)
def func(dev, n=1):
b1 = boxes1.to(device=dev)
b2 = boxes2.to(device=dev)
def bench():
for _ in range(n):
pairwise_iou_rotated(b1, b2)
if dev.type == "cuda":
torch.cuda.synchronize()
return bench
# only run it once per timed loop, since it's slow
args = [{"dev": torch.device("cpu"), "n": 1}]
if torch.cuda.is_available():
args.append({"dev": torch.device("cuda"), "n": 10})
benchmark(func, "rotated_iou", args, warmup_iters=3)
if __name__ == "__main__":
    benchmark_rotated_iou()
    unittest.main()
| banmo-main | third_party/detectron2_old/tests/structures/test_rotated_boxes.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import math
import numpy as np
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, pairwise_ioa, pairwise_iou
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import reload_script_model
class TestBoxMode(unittest.TestCase):
def _convert_xy_to_wh(self, x):
return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
def _convert_xywha_to_xyxy(self, x):
return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
def _convert_xywh_to_xywha(self, x):
return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
def test_convert_int_mode(self):
BoxMode.convert([1, 2, 3, 4], 0, 1)
def test_box_convert_list(self):
for tp in [list, tuple]:
box = tp([5.0, 5.0, 10.0, 10.0])
output = self._convert_xy_to_wh(box)
self.assertIsInstance(output, tp)
self.assertIsInstance(output[0], float)
self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0]))
with self.assertRaises(Exception):
self._convert_xy_to_wh([box])
def test_box_convert_array(self):
box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_cpu_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
output = output.numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_box_convert_cuda_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda()
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertEqual(output.device, box.device)
output = output.cpu().numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_xywha_to_xyxy_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20, 0])
output = self._convert_xywha_to_xyxy(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([35, 40, 65, 60]))
with self.assertRaises(Exception):
self._convert_xywha_to_xyxy([box])
def test_box_convert_xywha_to_xyxy_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray(
[
[50, 50, 30, 20, 0],
[50, 50, 30, 20, 90],
[1, 1, math.sqrt(2), math.sqrt(2), -45],
],
dtype=dtype,
)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywha_to_xyxy_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor(
[
[50, 50, 30, 20, 0],
[50, 50, 30, 20, 90],
[1, 1, math.sqrt(2), math.sqrt(2), -45],
],
dtype=dtype,
)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywh_to_xywha_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20])
output = self._convert_xywh_to_xywha(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([65, 60, 30, 20, 0]))
with self.assertRaises(Exception):
self._convert_xywh_to_xywha([box])
def test_box_convert_xywh_to_xywha_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray(
[[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
)
self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywh_to_xywha_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor(
[[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
)
self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_json_serializable(self):
payload = {"box_mode": BoxMode.XYWH_REL}
try:
json.dumps(payload)
except Exception:
self.fail("JSON serialization failed")
def test_json_deserializable(self):
payload = '{"box_mode": 2}'
obj = json.loads(payload)
try:
obj["box_mode"] = BoxMode(obj["box_mode"])
except Exception:
self.fail("JSON deserialization failed")
class TestBoxIOU(unittest.TestCase):
def create_boxes(self):
boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
boxes2 = torch.tensor(
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 1.0, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.5, 1.5, 1.5],
]
)
return boxes1, boxes2
def test_pairwise_iou(self):
boxes1, boxes2 = self.create_boxes()
expected_ious = torch.tensor(
[
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
]
)
ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_ioa(self):
boxes1, boxes2 = self.create_boxes()
expected_ioas = torch.tensor(
[[1.0, 1.0, 1.0, 1.0, 1.0, 0.25], [1.0, 1.0, 1.0, 1.0, 1.0, 0.25]]
)
ioas = pairwise_ioa(Boxes(boxes1), Boxes(boxes2))
self.assertTrue(torch.allclose(ioas, expected_ioas))
class TestBoxes(unittest.TestCase):
def test_empty_cat(self):
x = Boxes.cat([])
        self.assertEqual(x.tensor.shape, (0, 4))
def test_to(self):
x = Boxes(torch.rand(3, 4))
self.assertEqual(x.to(device="cpu").tensor.device.type, "cpu")
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_scriptability(self):
def func(x):
boxes = Boxes(x)
test = boxes.to(torch.device("cpu")).tensor
return boxes.area(), test
f = torch.jit.script(func)
f = reload_script_model(f)
f(torch.rand((3, 4)))
data = torch.rand((3, 4))
def func_cat(x: torch.Tensor):
boxes1 = Boxes(x)
boxes2 = Boxes(x)
            # boxes3 = Boxes.cat([boxes1, boxes2])  # this is not supported by torchscript for now.
boxes3 = boxes1.cat([boxes1, boxes2])
return boxes3
f = torch.jit.script(func_cat)
script_box = f(data)
self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))
if __name__ == "__main__":
unittest.main()
| banmo-main | third_party/detectron2_old/tests/structures/test_boxes.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch import Tensor
from detectron2.export.torchscript import patch_instances
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import convert_scripted_instances
class TestInstances(unittest.TestCase):
def test_int_indexing(self):
attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]])
attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4])
instances = Instances((100, 100))
instances.attr1 = attr1
instances.attr2 = attr2
for i in range(-len(instances), len(instances)):
inst = instances[i]
self.assertEqual((inst.attr1 == attr1[i]).all(), True)
self.assertEqual((inst.attr2 == attr2[i]).all(), True)
self.assertRaises(IndexError, lambda: instances[len(instances)])
self.assertRaises(IndexError, lambda: instances[-len(instances) - 1])
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_new_fields(self):
def get_mask(x: Instances) -> torch.Tensor:
return x.mask
class f(torch.nn.Module):
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes # noqa F841
objectness_logits = x.objectness_logits # noqa F841
return x
class g(torch.nn.Module):
def forward(self, x: Instances):
return get_mask(x)
class g2(torch.nn.Module):
def __init__(self):
super().__init__()
self.g = g()
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes # noqa F841
return x, self.g(x)
fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor}
with patch_instances(fields):
torch.jit.script(f())
# can't script anymore after exiting the context
with self.assertRaises(Exception):
# will create a ConcreteType for g
torch.jit.script(g2())
new_fields = {"mask": Tensor}
with patch_instances(new_fields):
# will compile g with a different Instances; this should pass
torch.jit.script(g())
with self.assertRaises(Exception):
torch.jit.script(g2())
new_fields = {"mask": Tensor, "proposal_boxes": Boxes}
with patch_instances(new_fields) as NewInstances:
# get_mask will be compiled with a different Instances; this should pass
scripted_g2 = torch.jit.script(g2())
x = NewInstances((3, 4))
x.mask = torch.rand(3)
x.proposal_boxes = Boxes(torch.rand(3, 4))
scripted_g2(x) # it should accept the new Instances object and run successfully
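# Rough usage sketch of the pattern exercised above (an added illustration,
# assuming the same detectron2 APIs imported in this file): scripting code
# that touches dynamic Instances fields only works while the statically typed
# replacement class is patched in, e.g.
#
#   with patch_instances({"mask": Tensor}) as StaticInstances:
#       scripted = torch.jit.script(g())      # compiles against StaticInstances
#       inst = StaticInstances((480, 640))
#       inst.mask = torch.rand(3)
#       scripted(inst)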
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_access_fields(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes
objectness_logits = x.objectness_logits
return proposal_boxes.tensor + objectness_logits
fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor}
with patch_instances(fields):
torch.jit.script(f())
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_len(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return len(x)
class g(torch.nn.Module):
def forward(self, x: Instances):
return len(x)
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
with self.assertRaises(Exception):
script_module(x)
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
length = script_module(x)
self.assertEqual(length, 2)
fields = {"objectness_logits": Tensor}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(g())
x = new_instance(image_shape)
objectness_logits = torch.tensor([1.0]).reshape(1, 1)
x.objectness_logits = objectness_logits
length = script_module(x)
self.assertEqual(length, 1)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_has(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return x.has("proposal_boxes")
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
self.assertFalse(script_module(x))
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
self.assertTrue(script_module(x))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_script_to(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return x.to(torch.device("cpu"))
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes, "a": Tensor}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
script_module(x)
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
x.a = box_tensors
script_module(x)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_getitem(self):
class f(torch.nn.Module):
def forward(self, x: Instances, idx):
return x[idx]
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes, "a": Tensor}
inst = Instances(image_shape)
inst.proposal_boxes = Boxes(torch.rand(4, 4))
inst.a = torch.rand(4, 10)
idx = torch.tensor([True, False, True, False])
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
out = f()(inst, idx)
out_scripted = script_module(new_instance.from_instances(inst), idx)
self.assertTrue(
torch.equal(out.proposal_boxes.tensor, out_scripted.proposal_boxes.tensor)
)
self.assertTrue(torch.equal(out.a, out_scripted.a))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_from_to_instances(self):
orig = Instances((30, 30))
orig.proposal_boxes = Boxes(torch.rand(3, 4))
fields = {"proposal_boxes": Boxes, "a": Tensor}
with patch_instances(fields) as NewInstances:
# convert to NewInstances and back
new1 = NewInstances.from_instances(orig)
new2 = convert_scripted_instances(new1)
self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new1.proposal_boxes.tensor))
self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new2.proposal_boxes.tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_instances.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import yaml
from omegaconf import OmegaConf
from omegaconf import __version__ as oc_version
from detectron2.config import instantiate, LazyCall as L
from detectron2.layers import ShapeSpec
OC_VERSION = tuple(int(x) for x in oc_version.split(".")[:2])
class TestClass:
def __init__(self, int_arg, list_arg=None, dict_arg=None, extra_arg=None):
self.int_arg = int_arg
self.list_arg = list_arg
self.dict_arg = dict_arg
self.extra_arg = extra_arg
def __call__(self, call_arg):
return call_arg + self.int_arg
@unittest.skipIf(OC_VERSION < (2, 1), "omegaconf version too old")
class TestConstruction(unittest.TestCase):
def test_basic_construct(self):
objconf = L(TestClass)(
int_arg=3,
list_arg=[10],
dict_arg={},
extra_arg=L(TestClass)(int_arg=4, list_arg="${..list_arg}"),
)
obj = instantiate(objconf)
self.assertIsInstance(obj, TestClass)
self.assertEqual(obj.int_arg, 3)
self.assertEqual(obj.extra_arg.int_arg, 4)
self.assertEqual(obj.extra_arg.list_arg, obj.list_arg)
objconf.extra_arg.list_arg = [5]
obj = instantiate(objconf)
self.assertIsInstance(obj, TestClass)
self.assertEqual(obj.extra_arg.list_arg, [5])
def test_instantiate_other_obj(self):
# do nothing for other obj
self.assertEqual(instantiate(5), 5)
x = [3, 4, 5]
self.assertEqual(instantiate(x), x)
x = TestClass(1)
self.assertIs(instantiate(x), x)
x = {"xx": "yy"}
self.assertIs(instantiate(x), x)
def test_instantiate_lazy_target(self):
# _target_ is result of instantiate
objconf = L(L(len)(int_arg=3))(call_arg=4)
objconf._target_._target_ = TestClass
self.assertEqual(instantiate(objconf), 7)
def test_instantiate_lst(self):
lst = [1, 2, L(TestClass)(int_arg=1)]
x = L(TestClass)(int_arg=lst) # list as an argument should be recursively instantiated
x = instantiate(x).int_arg
self.assertEqual(x[:2], [1, 2])
self.assertIsInstance(x[2], TestClass)
self.assertEqual(x[2].int_arg, 1)
def test_instantiate_namedtuple(self):
x = L(TestClass)(int_arg=ShapeSpec(channels=1, width=3))
# test serialization
with tempfile.TemporaryDirectory() as d:
fname = os.path.join(d, "d2_test.yaml")
OmegaConf.save(x, fname)
with open(fname) as f:
x = yaml.unsafe_load(f)
x = instantiate(x)
self.assertIsInstance(x.int_arg, ShapeSpec)
self.assertEqual(x.int_arg.channels, 1)
def test_bad_lazycall(self):
with self.assertRaises(Exception):
L(3)
|
banmo-main
|
third_party/detectron2_old/tests/config/test_instantiate_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from itertools import count
from detectron2.config import LazyCall as L
from .dir1.dir1_a import dir1a_dict, dir1a_str
dir1a_dict.a = "modified"
# modification above won't affect future imports
from .dir1.dir1_b import dir1b_dict, dir1b_str
lazyobj = L(count)(x=dir1a_str, y=dir1b_str)
|
banmo-main
|
third_party/detectron2_old/tests/config/root_cfg.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import torch
from omegaconf import OmegaConf
from detectron2 import model_zoo
from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_model
_V0_CFG = """
MODEL:
RPN_HEAD:
NAME: "TEST"
VERSION: 0
"""
_V1_CFG = """
MODEL:
WEIGHT: "/path/to/weight"
"""
class TestConfigVersioning(unittest.TestCase):
def test_upgrade_downgrade_consistency(self):
cfg = get_cfg()
# check that custom is preserved
cfg.USER_CUSTOM = 1
down = downgrade_config(cfg, to_version=0)
up = upgrade_config(down)
self.assertEqual(up, cfg)
def _merge_cfg_str(self, cfg, merge_str):
f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False)
try:
f.write(merge_str)
f.close()
cfg.merge_from_file(f.name)
finally:
os.remove(f.name)
return cfg
def test_auto_upgrade(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
cfg.USER_CUSTOM = 1
self._merge_cfg_str(cfg, _V0_CFG)
self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST")
self.assertEqual(cfg.VERSION, latest_ver)
def test_guess_v1(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
self._merge_cfg_str(cfg, _V1_CFG)
self.assertEqual(cfg.VERSION, latest_ver)
class _TestClassA(torch.nn.Module):
@configurable
def __init__(self, arg1, arg2, arg3=3):
super().__init__()
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
assert arg1 == 1
assert arg2 == 2
assert arg3 == 3
@classmethod
def from_config(cls, cfg):
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
return args
class _TestClassB(_TestClassA):
@configurable
def __init__(self, input_shape, arg1, arg2, arg3=3):
"""
Doc of _TestClassB
"""
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
@classmethod
def from_config(cls, cfg, input_shape): # test extra positional arg in from_config
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
return args
class _LegacySubClass(_TestClassB):
# an old subclass written in cfg style
def __init__(self, cfg, input_shape, arg4=4):
super().__init__(cfg, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _NewSubClassNewInit(_TestClassB):
# test new subclass with a new __init__
@configurable
def __init__(self, input_shape, arg4=4, **kwargs):
super().__init__(input_shape, **kwargs)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _LegacySubClassNotCfg(_TestClassB):
# an old subclass written in cfg style, but argument is not called "cfg"
def __init__(self, config, input_shape):
super().__init__(config, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _TestClassC(_TestClassB):
@classmethod
def from_config(cls, cfg, input_shape, **kwargs): # test extra kwarg overwrite
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
args.update(kwargs)
return args
class _TestClassD(_TestClassA):
@configurable
def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
# _TestClassA.from_config does not take an input_shape arg.
# Test whether input_shape is forwarded to __init__
@configurable(from_config=lambda cfg, arg2: {"arg1": cfg.ARG1, "arg2": arg2, "arg3": cfg.ARG3})
def _test_func(arg1, arg2=2, arg3=3, arg4=4):
return arg1, arg2, arg3, arg4
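# Rough sketch of the calling convention tested below (an added illustration,
# not part of the original file): a @configurable __init__ can be invoked
# either with explicit arguments or with a config object, in which case
# from_config() translates cfg -> kwargs, and explicit kwargs override it:
#
#   a = _TestClassA(arg1=1, arg2=2, arg3=3)    # explicit args, from_config unused
#   cfg = get_cfg(); cfg.ARG1 = 1; cfg.ARG2 = 2
#   a = _TestClassA(cfg)                       # from_config(cfg) supplies arg1/arg2
#   a = _TestClassA(cfg, arg2=2, arg3=3)       # kwargs override cfg-derived values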
class TestConfigurable(unittest.TestCase):
def testInitWithArgs(self):
_ = _TestClassA(arg1=1, arg2=2, arg3=3)
_ = _TestClassB("shape", arg1=1, arg2=2)
_ = _TestClassC("shape", arg1=1, arg2=2)
_ = _TestClassD("shape", arg1=1, arg2=2, arg3=3)
def testPatchedAttr(self):
self.assertTrue("Doc" in _TestClassB.__init__.__doc__)
self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int)
def testInitWithCfg(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
cfg.ARG3 = 3
_ = _TestClassA(cfg)
_ = _TestClassB(cfg, input_shape="shape")
_ = _TestClassC(cfg, input_shape="shape")
_ = _TestClassD(cfg, input_shape="shape")
_ = _LegacySubClass(cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(cfg, input_shape="shape")
with self.assertRaises(TypeError):
# disallow forwarding positional args to __init__ since it's prone to errors
_ = _TestClassD(cfg, "shape")
# call with kwargs instead
_ = _TestClassA(cfg=cfg)
_ = _TestClassB(cfg=cfg, input_shape="shape")
_ = _TestClassC(cfg=cfg, input_shape="shape")
_ = _TestClassD(cfg=cfg, input_shape="shape")
_ = _LegacySubClass(cfg=cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg=cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(config=cfg, input_shape="shape")
def testInitWithCfgOverwrite(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 999 # wrong config
with self.assertRaises(AssertionError):
_ = _TestClassA(cfg, arg3=3)
# overwrite arg2 with correct config later:
_ = _TestClassA(cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3)
# call with kwargs cfg=cfg instead
_ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
def testInitWithCfgWrongArgs(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
with self.assertRaises(TypeError):
_ = _TestClassB(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassC(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassD(cfg, "shape", not_exist=1)
def testBadClass(self):
class _BadClass1:
@configurable
def __init__(self, a=1, b=2):
pass
class _BadClass2:
@configurable
def __init__(self, a=1, b=2):
pass
def from_config(self, cfg): # noqa
pass
class _BadClass3:
@configurable
def __init__(self, a=1, b=2):
pass
# bad name: must be cfg
@classmethod
def from_config(cls, config): # noqa
pass
with self.assertRaises(AttributeError):
_ = _BadClass1(a=1)
with self.assertRaises(TypeError):
_ = _BadClass2(a=1)
with self.assertRaises(TypeError):
_ = _BadClass3(get_cfg())
def testFuncWithCfg(self):
cfg = get_cfg()
cfg.ARG1 = 10
cfg.ARG3 = 30
self.assertEqual(_test_func(1), (1, 2, 3, 4))
with self.assertRaises(TypeError):
_test_func(cfg)
self.assertEqual(_test_func(cfg, arg2=2), (10, 2, 30, 4))
self.assertEqual(_test_func(cfg, arg1=100, arg2=20), (100, 20, 30, 4))
self.assertEqual(_test_func(cfg, arg1=100, arg2=20, arg4=40), (100, 20, 30, 40))
def testOmegaConf(self):
cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
cfg = OmegaConf.create(cfg.dump())
if not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
# test that a model can be built with omegaconf config as well
build_model(cfg)
|
banmo-main
|
third_party/detectron2_old/tests/config/test_yacs_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import tempfile
from itertools import count
from detectron2.config import LazyConfig, LazyCall as L
class TestLazyPythonConfig(unittest.TestCase):
def setUp(self):
self.root_filename = os.path.join(os.path.dirname(__file__), "root_cfg.py")
def test_load(self):
cfg = LazyConfig.load(self.root_filename)
self.assertEqual(cfg.dir1a_dict.a, "modified")
self.assertEqual(cfg.dir1b_dict.a, 1)
self.assertEqual(cfg.lazyobj.x, "base_a_1")
cfg.lazyobj.x = "new_x"
# reload
cfg = LazyConfig.load(self.root_filename)
self.assertEqual(cfg.lazyobj.x, "base_a_1")
def test_save_load(self):
cfg = LazyConfig.load(self.root_filename)
with tempfile.TemporaryDirectory(prefix="detectron2") as d:
fname = os.path.join(d, "test_config.yaml")
LazyConfig.save(cfg, fname)
cfg2 = LazyConfig.load(fname)
self.assertEqual(cfg2.lazyobj._target_, "itertools.count")
self.assertEqual(cfg.lazyobj._target_, count)
cfg2.lazyobj.pop("_target_")
cfg.lazyobj.pop("_target_")
# the rest are equal
self.assertEqual(cfg, cfg2)
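# Illustrative note (an assumption based on the assertions above): saving a
# LazyConfig serializes callable _target_ entries as dotted import strings, so
# a round trip changes only that representation:
#
#   cfg.lazyobj._target_ is itertools.count        # in memory: the callable
#   LazyConfig.save(cfg, "cfg.yaml")
#   cfg2 = LazyConfig.load("cfg.yaml")
#   cfg2.lazyobj._target_ == "itertools.count"     # reloaded: the dotted path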
def test_overrides(self):
cfg = LazyConfig.load(self.root_filename)
LazyConfig.apply_overrides(cfg, ["lazyobj.x=123", 'dir1b_dict.a="123"'])
self.assertEqual(cfg.dir1b_dict.a, "123")
self.assertEqual(cfg.lazyobj.x, 123)
def test_invalid_overrides(self):
cfg = LazyConfig.load(self.root_filename)
with self.assertRaises(KeyError):
LazyConfig.apply_overrides(cfg, ["lazyobj.x.xxx=123"])
def test_to_py(self):
cfg = LazyConfig.load(self.root_filename)
cfg.lazyobj.x = {"a": 1, "b": 2, "c": L(count)(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]})}
cfg.list = ["a", 1, "b", 3.2]
py_str = LazyConfig.to_py(cfg)
expected = """cfg.dir1a_dict.a = "modified"
cfg.dir1a_dict.b = 2
cfg.dir1b_dict.a = 1
cfg.dir1b_dict.b = 2
cfg.lazyobj = itertools.count(
x={
"a": 1,
"b": 2,
"c": itertools.count(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
},
y="base_a_1_from_b",
)
cfg.list = ["a", 1, "b", 3.2]
"""
self.assertEqual(py_str, expected)
|
banmo-main
|
third_party/detectron2_old/tests/config/test_lazy_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
dir1a_str = "base_a_1"
dir1a_dict = {"a": 1, "b": 2}
|
banmo-main
|
third_party/detectron2_old/tests/config/dir1/dir1_a.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import LazyConfig
# equivalent to relative import
dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))
dir1b_str = dir1a_str + "_from_b"
dir1b_dict = dir1a_dict
# Every import is a reload: not modified by other config files
assert dir1a_dict.a == 1
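# Illustrative sketch (added, not in the original file): because each
# load_rel() re-executes the target file, mutations made through one load do
# not leak into another, e.g.
#
#   _, d1 = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))
#   d1.a = "modified"
#   _, d2 = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))
#   assert d2.a == 1   # fresh copy; the earlier mutation does not leak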
|
banmo-main
|
third_party/detectron2_old/tests/config/dir1/dir1_b.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import detectron2.export.torchscript # apply patch # noqa
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_resnet_backbone
from detectron2.modeling.backbone.fpn import build_resnet_fpn_backbone
from detectron2.utils.env import TORCH_VERSION
class TestBackBone(unittest.TestCase):
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_resnet_scriptability(self):
cfg = get_cfg()
resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3))
scripted_resnet = torch.jit.script(resnet)
inp = torch.rand(2, 3, 100, 100)
out1 = resnet(inp)["res4"]
out2 = scripted_resnet(inp)["res4"]
self.assertTrue(torch.allclose(out1, out2))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_fpn_scriptability(self):
cfg = model_zoo.get_config("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
bb = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
bb_s = torch.jit.script(bb)
inp = torch.rand(2, 3, 128, 128)
out1 = bb(inp)["p5"]
out2 = bb_s(inp)["p5"]
self.assertTrue(torch.allclose(out1, out2))
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_backbone.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List
import torch
from detectron2.config import get_cfg
from detectron2.modeling.matcher import Matcher
class TestMatcher(unittest.TestCase):
def test_scriptability(self):
cfg = get_cfg()
anchor_matcher = Matcher(
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
)
match_quality_matrix = torch.tensor(
[[0.15, 0.45, 0.2, 0.6], [0.3, 0.65, 0.05, 0.1], [0.05, 0.4, 0.25, 0.4]]
)
expected_matches = torch.tensor([1, 1, 2, 0])
expected_match_labels = torch.tensor([-1, 1, 0, 1], dtype=torch.int8)
matches, match_labels = anchor_matcher(match_quality_matrix)
self.assertTrue(torch.allclose(matches, expected_matches))
self.assertTrue(torch.allclose(match_labels, expected_match_labels))
# nonzero_tuple must be imported explicitly so that jit knows what it is.
# https://github.com/pytorch/pytorch/issues/38964
from detectron2.layers import nonzero_tuple # noqa F401
def f(thresholds: List[float], labels: List[int]):
return Matcher(thresholds, labels, allow_low_quality_matches=True)
scripted_anchor_matcher = torch.jit.script(f)(
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS
)
matches, match_labels = scripted_anchor_matcher(match_quality_matrix)
self.assertTrue(torch.allclose(matches, expected_matches))
self.assertTrue(torch.allclose(match_labels, expected_match_labels))
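# Illustrative note (assuming the default RPN settings used above,
# IOU_THRESHOLDS = [0.3, 0.7] and IOU_LABELS = [0, -1, 1]): each prediction
# (column) is matched to the ground-truth row with the highest IoU, and that
# IoU decides the label: < 0.3 -> 0 (negative), in [0.3, 0.7) -> -1 (ignored),
# >= 0.7 -> 1 (positive). With allow_low_quality_matches=True, the best
# prediction for every ground-truth box is additionally promoted to 1, which
# is why columns 1 and 3 above end up positive despite IoUs below 0.7.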
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_matcher.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.modeling.poolers import ROIPooler, _fmt_box_list
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
logger = logging.getLogger(__name__)
class TestROIPooler(unittest.TestCase):
def _test_roialignv2_roialignrotated_match(self, device):
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor,)
sampling_ratio = 0
N, C, H, W = 2, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
features = [feature.to(device)]
rois = []
rois_rotated = []
for _ in range(N):
boxes = random_boxes(N_rois, W * canonical_scale_factor)
rotated_boxes = torch.zeros(N_rois, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
rois.append(Boxes(boxes).to(device))
rois_rotated.append(RotatedBoxes(rotated_boxes).to(device))
roialignv2_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignV2",
)
roialignv2_out = roialignv2_pooler(features, rois)
roialignrotated_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignRotated",
)
roialignrotated_out = roialignrotated_pooler(features, rois_rotated)
self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4))
def test_roialignv2_roialignrotated_match_cpu(self):
self._test_roialignv2_roialignrotated_match(device="cpu")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_roialignv2_roialignrotated_match_cuda(self):
self._test_roialignv2_roialignrotated_match(device="cuda")
def _test_scriptability(self, device):
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor,)
sampling_ratio = 0
N, C, H, W = 2, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
features = [feature.to(device)]
rois = []
for _ in range(N):
boxes = random_boxes(N_rois, W * canonical_scale_factor)
rois.append(Boxes(boxes).to(device))
roialignv2_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignV2",
)
roialignv2_out = roialignv2_pooler(features, rois)
scripted_roialignv2_out = torch.jit.script(roialignv2_pooler)(features, rois)
self.assertTrue(torch.equal(roialignv2_out, scripted_roialignv2_out))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptability_cpu(self):
self._test_scriptability(device="cpu")
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_scriptability_gpu(self):
self._test_scriptability(device="cuda")
def test_no_images(self):
N, C, H, W = 0, 32, 32, 32
feature = torch.rand(N, C, H, W) - 0.5
features = [feature]
pooler = ROIPooler(
output_size=14, scales=(1.0,), sampling_ratio=0.0, pooler_type="ROIAlignV2"
)
output = pooler.forward(features, [])
self.assertEqual(output.shape, (0, C, 14, 14))
def test_fmt_box_list_tracing(self):
class Model(torch.nn.Module):
def forward(self, box_tensor):
return _fmt_box_list(box_tensor, 0)
with torch.no_grad():
func = torch.jit.trace(Model(), torch.ones(10, 4))
self.assertEqual(func(torch.ones(10, 4)).shape, (10, 5))
self.assertEqual(func(torch.ones(5, 4)).shape, (5, 5))
self.assertEqual(func(torch.ones(20, 4)).shape, (20, 5))
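# Illustrative note (an added assumption for clarity): _fmt_box_list(boxes, i)
# prepends a constant batch-index column, turning an (N, 4) box tensor into
# the (N, 5) layout expected by the ROIAlign ops, e.g.
#
#   _fmt_box_list(torch.ones(2, 4), 0)
#   # -> tensor([[0., 1., 1., 1., 1.],
#   #            [0., 1., 1., 1., 1.]])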
def test_roi_pooler_tracing(self):
class Model(torch.nn.Module):
def __init__(self, roi):
super().__init__()
self.roi = roi
def forward(self, x, boxes):
return self.roi(x, [Boxes(boxes)])
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor, 0.5 / canonical_scale_factor)
sampling_ratio = 0
N, C, H, W = 1, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
feature = [feature, feature]
rois = random_boxes(N_rois, W * canonical_scale_factor)
# Add one larger box so that this level has only one box.
# This may trigger the bug https://github.com/pytorch/pytorch/issues/49852
# which we need to work around.
rois = torch.cat([rois, torch.tensor([[0, 0, 448, 448]])])
model = Model(
ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlign",
)
)
with torch.no_grad():
func = torch.jit.trace(model, (feature, rois))
o = func(feature, rois)
self.assertEqual(o.shape, (11, 4, 14, 14))
o = func(feature, rois[:5])
self.assertEqual(o.shape, (5, 4, 14, 14))
o = func(feature, random_boxes(20, W * canonical_scale_factor))
self.assertEqual(o.shape, (20, 4, 14, 14))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_roi_pooler.py
|
import unittest
from detectron2.layers import ShapeSpec
from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector
try:
import mmdet.models # noqa
HAS_MMDET = True
except ImportError:
HAS_MMDET = False
@unittest.skipIf(not HAS_MMDET, "mmdet not available")
class TestMMDetWrapper(unittest.TestCase):
def test_backbone(self):
MMDetBackbone(
backbone=dict(
type="DetectoRS_ResNet",
conv_cfg=dict(type="ConvAWS"),
sac=dict(type="SAC", use_deform=True),
stage_with_sac=(False, True, True, True),
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(
type="FPN",
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
),
# skip pretrained model for tests
# pretrained_backbone="torchvision://resnet50",
output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]],
output_names=["p2", "p3", "p4", "p5", "p6"],
)
def test_detector(self):
# a basic R50 Mask R-CNN
MMDetDetector(
detector=dict(
type="MaskRCNN",
# skip pretrained model for tests
# pretrained="torchvision://resnet50",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(
type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
),
rpn_head=dict(
type="RPNHead",
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type="AnchorGenerator",
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
),
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
roi_head=dict(
type="StandardRoIHead",
bbox_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
bbox_head=dict(
type="Shared2FCBBoxHead",
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=False,
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
mask_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
mask_head=dict(
type="FCNMaskHead",
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False,
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
),
mask_size=28,
pos_weight=-1,
debug=False,
),
),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
score_thr=0.05,
nms=dict(type="nms", iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5,
),
),
),
pixel_mean=[1, 2, 3],
pixel_std=[1, 2, 3],
)
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_mmdet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import numpy as np
import unittest
from contextlib import contextmanager
from copy import deepcopy
import torch
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import EventStorage
from detectron2.utils.testing import get_model_no_weights
@contextmanager
def typecheck_hook(model, *, in_dtype=None, out_dtype=None):
"""
Check that the model must be called with the given input/output dtype
"""
if not isinstance(in_dtype, set):
in_dtype = {in_dtype}
if not isinstance(out_dtype, set):
out_dtype = {out_dtype}
def flatten(x):
if isinstance(x, torch.Tensor):
return [x]
if isinstance(x, (list, tuple)):
return list(itertools.chain(*[flatten(t) for t in x]))
if isinstance(x, dict):
return flatten(list(x.values()))
return []
def hook(module, input, output):
if in_dtype is not None:
dtypes = {x.dtype for x in flatten(input)}
assert (
dtypes == in_dtype
), f"Expected input dtype of {type(module)} is {in_dtype}. Got {dtypes} instead!"
if out_dtype is not None:
dtypes = {x.dtype for x in flatten(output)}
assert (
dtypes == out_dtype
), f"Expected output dtype of {type(module)} is {out_dtype}. Got {dtypes} instead!"
with model.register_forward_hook(hook):
yield
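# Rough usage sketch of typecheck_hook (an added illustration; it assumes a
# model with a `backbone` submodule, as in the autocast tests below): the hook
# asserts on dtypes every time the wrapped module runs, e.g.
#
#   with torch.cuda.amp.autocast(), typecheck_hook(
#       model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
#   ):
#       model(inputs)   # raises AssertionError if the backbone sees other dtypes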
def create_model_input(img, inst=None):
if inst is not None:
return {"image": img, "instances": inst}
else:
return {"image": img}
def get_empty_instance(h, w):
inst = Instances((h, w))
inst.gt_boxes = Boxes(torch.rand(0, 4))
inst.gt_classes = torch.tensor([]).to(dtype=torch.int64)
inst.gt_masks = BitMasks(torch.rand(0, h, w))
return inst
def get_regular_bitmask_instances(h, w):
inst = Instances((h, w))
inst.gt_boxes = Boxes(torch.rand(3, 4))
inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2]
inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64)
inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5))
return inst
class ModelE2ETest:
def setUp(self):
torch.manual_seed(43)
self.model = get_model_no_weights(self.CONFIG_PATH)
def _test_eval(self, input_sizes):
inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
self.model.eval()
self.model(inputs)
def _test_train(self, input_sizes, instances):
assert len(input_sizes) == len(instances)
inputs = [
create_model_input(torch.rand(3, s[0], s[1]), inst)
for s, inst in zip(input_sizes, instances)
]
self.model.train()
with EventStorage():
losses = self.model(inputs)
sum(losses.values()).backward()
del losses
def _inf_tensor(self, *shape):
return 1.0 / torch.zeros(*shape, device=self.model.device)
def _nan_tensor(self, *shape):
return torch.zeros(*shape, device=self.model.device).fill_(float("nan"))
def test_empty_data(self):
instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
self._test_eval([(200, 250), (200, 249)])
self._test_train([(200, 250), (200, 249)], instances)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
def test_eval_tocpu(self):
model = deepcopy(self.model).cpu()
model.eval()
input_sizes = [(200, 250), (200, 249)]
inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
model(inputs)
class MaskRCNNE2ETest(ModelE2ETest, unittest.TestCase):
CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
def test_half_empty_data(self):
instances = [get_empty_instance(200, 250), get_regular_bitmask_instances(200, 249)]
self._test_train([(200, 250), (200, 249)], instances)
# This test is flaky because in some environments the output features are zero due to relu
# def test_rpn_inf_nan_data(self):
# self.model.eval()
# for tensor in [self._inf_tensor, self._nan_tensor]:
# images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
# features = {
# "p2": tensor(1, 256, 256, 256),
# "p3": tensor(1, 256, 128, 128),
# "p4": tensor(1, 256, 64, 64),
# "p5": tensor(1, 256, 32, 32),
# "p6": tensor(1, 256, 16, 16),
# }
# props, _ = self.model.proposal_generator(images, features)
# self.assertEqual(len(props[0]), 0)
def test_roiheads_inf_nan_data(self):
self.model.eval()
for tensor in [self._inf_tensor, self._nan_tensor]:
images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
features = {
"p2": tensor(1, 256, 256, 256),
"p3": tensor(1, 256, 128, 128),
"p4": tensor(1, 256, 64, 64),
"p5": tensor(1, 256, 32, 32),
"p6": tensor(1, 256, 16, 16),
}
props = [Instances((510, 510))]
props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device)
props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1)
det, _ = self.model.roi_heads(images, features, props)
self.assertEqual(len(det[0]), 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_autocast(self):
from torch.cuda.amp import autocast
inputs = [{"image": torch.rand(3, 100, 100)}]
self.model.eval()
with autocast(), typecheck_hook(
self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
), typecheck_hook(
self.model.roi_heads.box_predictor, in_dtype=torch.float16, out_dtype=torch.float16
):
out = self.model.inference(inputs, do_postprocess=False)[0]
self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
self.assertEqual(out.pred_masks.dtype, torch.float16)
self.assertEqual(out.scores.dtype, torch.float32) # scores come from softmax
class RetinaNetE2ETest(ModelE2ETest, unittest.TestCase):
CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml"
def test_inf_nan_data(self):
self.model.eval()
self.model.score_threshold = -999999999
for tensor in [self._inf_tensor, self._nan_tensor]:
images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
features = [
tensor(1, 256, 128, 128),
tensor(1, 256, 64, 64),
tensor(1, 256, 32, 32),
tensor(1, 256, 16, 16),
tensor(1, 256, 8, 8),
]
anchors = self.model.anchor_generator(features)
_, pred_anchor_deltas = self.model.head(features)
HWAs = [np.prod(x.shape[-3:]) // 4 for x in pred_anchor_deltas]
pred_logits = [tensor(1, HWA, self.model.num_classes) for HWA in HWAs]
pred_anchor_deltas = [tensor(1, HWA, 4) for HWA in HWAs]
det = self.model.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
# all predictions (if any) are infinite or nan
if len(det[0]):
self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_autocast(self):
from torch.cuda.amp import autocast
inputs = [{"image": torch.rand(3, 100, 100)}]
self.model.eval()
with autocast(), typecheck_hook(
self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
), typecheck_hook(self.model.head, in_dtype=torch.float16, out_dtype=torch.float16):
out = self.model(inputs)[0]["instances"]
self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
self.assertEqual(out.scores.dtype, torch.float16)
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_model_e2e.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator
logger = logging.getLogger(__name__)
class TestAnchorGenerator(unittest.TestCase):
def test_default_anchor_generator(self):
cfg = get_cfg()
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)])
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
anchors = anchor_generator([features["stage3"]])
expected_anchor_tensor = torch.tensor(
[
[-32.0, -8.0, 32.0, 8.0],
[-16.0, -16.0, 16.0, 16.0],
[-8.0, -32.0, 8.0, 32.0],
[-64.0, -16.0, 64.0, 16.0],
[-32.0, -32.0, 32.0, 32.0],
[-16.0, -64.0, 16.0, 64.0],
[-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4)
[-12.0, -16.0, 20.0, 16.0],
[-4.0, -32.0, 12.0, 32.0],
[-60.0, -16.0, 68.0, 16.0],
[-28.0, -32.0, 36.0, 32.0],
[-12.0, -64.0, 20.0, 64.0],
]
)
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
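# Illustrative note on the expected tensor above (an added assumption): each
# anchor keeps the area of its size while reshaping to the aspect ratio
# (ratio = h / w), so w = size / sqrt(ratio) and h = size * sqrt(ratio).
# For size 32 and ratio 0.25: w = 64, h = 16, giving the corner-centered box
# [-32, -8, 32, 8]; the second grid cell repeats the same anchors shifted by
# the stride of 4.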
def test_default_anchor_generator_centered(self):
# test explicit args
anchor_generator = DefaultAnchorGenerator(
sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4]
)
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
expected_anchor_tensor = torch.tensor(
[
[-30.0, -6.0, 34.0, 10.0],
[-14.0, -14.0, 18.0, 18.0],
[-6.0, -30.0, 10.0, 34.0],
[-62.0, -14.0, 66.0, 18.0],
[-30.0, -30.0, 34.0, 34.0],
[-14.0, -62.0, 18.0, 66.0],
[-26.0, -6.0, 38.0, 10.0],
[-10.0, -14.0, 22.0, 18.0],
[-2.0, -30.0, 14.0, 34.0],
[-58.0, -14.0, 70.0, 18.0],
[-26.0, -30.0, 38.0, 34.0],
[-10.0, -62.0, 22.0, 66.0],
]
)
anchors = anchor_generator([features["stage3"]])
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
anchors = torch.jit.script(anchor_generator)([features["stage3"]])
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
def test_rrpn_anchor_generator(self):
cfg = get_cfg()
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float]
anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)])
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
anchors = anchor_generator([features["stage3"]])
expected_anchor_tensor = torch.tensor(
[
[0.0, 0.0, 64.0, 16.0, 0.0],
[0.0, 0.0, 64.0, 16.0, 45.0],
[0.0, 0.0, 32.0, 32.0, 0.0],
[0.0, 0.0, 32.0, 32.0, 45.0],
[0.0, 0.0, 16.0, 64.0, 0.0],
[0.0, 0.0, 16.0, 64.0, 45.0],
[0.0, 0.0, 128.0, 32.0, 0.0],
[0.0, 0.0, 128.0, 32.0, 45.0],
[0.0, 0.0, 64.0, 64.0, 0.0],
[0.0, 0.0, 64.0, 64.0, 45.0],
[0.0, 0.0, 32.0, 128.0, 0.0],
[0.0, 0.0, 32.0, 128.0, 45.0],
[4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4)
[4.0, 0.0, 64.0, 16.0, 45.0],
[4.0, 0.0, 32.0, 32.0, 0.0],
[4.0, 0.0, 32.0, 32.0, 45.0],
[4.0, 0.0, 16.0, 64.0, 0.0],
[4.0, 0.0, 16.0, 64.0, 45.0],
[4.0, 0.0, 128.0, 32.0, 0.0],
[4.0, 0.0, 128.0, 32.0, 45.0],
[4.0, 0.0, 64.0, 64.0, 0.0],
[4.0, 0.0, 64.0, 64.0, 45.0],
[4.0, 0.0, 32.0, 128.0, 0.0],
[4.0, 0.0, 32.0, 128.0, 45.0],
]
)
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_anchor_generator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
from copy import deepcopy
import torch
from torch import nn
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.export.torchscript_patch import (
freeze_training_mode,
patch_builtin_len,
patch_instances,
)
from detectron2.layers import ShapeSpec
from detectron2.modeling.proposal_generator.build import build_proposal_generator
from detectron2.modeling.roi_heads import (
FastRCNNConvFCHead,
KRCNNConvDeconvUpsampleHead,
MaskRCNNConvUpsampleHead,
StandardROIHeads,
build_roi_heads,
)
from detectron2.projects import point_rend
from detectron2.structures import BitMasks, Boxes, ImageList, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
from detectron2.utils.testing import assert_instances_allclose, random_boxes
logger = logging.getLogger(__name__)
"""
Make sure the losses of ROIHeads/RPN do not change, to avoid
breaking the forward logic by mistake.
This relies on the assumption that pytorch's RNG is stable.
"""
class ROIHeadsTest(unittest.TestCase):
def test_roi_heads(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
cfg.MODEL.MASK_ON = True
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
image_shape = (15, 15)
gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
gt_instance0 = Instances(image_shape)
gt_instance0.gt_boxes = Boxes(gt_boxes0)
gt_instance0.gt_classes = torch.tensor([2, 1])
gt_instance0.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
gt_instance1 = Instances(image_shape)
gt_instance1.gt_boxes = Boxes(gt_boxes1)
gt_instance1.gt_classes = torch.tensor([1, 2])
gt_instance1.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
gt_instances = [gt_instance0, gt_instance1]
proposal_generator = build_proposal_generator(cfg, feature_shape)
roi_heads = StandardROIHeads(cfg, feature_shape)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(images, features, gt_instances)
_, detector_losses = roi_heads(images, features, proposals, gt_instances)
detector_losses.update(proposal_losses)
expected_losses = {
"loss_cls": 4.5253729820251465,
"loss_box_reg": 0.009785720147192478,
"loss_mask": 0.693184494972229,
"loss_rpn_cls": 0.08186662942171097,
"loss_rpn_loc": 0.1104838103055954,
}
succ = all(
torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
for name in detector_losses.keys()
)
self.assertTrue(
succ,
"Losses has changed! New losses: {}".format(
{k: v.item() for k, v in detector_losses.items()}
),
)
def test_rroi_heads(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
image_shape = (15, 15)
gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32)
gt_instance0 = Instances(image_shape)
gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0)
gt_instance0.gt_classes = torch.tensor([2, 1])
gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32)
gt_instance1 = Instances(image_shape)
gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1)
gt_instance1.gt_classes = torch.tensor([1, 2])
gt_instances = [gt_instance0, gt_instance1]
proposal_generator = build_proposal_generator(cfg, feature_shape)
roi_heads = build_roi_heads(cfg, feature_shape)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(images, features, gt_instances)
_, detector_losses = roi_heads(images, features, proposals, gt_instances)
detector_losses.update(proposal_losses)
expected_losses = {
"loss_cls": 4.365657806396484,
"loss_box_reg": 0.0015851043863222003,
"loss_rpn_cls": 0.2427729219198227,
"loss_rpn_loc": 0.3646621108055115,
}
succ = all(
torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
for name in detector_losses.keys()
)
self.assertTrue(
succ,
"Losses has changed! New losses: {}".format(
{k: v.item() for k, v in detector_losses.items()}
),
)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_box_head_scriptability(self):
input_shape = ShapeSpec(channels=1024, height=14, width=14)
box_features = torch.randn(4, 1024, 14, 14)
box_head = FastRCNNConvFCHead(
input_shape, conv_dims=[512, 512], fc_dims=[1024, 1024]
).eval()
script_box_head = torch.jit.script(box_head)
origin_output = box_head(box_features)
script_output = script_box_head(box_features)
self.assertTrue(torch.equal(origin_output, script_output))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_mask_head_scriptability(self):
input_shape = ShapeSpec(channels=1024)
mask_features = torch.randn(4, 1024, 14, 14)
image_shapes = [(10, 10), (15, 15)]
pred_instance0 = Instances(image_shapes[0])
pred_classes0 = torch.tensor([1, 2, 3], dtype=torch.int64)
pred_instance0.pred_classes = pred_classes0
pred_instance1 = Instances(image_shapes[1])
pred_classes1 = torch.tensor([4], dtype=torch.int64)
pred_instance1.pred_classes = pred_classes1
mask_head = MaskRCNNConvUpsampleHead(
input_shape, num_classes=80, conv_dims=[256, 256]
).eval()
# pred_instance is modified in place during the inference pass of
# `MaskRCNNConvUpsampleHead`, hence the deepcopy below
origin_outputs = mask_head(mask_features, deepcopy([pred_instance0, pred_instance1]))
fields = {"pred_masks": torch.Tensor, "pred_classes": torch.Tensor}
with freeze_training_mode(mask_head), patch_instances(fields) as NewInstances:
script_mask_head = torch.jit.script(mask_head)
pred_instance0 = NewInstances.from_instances(pred_instance0)
pred_instance1 = NewInstances.from_instances(pred_instance1)
script_outputs = script_mask_head(mask_features, [pred_instance0, pred_instance1])
for origin_ins, script_ins in zip(origin_outputs, script_outputs):
assert_instances_allclose(origin_ins, script_ins, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_keypoint_head_scriptability(self):
input_shape = ShapeSpec(channels=1024, height=14, width=14)
keypoint_features = torch.randn(4, 1024, 14, 14)
image_shapes = [(10, 10), (15, 15)]
pred_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6], [1, 5, 2, 8]], dtype=torch.float32)
pred_instance0 = Instances(image_shapes[0])
pred_instance0.pred_boxes = Boxes(pred_boxes0)
pred_boxes1 = torch.tensor([[7, 3, 10, 5]], dtype=torch.float32)
pred_instance1 = Instances(image_shapes[1])
pred_instance1.pred_boxes = Boxes(pred_boxes1)
keypoint_head = KRCNNConvDeconvUpsampleHead(
input_shape, num_keypoints=17, conv_dims=[512, 512]
).eval()
origin_outputs = keypoint_head(
keypoint_features, deepcopy([pred_instance0, pred_instance1])
)
fields = {
"pred_boxes": Boxes,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
with freeze_training_mode(keypoint_head), patch_instances(fields) as NewInstances:
script_keypoint_head = torch.jit.script(keypoint_head)
pred_instance0 = NewInstances.from_instances(pred_instance0)
pred_instance1 = NewInstances.from_instances(pred_instance1)
script_outputs = script_keypoint_head(
keypoint_features, [pred_instance0, pred_instance1]
)
for origin_ins, script_ins in zip(origin_outputs, script_outputs):
assert_instances_allclose(origin_ins, script_ins, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_StandardROIHeads_scriptability(self):
cfg = get_cfg()
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
cfg.MODEL.MASK_ON = True
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.01
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
roi_heads = StandardROIHeads(cfg, feature_shape).eval()
proposal0 = Instances(image_sizes[0])
proposal_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
proposal0.proposal_boxes = Boxes(proposal_boxes0)
proposal0.objectness_logits = torch.tensor([0.5, 0.7], dtype=torch.float32)
proposal1 = Instances(image_sizes[1])
proposal_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
proposal1.proposal_boxes = Boxes(proposal_boxes1)
proposal1.objectness_logits = torch.tensor([0.1, 0.9], dtype=torch.float32)
proposals = [proposal0, proposal1]
pred_instances, _ = roi_heads(images, features, proposals)
fields = {
"objectness_logits": torch.Tensor,
"proposal_boxes": Boxes,
"pred_classes": torch.Tensor,
"scores": torch.Tensor,
"pred_masks": torch.Tensor,
"pred_boxes": Boxes,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
with freeze_training_mode(roi_heads), patch_instances(fields) as new_instances:
proposal0 = new_instances.from_instances(proposal0)
proposal1 = new_instances.from_instances(proposal1)
proposals = [proposal0, proposal1]
scripted_roi_heads = torch.jit.script(roi_heads)
scripted_pred_instances, _ = scripted_roi_heads(images, features, proposals)
for instance, scripted_instance in zip(pred_instances, scripted_pred_instances):
assert_instances_allclose(instance, scripted_instance, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_PointRend_mask_head_tracing(self):
cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
point_rend.add_pointrend_config(cfg)
cfg.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3"]
cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ""
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
chan = 256
head = point_rend.PointRendMaskHead(
cfg,
{
"p2": ShapeSpec(channels=chan, stride=4),
"p3": ShapeSpec(channels=chan, stride=8),
},
)
def gen_inputs(h, w, N):
p2 = torch.rand(1, chan, h, w)
p3 = torch.rand(1, chan, h // 2, w // 2)
boxes = random_boxes(N, max_coord=h)
return p2, p3, boxes
class Wrap(nn.ModuleDict):
def forward(self, p2, p3, boxes):
features = {
"p2": p2,
"p3": p3,
}
inst = Instances((p2.shape[2] * 4, p2.shape[3] * 4))
inst.pred_boxes = Boxes(boxes)
inst.pred_classes = torch.zeros(inst.__len__(), dtype=torch.long)
out = self.head(features, [inst])[0]
return out.pred_masks
model = Wrap({"head": head})
model.eval()
with torch.no_grad(), patch_builtin_len():
traced = torch.jit.trace(model, gen_inputs(302, 208, 20))
inputs = gen_inputs(100, 120, 30)
out_eager = model(*inputs)
out_trace = traced(*inputs)
self.assertTrue(torch.allclose(out_eager, out_trace))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_roi_heads.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import sys
import unittest
import torch
from detectron2.config import get_cfg
from detectron2.export import scripting_with_instances
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.proposal_generator import RPN, build_proposal_generator
from detectron2.modeling.proposal_generator.proposal_utils import (
add_ground_truth_to_proposals,
find_top_rpn_proposals,
)
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
logger = logging.getLogger(__name__)
class RPNTest(unittest.TestCase):
def get_gt_and_features(self):
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = Boxes(gt_boxes)
return (gt_instances, features, images, image_sizes)
def test_rpn(self):
torch.manual_seed(121)
cfg = get_cfg()
backbone = build_backbone(cfg)
proposal_generator = RPN(cfg, backbone.output_shape())
(gt_instances, features, images, image_sizes) = self.get_gt_and_features()
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.08011703193),
"loss_rpn_loc": torch.tensor(0.101470276),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
self.assertEqual(len(proposals), len(image_sizes))
for proposal, im_size in zip(proposals, image_sizes):
self.assertEqual(proposal.image_size, im_size)
expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]])
expected_objectness_logit = torch.tensor([0.1596, -0.0007])
self.assertTrue(
torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4)
)
self.assertTrue(
torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4)
)
def verify_rpn(self, conv_dims, expected_conv_dims):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.RPN.CONV_DIMS = conv_dims
backbone = build_backbone(cfg)
proposal_generator = RPN(cfg, backbone.output_shape())
for k, conv in enumerate(proposal_generator.rpn_head.conv):
self.assertEqual(expected_conv_dims[k], conv.out_channels)
return proposal_generator
def test_rpn_larger_num_convs(self):
conv_dims = [64, 64, 64, 64, 64]
proposal_generator = self.verify_rpn(conv_dims, conv_dims)
(gt_instances, features, images, image_sizes) = self.get_gt_and_features()
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.08122821152),
"loss_rpn_loc": torch.tensor(0.10064548254),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
def test_rpn_conv_dims_not_set(self):
conv_dims = [-1, -1, -1]
expected_conv_dims = [1024, 1024, 1024]
self.verify_rpn(conv_dims, expected_conv_dims)
# https://github.com/pytorch/pytorch/issues/46964
@unittest.skipIf(
TORCH_VERSION < (1, 7) or sys.version_info.minor <= 6, "Insufficient pytorch version"
)
def test_rpn_scriptability(self):
cfg = get_cfg()
proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval()
num_images = 2
images_tensor = torch.rand(num_images, 30, 40)
image_sizes = [(32, 32), (30, 40)]
images = ImageList(images_tensor, image_sizes)
features = {"res4": torch.rand(num_images, 1024, 1, 2)}
fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
proposal_generator_ts = scripting_with_instances(proposal_generator, fields)
proposals, _ = proposal_generator(images, features)
proposals_ts, _ = proposal_generator_ts(images, features)
for proposal, proposal_ts in zip(proposals, proposals_ts):
self.assertEqual(proposal.image_size, proposal_ts.image_size)
self.assertTrue(
torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor)
)
self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits))
def test_rrpn(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
backbone = build_backbone(cfg)
proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.04291602224),
"loss_rpn_loc": torch.tensor(0.145077362),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
expected_proposal_box = torch.tensor(
[
[-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801],
[13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749],
[8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432],
[5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777],
]
)
expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029])
torch.set_printoptions(precision=8, sci_mode=False)
self.assertEqual(len(proposals), len(image_sizes))
proposal = proposals[0]
# It seems that there's some randomness in the result across different machines:
        # This test can be run 100 times on a local machine with exactly the same result,
        # but a different machine might produce slightly different results,
        # hence the atol here.
err_msg = "computed proposal boxes = {}, expected {}".format(
proposal.proposal_boxes.tensor, expected_proposal_box
)
self.assertTrue(
torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5),
err_msg,
)
err_msg = "computed objectness logits = {}, expected {}".format(
proposal.objectness_logits, expected_objectness_logit
)
self.assertTrue(
torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5),
err_msg,
)
def test_find_rpn_proposals_inf(self):
N, Hi, Wi, A = 3, 3, 3, 3
proposals = [torch.rand(N, Hi * Wi * A, 4)]
pred_logits = [torch.rand(N, Hi * Wi * A)]
pred_logits[0][1][3:5].fill_(float("inf"))
find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False)
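        # Presumably the positional arguments above map to (proposals,
        # pred_objectness_logits, image_sizes, nms_thresh, pre_nms_topk,
        # post_nms_topk, min_box_size, training): NMS at 0.5 IoU, at most 1000
        # boxes kept before and after NMS, no minimum box size, inference mode.
        # The inf-filled logits check that non-finite scores do not crash the
        # top-k selection.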
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_find_rpn_proposals_tracing(self):
N, Hi, Wi, A = 3, 50, 50, 9
proposal = torch.rand(N, Hi * Wi * A, 4)
pred_logit = torch.rand(N, Hi * Wi * A)
def func(proposal, logit, image_size):
r = find_top_rpn_proposals(
[proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False
)[0]
size = r.image_size
if not isinstance(size, torch.Tensor):
size = torch.tensor(size)
return (size, r.proposal_boxes.tensor, r.objectness_logits)
other_inputs = []
# test that it generalizes to other shapes
for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]:
other_inputs.append(
(
torch.rand(N, Hi * Wi * A, 4),
torch.rand(N, Hi * Wi * A),
torch.tensor([shp, shp]),
)
)
torch.jit.trace(
func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs
)
def test_append_gt_to_proposal(self):
proposals = Instances(
(10, 10),
**{
"proposal_boxes": Boxes(torch.empty((0, 4))),
"objectness_logits": torch.tensor([]),
"custom_attribute": torch.tensor([]),
}
)
gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]]))
self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals])
gt_instances = Instances((10, 10))
gt_instances.gt_boxes = gt_boxes
self.assertRaises(
AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals]
)
gt_instances.custom_attribute = torch.tensor([1])
gt_instances.custom_attribute2 = torch.tensor([1])
new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0]
self.assertEqual(new_proposals.custom_attribute[0], 1)
# new proposals should only include the attributes in proposals
self.assertRaises(AttributeError, lambda: new_proposals.custom_attribute2)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_rpn.py
|
banmo-main
|
third_party/detectron2_old/tests/modeling/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.rotated_fast_rcnn import RotatedFastRCNNOutputLayers
from detectron2.structures import Boxes, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
logger = logging.getLogger(__name__)
class FastRCNNTest(unittest.TestCase):
def test_fast_rcnn(self):
torch.manual_seed(132)
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
feature_pooled = torch.rand(2, box_head_output_size)
predictions = box_predictor(feature_pooled)
proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32)
gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
proposal = Instances((10, 10))
proposal.proposal_boxes = Boxes(proposal_boxes)
proposal.gt_boxes = Boxes(gt_boxes)
proposal.gt_classes = torch.tensor([1, 2])
with EventStorage(): # capture events in a new storage to discard them
losses = box_predictor.losses(predictions, [proposal])
expected_losses = {
"loss_cls": torch.tensor(1.7951188087),
"loss_box_reg": torch.tensor(4.0357131958),
}
for name in expected_losses.keys():
assert torch.allclose(losses[name], expected_losses[name])
def test_fast_rcnn_empty_batch(self, device="cpu"):
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=10),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=8,
).to(device=device)
logits = torch.randn(0, 100, requires_grad=True, device=device)
deltas = torch.randn(0, 4, requires_grad=True, device=device)
losses = box_predictor.losses([logits, deltas], [])
for value in losses.values():
self.assertTrue(torch.allclose(value, torch.zeros_like(value)))
sum(losses.values()).backward()
self.assertTrue(logits.grad is not None)
self.assertTrue(deltas.grad is not None)
predictions, _ = box_predictor.inference([logits, deltas], [])
self.assertEqual(len(predictions), 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_fast_rcnn_empty_batch_cuda(self):
self.test_fast_rcnn_empty_batch(device=torch.device("cuda"))
def test_fast_rcnn_rotated(self):
torch.manual_seed(132)
box_head_output_size = 8
box_predictor = RotatedFastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransformRotated(weights=(10, 10, 5, 5, 1)),
num_classes=5,
)
feature_pooled = torch.rand(2, box_head_output_size)
predictions = box_predictor(feature_pooled)
proposal_boxes = torch.tensor(
[[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32
)
gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
proposal = Instances((10, 10))
proposal.proposal_boxes = RotatedBoxes(proposal_boxes)
proposal.gt_boxes = RotatedBoxes(gt_boxes)
proposal.gt_classes = torch.tensor([1, 2])
with EventStorage(): # capture events in a new storage to discard them
losses = box_predictor.losses(predictions, [proposal])
# Note: the expected losses are slightly different even if
# the boxes are essentially the same as in the FastRCNNOutput test, because
        # bbox_pred in FastRCNNOutputLayers has different Linear layers/initialization
# between the two cases.
expected_losses = {
"loss_cls": torch.tensor(1.7920907736),
"loss_box_reg": torch.tensor(4.0410838127),
}
for name in expected_losses.keys():
assert torch.allclose(losses[name], expected_losses[name])
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_predict_boxes_tracing(self):
class Model(torch.nn.Module):
def __init__(self, output_layer):
super(Model, self).__init__()
self._output_layer = output_layer
def forward(self, proposal_deltas, proposal_boxes):
instances = Instances((10, 10))
instances.proposal_boxes = Boxes(proposal_boxes)
return self._output_layer.predict_boxes((None, proposal_deltas), [instances])
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
model = Model(box_predictor)
from detectron2.export.torchscript_patch import patch_builtin_len
with torch.no_grad(), patch_builtin_len():
func = torch.jit.trace(model, (torch.randn(10, 20), torch.randn(10, 4)))
o = func(torch.randn(10, 20), torch.randn(10, 4))
self.assertEqual(o[0].shape, (10, 20))
o = func(torch.randn(5, 20), torch.randn(5, 4))
self.assertEqual(o[0].shape, (5, 20))
o = func(torch.randn(20, 20), torch.randn(20, 4))
self.assertEqual(o[0].shape, (20, 20))
def test_predict_probs_tracing(self):
class Model(torch.nn.Module):
def __init__(self, output_layer):
super(Model, self).__init__()
self._output_layer = output_layer
def forward(self, scores, proposal_boxes):
instances = Instances((10, 10))
instances.proposal_boxes = Boxes(proposal_boxes)
return self._output_layer.predict_probs((scores, None), [instances])
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
model = Model(box_predictor)
from detectron2.export.torchscript_patch import patch_builtin_len
with torch.no_grad(), patch_builtin_len():
func = torch.jit.trace(model, (torch.randn(10, 6), torch.rand(10, 4)))
o = func(torch.randn(10, 6), torch.randn(10, 4))
self.assertEqual(o[0].shape, (10, 6))
o = func(torch.randn(5, 6), torch.randn(5, 4))
self.assertEqual(o[0].shape, (5, 6))
o = func(torch.randn(20, 6), torch.randn(20, 4))
self.assertEqual(o[0].shape, (20, 6))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_fast_rcnn.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
logger = logging.getLogger(__name__)
class TestBox2BoxTransform(unittest.TestCase):
def test_reconstruction(self):
weights = (5, 5, 10, 10)
b2b_tfm = Box2BoxTransform(weights=weights)
src_boxes = random_boxes(10)
dst_boxes = random_boxes(10)
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda"))
for device in devices:
src_boxes = src_boxes.to(device=device)
dst_boxes = dst_boxes.to(device=device)
deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes)
dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes)
self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_apply_deltas_tracing(self):
weights = (5, 5, 10, 10)
b2b_tfm = Box2BoxTransform(weights=weights)
with torch.no_grad():
func = torch.jit.trace(b2b_tfm.apply_deltas, (torch.randn(10, 20), torch.randn(10, 4)))
o = func(torch.randn(10, 20), torch.randn(10, 4))
self.assertEqual(o.shape, (10, 20))
o = func(torch.randn(5, 20), torch.randn(5, 4))
self.assertEqual(o.shape, (5, 20))
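# A minimal sketch of the box-delta encoding that Box2BoxTransform is expected to
# follow (the standard Faster R-CNN parameterization); apply_deltas inverts this
# mapping, which is what test_reconstruction above relies on. The helper name is
# illustrative only and not part of the detectron2 API.
def _sketch_get_deltas(src, dst, weights=(5, 5, 10, 10)):
    import math  # local import keeps this illustrative helper self-contained
    wx, wy, ww, wh = weights
    src_w, src_h = src[2] - src[0], src[3] - src[1]
    dst_w, dst_h = dst[2] - dst[0], dst[3] - dst[1]
    src_cx, src_cy = src[0] + 0.5 * src_w, src[1] + 0.5 * src_h
    dst_cx, dst_cy = dst[0] + 0.5 * dst_w, dst[1] + 0.5 * dst_h
    dx = wx * (dst_cx - src_cx) / src_w
    dy = wy * (dst_cy - src_cy) / src_h
    dw = ww * math.log(dst_w / src_w)
    dh = wh * math.log(dst_h / src_h)
    return dx, dy, dw, dh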
def random_rotated_boxes(mean_box, std_length, std_angle, N):
return torch.cat(
[torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1
) + torch.tensor(mean_box, dtype=torch.float)
class TestBox2BoxTransformRotated(unittest.TestCase):
def test_reconstruction(self):
weights = (5, 5, 10, 10, 1)
b2b_transform = Box2BoxTransformRotated(weights=weights)
src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda"))
for device in devices:
src_boxes = src_boxes.to(device=device)
dst_boxes = dst_boxes.to(device=device)
deltas = b2b_transform.get_deltas(src_boxes, dst_boxes)
dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes)
assert torch.allclose(dst_boxes[:, :4], dst_boxes_reconstructed[:, :4], atol=1e-5)
# angle difference has to be normalized
assert torch.allclose(
(dst_boxes[:, 4] - dst_boxes_reconstructed[:, 4] + 180.0) % 360.0 - 180.0,
torch.zeros_like(dst_boxes[:, 4]),
atol=1e-4,
)
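# The modulo expression in the assertion above wraps any angle difference into
# [-180, 180) degrees, so a reconstruction that is off by a full turn still counts
# as zero error; a standalone sketch of that normalization:
def _normalize_angle_deg(a):
    """Map an angle difference in degrees into [-180, 180), mirroring the check above."""
    return (a + 180.0) % 360.0 - 180.0
# e.g. _normalize_angle_deg(360.0) == 0.0 and _normalize_angle_deg(-359.0) == 1.0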
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_box2box_transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os
import tempfile
import unittest
import pycocotools.mask as mask_util
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict, load_coco_json
from detectron2.structures import BoxMode
def make_mask():
"""
    Makes a donut-shaped binary mask.
"""
H = 100
W = 100
mask = np.zeros([H, W], dtype=np.uint8)
for x in range(W):
for y in range(H):
d = np.linalg.norm(np.array([W, H]) / 2 - np.array([x, y]))
            if 10 < d < 20:
mask[y, x] = 1
return mask
def uncompressed_rle(mask):
l = mask.flatten(order="F").tolist()
counts = []
p = False
cnt = 0
for i in l:
if i == p:
cnt += 1
else:
counts.append(cnt)
p = i
cnt = 1
counts.append(cnt)
return {"counts": counts, "size": [mask.shape[0], mask.shape[1]]}
def make_dataset_dicts(mask, compressed: bool = True):
"""
Returns a list of dicts that represents a single COCO data point for
object detection. The single instance given by `mask` is represented by
RLE, either compressed or uncompressed.
"""
record = {}
record["file_name"] = "test"
record["image_id"] = 0
record["height"] = mask.shape[0]
record["width"] = mask.shape[1]
y, x = np.nonzero(mask)
if compressed:
segmentation = mask_util.encode(np.asarray(mask, order="F"))
else:
segmentation = uncompressed_rle(mask)
min_x = np.min(x)
max_x = np.max(x)
min_y = np.min(y)
max_y = np.max(y)
obj = {
"bbox": [min_x, min_y, max_x, max_y],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 0,
"iscrowd": 0,
"segmentation": segmentation,
}
record["annotations"] = [obj]
return [record]
class TestRLEToJson(unittest.TestCase):
def test(self):
# Make a dummy dataset.
mask = make_mask()
DatasetCatalog.register("test_dataset", lambda: make_dataset_dicts(mask))
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
# Dump to json.
json_dict = convert_to_coco_dict("test_dataset")
with tempfile.TemporaryDirectory() as tmpdir:
json_file_name = os.path.join(tmpdir, "test.json")
with open(json_file_name, "w") as f:
json.dump(json_dict, f)
# Load from json.
dicts = load_coco_json(json_file_name, "")
# Check the loaded mask matches the original.
anno = dicts[0]["annotations"][0]
loaded_mask = mask_util.decode(anno["segmentation"])
self.assertTrue(np.array_equal(loaded_mask, mask))
DatasetCatalog.pop("test_dataset")
MetadataCatalog.pop("test_dataset")
def test_uncompressed_RLE(self):
mask = make_mask()
rle = mask_util.encode(np.asarray(mask, order="F"))
uncompressed = uncompressed_rle(mask)
compressed = mask_util.frPyObjects(uncompressed, *rle["size"])
self.assertEqual(rle, compressed)
class TestConvertCOCO(unittest.TestCase):
@staticmethod
def generate_data():
record = {
"file_name": "test",
"image_id": 0,
"height": 100,
"width": 100,
"annotations": [
{
"bbox": [10, 10, 10, 10, 5],
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 0,
"iscrowd": 0,
},
{
"bbox": [15, 15, 3, 3],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 0,
"iscrowd": 0,
},
],
}
return [record]
def test_convert_to_coco(self):
DatasetCatalog.register("test_dataset", lambda: TestConvertCOCO.generate_data())
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
convert_to_coco_dict("test_dataset")
DatasetCatalog.pop("test_dataset")
MetadataCatalog.pop("test_dataset")
|
banmo-main
|
third_party/detectron2_old/tests/data/test_coco.py
|
banmo-main
|
third_party/detectron2_old/tests/data/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
from detectron2.data.transforms.transform import RotationTransform
class TestRotationTransform(unittest.TestCase):
def assertEqualsArrays(self, a1, a2):
self.assertTrue(np.allclose(a1, a2))
def randomData(self, h=5, w=5):
image = np.random.rand(h, w)
coords = np.array([[i, j] for j in range(h + 1) for i in range(w + 1)], dtype=float)
return image, coords, h, w
def test180(self):
image, coords, h, w = self.randomData(6, 6)
rot = RotationTransform(h, w, 180, expand=False, center=None)
self.assertEqualsArrays(rot.apply_image(image), image[::-1, ::-1])
rotated_coords = [[w - c[0], h - c[1]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test45_coords(self):
_, coords, h, w = self.randomData(4, 6)
rot = RotationTransform(h, w, 45, expand=False, center=None)
rotated_coords = [
[(x + y - (h + w) / 2) / np.sqrt(2) + w / 2, h / 2 + (y + (w - h) / 2 - x) / np.sqrt(2)]
for (x, y) in coords
]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test90(self):
image, coords, h, w = self.randomData()
rot = RotationTransform(h, w, 90, expand=False, center=None)
self.assertEqualsArrays(rot.apply_image(image), image.T[::-1])
rotated_coords = [[c[1], w - c[0]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test90_expand(self): # non-square image
image, coords, h, w = self.randomData(h=5, w=8)
rot = RotationTransform(h, w, 90, expand=True, center=None)
self.assertEqualsArrays(rot.apply_image(image), image.T[::-1])
rotated_coords = [[c[1], w - c[0]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test_center_expand(self):
# center has no effect if expand=True because it only affects shifting
image, coords, h, w = self.randomData(h=5, w=8)
angle = np.random.randint(360)
rot1 = RotationTransform(h, w, angle, expand=True, center=None)
rot2 = RotationTransform(h, w, angle, expand=True, center=(0, 0))
rot3 = RotationTransform(h, w, angle, expand=True, center=(h, w))
rot4 = RotationTransform(h, w, angle, expand=True, center=(2, 5))
for r1 in [rot1, rot2, rot3, rot4]:
for r2 in [rot1, rot2, rot3, rot4]:
self.assertEqualsArrays(r1.apply_image(image), r2.apply_image(image))
self.assertEqualsArrays(r1.apply_coords(coords), r2.apply_coords(coords))
def test_inverse_transform(self):
image, coords, h, w = self.randomData(h=5, w=8)
rot = RotationTransform(h, w, 90, expand=True, center=None)
rot_image = rot.apply_image(image)
self.assertEqualsArrays(rot.inverse().apply_image(rot_image), image)
rot = RotationTransform(h, w, 65, expand=True, center=None)
rotated_coords = rot.apply_coords(coords)
self.assertEqualsArrays(rot.inverse().apply_coords(rotated_coords), coords)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_rotation_transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
from functools import partial
from iopath.common.file_io import LazyPath
from detectron2.data.build import DatasetFromList
def _a_slow_func(x):
return "path/{}".format(x)
class TestDatasetFromList(unittest.TestCase):
def test_using_lazy_path(self):
dataset = []
for i in range(10):
dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))})
dataset = DatasetFromList(dataset)
for i in range(10):
path = dataset[i]["file_name"]
self.assertTrue(isinstance(path, LazyPath))
self.assertEqual(os.fspath(path), _a_slow_func(i))
|
banmo-main
|
third_party/detectron2_old/tests/data/test_dataset.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import unittest
from unittest import mock
from PIL import Image, ImageOps
from detectron2.config import get_cfg
from detectron2.data import detection_utils
from detectron2.data import transforms as T
from detectron2.utils.logger import setup_logger
logger = logging.getLogger(__name__)
class TestTransforms(unittest.TestCase):
def setUp(self):
setup_logger()
def test_apply_rotated_boxes(self):
np.random.seed(125)
cfg = get_cfg()
is_train = True
augs = detection_utils.build_augmentation(cfg, is_train)
image = np.random.rand(200, 300)
image, transforms = T.apply_augmentations(augs, image)
image_shape = image.shape[:2] # h, w
assert image_shape == (800, 1200)
annotation = {"bbox": [179, 97, 62, 40, -56]}
boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5)
transformed_bbox = transforms.apply_rotated_box(boxes)[0]
expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)
err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox)
assert np.allclose(transformed_bbox, expected_bbox), err_msg
def test_resize_and_crop(self):
np.random.seed(125)
min_scale = 0.2
max_scale = 2.0
target_height = 1100
target_width = 1000
resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width)
fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width))
hflip_aug = T.RandomFlip()
augs = [resize_aug, fixed_size_crop_aug, hflip_aug]
original_image = np.random.rand(900, 800)
image, transforms = T.apply_augmentations(augs, original_image)
image_shape = image.shape[:2] # h, w
self.assertEqual((1100, 1000), image_shape)
boxes = np.array(
[[91, 46, 144, 111], [523, 251, 614, 295]],
dtype=np.float64,
)
transformed_bboxs = transforms.apply_box(boxes)
expected_bboxs = np.array(
[
[895.42, 33.42666667, 933.91125, 80.66],
[554.0825, 182.39333333, 620.17125, 214.36666667],
],
dtype=np.float64,
)
err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, expected_bboxs)
self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]])
transformed_polygons = transforms.apply_polygons([polygon])
expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]])
self.assertEqual(1, len(transformed_polygons))
err_msg = "transformed_polygon = {}, expected {}".format(
transformed_polygons[0], expected_polygon
)
self.assertTrue(np.allclose(transformed_polygons[0], expected_polygon), err_msg)
def test_apply_rotated_boxes_unequal_scaling_factor(self):
np.random.seed(125)
h, w = 400, 200
newh, neww = 800, 800
image = np.random.rand(h, w)
augs = []
augs.append(T.Resize(shape=(newh, neww)))
image, transforms = T.apply_augmentations(augs, image)
image_shape = image.shape[:2] # h, w
assert image_shape == (newh, neww)
boxes = np.array(
[
[150, 100, 40, 20, 0],
[150, 100, 40, 20, 30],
[150, 100, 40, 20, 90],
[150, 100, 40, 20, -90],
],
dtype=np.float64,
)
transformed_boxes = transforms.apply_rotated_box(boxes)
expected_bboxes = np.array(
[
[600, 200, 160, 40, 0],
[600, 200, 144.22205102, 52.91502622, 49.10660535],
[600, 200, 80, 80, 90],
[600, 200, 80, 80, -90],
],
dtype=np.float64,
)
err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes)
assert np.allclose(transformed_boxes, expected_bboxes), err_msg
def test_print_augmentation(self):
t = T.RandomCrop("relative", (100, 100))
self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))")
t0 = T.RandomFlip(prob=0.5)
self.assertEqual(str(t0), "RandomFlip(prob=0.5)")
t1 = T.RandomFlip()
self.assertEqual(str(t1), "RandomFlip()")
t = T.AugmentationList([t0, t1])
self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]")
def test_random_apply_prob_out_of_range_check(self):
test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False}
for given_probability, is_valid in test_probabilities.items():
if not is_valid:
self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability)
else:
T.RandomApply(T.NoOpTransform(), prob=given_probability)
    def test_random_apply_wrapping_aug_probability_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
transform = random_apply.get_transform(image_mock)
transform_mock.get_transform.assert_called_once_with(image_mock)
self.assertIsNot(transform, transform_mock)
    def test_random_apply_wrapping_std_transform_probability_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
transform = random_apply.get_transform(image_mock)
self.assertIs(transform, transform_mock)
    def test_random_apply_probability_not_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.9):
transform = random_apply.get_transform(image_mock)
transform_mock.get_transform.assert_not_called()
self.assertIsInstance(transform, T.NoOpTransform)
def test_augmentation_input_args(self):
input_shape = (100, 100)
output_shape = (50, 50)
# define two augmentations with different args
class TG1(T.Augmentation):
def get_transform(self, image, sem_seg):
return T.ResizeTransform(
input_shape[0], input_shape[1], output_shape[0], output_shape[1]
)
class TG2(T.Augmentation):
def get_transform(self, image):
assert image.shape[:2] == output_shape # check that TG1 is applied
return T.HFlipTransform(output_shape[1])
image = np.random.rand(*input_shape).astype("float32")
sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
tfms = inputs.apply_augmentations([TG1(), TG2()])
self.assertIsInstance(tfms[0], T.ResizeTransform)
self.assertIsInstance(tfms[1], T.HFlipTransform)
self.assertTrue(inputs.image.shape[:2] == output_shape)
self.assertTrue(inputs.sem_seg.shape[:2] == output_shape)
class TG3(T.Augmentation):
def get_transform(self, image, nonexist):
pass
with self.assertRaises(AttributeError):
inputs.apply_augmentations([TG3()])
def test_augmentation_list(self):
input_shape = (100, 100)
image = np.random.rand(*input_shape).astype("float32")
sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])
_ = T.AugmentationList([augs, T.Resize(30)])(inputs)
# 3 in latest fvcore (flattened transformlist), 2 in older
# self.assertEqual(len(tfms), 3)
def test_color_transforms(self):
rand_img = np.random.random((100, 100, 3)) * 255
rand_img = rand_img.astype("uint8")
# Test no-op
noop_transform = T.ColorTransform(lambda img: img)
self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img)))
# Test a ImageOps operation
magnitude = np.random.randint(0, 256)
solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude))
expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude)
self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img)))
def test_resize_transform(self):
input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
for in_shape, out_shape in zip(input_shapes, output_shapes):
in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1])
out_img = tfm.apply_image(in_img)
self.assertTrue(out_img.shape == out_shape)
def test_extent_transform(self):
input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
src_rect = (20, 20, 80, 80)
output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
for in_shape, out_shape in zip(input_shapes, output_shapes):
in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
tfm = T.ExtentTransform(src_rect, out_shape[:2])
out_img = tfm.apply_image(in_img)
self.assertTrue(out_img.shape == out_shape)
|
banmo-main
|
third_party/detectron2_old/tests/data/test_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import numpy as np
import os
import unittest
import pycocotools.mask as mask_util
from detectron2.data import MetadataCatalog, detection_utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, BoxMode
from detectron2.utils.file_io import PathManager
class TestTransformAnnotations(unittest.TestCase):
def test_transform_simple_annotation(self):
transforms = T.TransformList([T.HFlipTransform(400)])
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 3,
"segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],
}
output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))
self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
self.assertEqual(len(output["segmentation"]), len(anno["segmentation"]))
self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10]))
detection_utils.annotations_to_instances([output, output], (400, 400))
def test_flip_keypoints(self):
transforms = T.TransformList([T.HFlipTransform(400)])
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": np.random.rand(17, 3) * 50 + 15,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno),
transforms,
(400, 400),
keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
["keypoints_coco_2017_train"]
),
)
# The first keypoint is nose
self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
# The last 16 keypoints are 8 left-right pairs
self.assertTrue(
np.allclose(
output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
400 - anno["keypoints"][1:, 0].reshape(-1, 2),
)
)
self.assertTrue(
np.allclose(
output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
)
)
def test_crop(self):
transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])
keypoints = np.random.rand(17, 3) * 50 + 15
keypoints[:, 2] = 2
anno = {
"bbox": np.asarray([10, 10, 200, 400]),
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": keypoints,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (10, 10)
)
# box is shifted and cropped
self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all())
# keypoints are no longer visible
self.assertTrue((output["keypoints"][:, 2] == 0).all())
def test_transform_RLE(self):
transforms = T.TransformList([T.HFlipTransform(400)])
mask = np.zeros((300, 400), order="F").astype("uint8")
mask[:, :200] = 1
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": mask_util.encode(mask[:, :, None])[0],
"category_id": 3,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (300, 400)
)
mask = output["segmentation"]
self.assertTrue((mask[:, 200:] == 1).all())
self.assertTrue((mask[:, :200] == 0).all())
inst = detection_utils.annotations_to_instances(
[output, output], (400, 400), mask_format="bitmask"
)
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
def test_transform_RLE_resize(self):
transforms = T.TransformList(
[T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
)
mask = np.zeros((300, 400), order="F").astype("uint8")
mask[:, :200] = 1
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": mask_util.encode(mask[:, :, None])[0],
"category_id": 3,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (400, 400)
)
inst = detection_utils.annotations_to_instances(
[output, output], (400, 400), mask_format="bitmask"
)
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
def test_gen_crop(self):
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
# the box center must fall into the cropped region
self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
def test_gen_crop_outside_boxes(self):
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
with self.assertRaises(AssertionError):
detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)
def test_read_sem_seg(self):
cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir
sem_seg_gt_path = os.path.join(
cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png"
)
if not PathManager.exists(sem_seg_gt_path):
raise unittest.SkipTest(
"Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path)
)
sem_seg = detection_utils.read_image(sem_seg_gt_path, "L")
self.assertEqual(sem_seg.ndim, 3)
self.assertEqual(sem_seg.shape[2], 1)
self.assertEqual(sem_seg.dtype, np.uint8)
self.assertEqual(sem_seg.max(), 32)
self.assertEqual(sem_seg.min(), 1)
def test_read_exif_orientation(self):
# https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg
URL = "detectron2://assets/Landscape_5.jpg"
img = detection_utils.read_image(URL, "RGB")
self.assertEqual(img.ndim, 3)
self.assertEqual(img.dtype, np.uint8)
self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_detection_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import json
import numpy as np
import os
import tempfile
import unittest
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from detectron2.data import DatasetCatalog
from detectron2.evaluation import COCOEvaluator
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, Instances
class TestCOCOeval(unittest.TestCase):
def test_fast_eval(self):
# A small set of images/categories from COCO val
# fmt: off
detections = [{"image_id": 139, "category_id": 1, "bbox": [417.3332824707031, 159.27003479003906, 47.66064453125, 143.00193786621094], "score": 0.9949821829795837, "segmentation": {"size": [426, 640], "counts": "Tc`52W=3N0N4aNN^E7]:4XE1g:8kDMT;U100000001O1gE[Nk8h1dFiNY9Z1aFkN]9g2J3NdN`FlN`9S1cFRN07]9g1bFoM6;X9c1cFoM=8R9g1bFQN>3U9Y30O01OO1O001N2O1N1O4L4L5UNoE3V:CVF6Q:@YF9l9@ZF<k9[O`F=];HYnX2"}}, {"image_id": 139, "category_id": 1, "bbox": [383.5909118652344, 172.0777587890625, 17.959075927734375, 36.94813537597656], "score": 0.7685421705245972, "segmentation": {"size": [426, 640], "counts": "lZP5m0Z<300O100O100000001O00]OlC0T<OnCOT<OnCNX<JnC2bQT3"}}, {"image_id": 139, "category_id": 1, "bbox": [457.8359069824219, 158.88027954101562, 9.89764404296875, 8.771820068359375], "score": 0.07092753797769547, "segmentation": {"size": [426, 640], "counts": "bSo54T=2N2O1001O006ImiW2"}}] # noqa
gt_annotations = {"categories": [{"supercategory": "person", "id": 1, "name": "person"}, {"supercategory": "furniture", "id": 65, "name": "bed"}], "images": [{"license": 4, "file_name": "000000000285.jpg", "coco_url": "http://images.cocodataset.org/val2017/000000000285.jpg", "height": 640, "width": 586, "date_captured": "2013-11-18 13:09:47", "flickr_url": "http://farm8.staticflickr.com/7434/9138147604_c6225224b8_z.jpg", "id": 285}, {"license": 2, "file_name": "000000000139.jpg", "coco_url": "http://images.cocodataset.org/val2017/000000000139.jpg", "height": 426, "width": 640, "date_captured": "2013-11-21 01:34:01", "flickr_url": "http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg", "id": 139}], "annotations": [{"segmentation": [[428.19, 219.47, 430.94, 209.57, 430.39, 210.12, 421.32, 216.17, 412.8, 217.27, 413.9, 214.24, 422.42, 211.22, 429.29, 201.6, 430.67, 181.8, 430.12, 175.2, 427.09, 168.06, 426.27, 164.21, 430.94, 159.26, 440.29, 157.61, 446.06, 163.93, 448.53, 168.06, 448.53, 173.01, 449.08, 174.93, 454.03, 185.1, 455.41, 188.4, 458.43, 195.0, 460.08, 210.94, 462.28, 226.61, 460.91, 233.76, 454.31, 234.04, 460.08, 256.85, 462.56, 268.13, 465.58, 290.67, 465.85, 293.14, 463.38, 295.62, 452.66, 295.34, 448.26, 294.52, 443.59, 282.7, 446.06, 235.14, 446.34, 230.19, 438.09, 232.39, 438.09, 221.67, 434.24, 221.12, 427.09, 219.74]], "area": 2913.1103999999987, "iscrowd": 0, "image_id": 139, "bbox": [412.8, 157.61, 53.05, 138.01], "category_id": 1, "id": 230831}, {"segmentation": [[384.98, 206.58, 384.43, 199.98, 385.25, 193.66, 385.25, 190.08, 387.18, 185.13, 387.18, 182.93, 386.08, 181.01, 385.25, 178.81, 385.25, 175.79, 388.0, 172.76, 394.88, 172.21, 398.72, 173.31, 399.27, 176.06, 399.55, 183.48, 397.9, 185.68, 395.15, 188.98, 396.8, 193.38, 398.45, 194.48, 399.0, 205.75, 395.43, 207.95, 388.83, 206.03]], "area": 435.1449499999997, "iscrowd": 0, "image_id": 139, "bbox": [384.43, 172.21, 15.12, 35.74], "category_id": 1, "id": 233201}]} # noqa
# fmt: on
# Test a small dataset for typical COCO format
experiments = {"full": (detections, gt_annotations, {})}
# Test what happens if the list of detections or ground truth annotations is empty
experiments["empty_dt"] = ([], gt_annotations, {})
gt = copy.deepcopy(gt_annotations)
gt["annotations"] = []
experiments["empty_gt"] = (detections, gt, {})
# Test changing parameter settings
experiments["no_categories"] = (detections, gt_annotations, {"useCats": 0})
experiments["no_ious"] = (detections, gt_annotations, {"iouThrs": []})
experiments["no_rec_thrs"] = (detections, gt_annotations, {"recThrs": []})
experiments["no_max_dets"] = (detections, gt_annotations, {"maxDets": []})
experiments["one_max_det"] = (detections, gt_annotations, {"maxDets": [1]})
experiments["no_area"] = (detections, gt_annotations, {"areaRng": [], "areaRngLbl": []})
# Test what happens if one omits different fields from the annotation structure
annotation_fields = [
"id",
"image_id",
"category_id",
"score",
"area",
"iscrowd",
"ignore",
"bbox",
"segmentation",
]
for a in annotation_fields:
gt = copy.deepcopy(gt_annotations)
for g in gt["annotations"]:
if a in g:
del g[a]
dt = copy.deepcopy(detections)
for d in dt:
if a in d:
del d[a]
experiments["omit_gt_" + a] = (detections, gt, {})
experiments["omit_dt_" + a] = (dt, gt_annotations, {})
# Compare precision/recall for original COCO PythonAPI to custom optimized one
for name, (dt, gt, params) in experiments.items():
# Dump to json.
try:
with tempfile.TemporaryDirectory() as tmpdir:
json_file_name = os.path.join(tmpdir, "gt_" + name + ".json")
with open(json_file_name, "w") as f:
json.dump(gt, f)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file_name)
except Exception:
pass
for iou_type in ["bbox", "segm", "keypoints"]:
# Run original COCOeval PythonAPI
api_exception = None
try:
with contextlib.redirect_stdout(io.StringIO()):
coco_dt = coco_api.loadRes(dt)
coco_eval = COCOeval(coco_api, coco_dt, iou_type)
for p, v in params.items():
setattr(coco_eval.params, p, v)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
except Exception as ex:
api_exception = ex
# Run optimized COCOeval_opt API
opt_exception = None
try:
with contextlib.redirect_stdout(io.StringIO()):
coco_dt = coco_api.loadRes(dt)
coco_eval_opt = COCOeval_opt(coco_api, coco_dt, iou_type)
for p, v in params.items():
setattr(coco_eval_opt.params, p, v)
coco_eval_opt.evaluate()
coco_eval_opt.accumulate()
coco_eval_opt.summarize()
except Exception as ex:
opt_exception = ex
if api_exception is not None and opt_exception is not None:
# Original API and optimized API should throw the same exception if annotation
# format is bad
api_error = "" if api_exception is None else type(api_exception).__name__
opt_error = "" if opt_exception is None else type(opt_exception).__name__
msg = "%s: comparing COCO APIs, '%s' != '%s'" % (name, api_error, opt_error)
self.assertTrue(api_error == opt_error, msg=msg)
else:
# Original API and optimized API should produce the same precision/recalls
for k in ["precision", "recall"]:
diff = np.abs(coco_eval.eval[k] - coco_eval_opt.eval[k])
abs_diff = np.max(diff) if diff.size > 0 else 0.0
msg = "%s: comparing COCO APIs, %s differs by %f" % (name, k, abs_diff)
self.assertTrue(abs_diff < 1e-4, msg=msg)
@unittest.skipIf(os.environ.get("CI"), "Require COCO data.")
def test_unknown_category(self):
dataset = "coco_2017_val_100"
evaluator = COCOEvaluator(dataset)
evaluator.reset()
inputs = DatasetCatalog.get(dataset)[:2]
pred = Instances((100, 100))
pred.pred_boxes = Boxes(torch.rand(2, 4))
pred.scores = torch.rand(2)
pred.pred_classes = torch.tensor([10, 80])
output = {"instances": pred}
evaluator.process(inputs, [output, output])
with self.assertRaises(AssertionError):
evaluator.evaluate()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_coco_evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import math
import operator
import unittest
import torch
from torch.utils import data
from torch.utils.data.sampler import SequentialSampler
from detectron2.data.build import worker_init_reset_seed
from detectron2.data.common import DatasetFromList, ToIterableDataset
from detectron2.data.samplers import (
GroupedBatchSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.env import seed_all_rng
class TestGroupedBatchSampler(unittest.TestCase):
def test_missing_group_id(self):
sampler = SequentialSampler(list(range(100)))
group_ids = [1] * 100
samples = GroupedBatchSampler(sampler, group_ids, 2)
for mini_batch in samples:
self.assertEqual(len(mini_batch), 2)
def test_groups(self):
sampler = SequentialSampler(list(range(100)))
group_ids = [1, 0] * 50
samples = GroupedBatchSampler(sampler, group_ids, 2)
for mini_batch in samples:
self.assertEqual((mini_batch[0] + mini_batch[1]) % 2, 0)
class TestSamplerDeterministic(unittest.TestCase):
def test_to_iterable(self):
sampler = TrainingSampler(100, seed=10)
dataset = DatasetFromList(list(range(100)))
dataset = ToIterableDataset(dataset, sampler)
data_loader = data.DataLoader(dataset, num_workers=0, collate_fn=operator.itemgetter(0))
output = list(itertools.islice(data_loader, 100))
self.assertEqual(set(output), set(range(100)))
data_loader = data.DataLoader(
dataset,
num_workers=2,
collate_fn=operator.itemgetter(0),
worker_init_fn=worker_init_reset_seed,
# reset seed should not affect behavior of TrainingSampler
)
output = list(itertools.islice(data_loader, 100))
# multiple workers should not lead to duplicate or different data
self.assertEqual(set(output), set(range(100)))
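        # Presumably ToIterableDataset shards the infinite sampler stream across
        # dataloader workers (each worker consumes every num_workers-th index),
        # conceptually itertools.islice(iter(sampler), worker_id, None, num_workers),
        # which is why two workers together still yield each index exactly once.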
def test_training_sampler_seed(self):
seed_all_rng(42)
sampler = TrainingSampler(30)
data = list(itertools.islice(sampler, 65))
seed_all_rng(42)
sampler = TrainingSampler(30)
seed_all_rng(999) # should be ineffective
data2 = list(itertools.islice(sampler, 65))
self.assertEqual(data, data2)
class TestRepeatFactorTrainingSampler(unittest.TestCase):
def test_repeat_factors_from_category_frequency(self):
repeat_thresh = 0.5
dataset_dicts = [
{"annotations": [{"category_id": 0}, {"category_id": 1}]},
{"annotations": [{"category_id": 0}]},
{"annotations": []},
]
rep_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset_dicts, repeat_thresh
)
expected_rep_factors = torch.tensor([math.sqrt(3 / 2), 1.0, 1.0])
self.assertTrue(torch.allclose(rep_factors, expected_rep_factors))
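        # Worked through, assuming the LVIS-style repeat-factor rule this sampler is
        # expected to implement: f(c) is the fraction of images containing category c,
        # the per-category factor is max(1, sqrt(t / f(c))), and each image takes the
        # max over its categories.
        #   f(0) = 2/3  ->  max(1, sqrt(0.5 / (2/3))) = 1.0
        #   f(1) = 1/3  ->  max(1, sqrt(0.5 / (1/3))) = sqrt(3/2)
        # Image 0 contains both categories -> sqrt(3/2); images 1 and 2 -> 1.0,
        # matching expected_rep_factors above.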
|
banmo-main
|
third_party/detectron2_old/tests/data/test_sampler.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# flake8: noqa
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from unittest import mock
from sphinx.domains import Domain
from typing import Dict, List, Tuple
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
class GithubURLDomain(Domain):
"""
Resolve certain links in markdown files to github source.
"""
name = "githuburl"
ROOT = "https://github.com/facebookresearch/detectron2/blob/master/"
LINKED_DOC = ["tutorials/install", "tutorials/getting_started"]
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if not target.endswith("html") and target.startswith("../../"):
url = target.replace("../", "")
github_url = url
if fromdocname in self.LINKED_DOC:
# unresolved links in these docs are all github links
github_url = target
if github_url is not None:
if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"):
# bug of recommonmark.
# https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
github_url += ".md"
print("Ref {} resolved to github:{}".format(target, github_url))
contnode["refuri"] = self.ROOT + github_url
return [("githuburl:any", contnode)]
else:
return []
# to support markdown
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath("../"))
os.environ["_DOC_BUILDING"] = "True"
DEPLOY = os.environ.get("READTHEDOCS") == "True"
# -- Project information -----------------------------------------------------
# fmt: off
try:
import torch # noqa
except ImportError:
for m in [
"torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd",
"torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx",
"torchvision", "torchvision.ops",
]:
sys.modules[m] = mock.Mock(name=m)
sys.modules['torch'].__version__ = "1.7" # fake version
HAS_TORCH = False
else:
try:
torch.ops.detectron2 = mock.Mock(name="torch.ops.detectron2")
except:
pass
HAS_TORCH = True
for m in [
"cv2", "scipy", "portalocker", "detectron2._C",
"pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval",
"google", "google.protobuf", "google.protobuf.internal", "onnx",
"caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend",
]:
sys.modules[m] = mock.Mock(name=m)
# fmt: on
sys.modules["cv2"].__version__ = "3.4"
import detectron2 # isort: skip
if HAS_TORCH:
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata("torch.nn", torch.nn.__dict__)
fixup_module_metadata("torch.utils.data", torch.utils.data.__dict__)
project = "detectron2"
copyright = "2019-2020, detectron2 contributors"
author = "detectron2 contributors"
# The short X.Y version
version = detectron2.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"recommonmark",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
if DEPLOY:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.5
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "detectron2doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"detectron2",
"detectron2 Documentation",
author,
"detectron2",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, "__HIDE_SPHINX_DOC__", False):
return True
# Hide some that are deprecated or not intended to be used
HIDDEN = {
"ResNetBlockBase",
"GroupedBatchSampler",
"build_transform_gen",
"export_caffe2_model",
"export_onnx_model",
"apply_transform_gens",
"TransformGen",
"apply_augmentations",
"StandardAugInput",
"build_batch_data_loader",
"draw_panoptic_seg_predictions",
"WarmupCosineLR",
"WarmupMultiStepLR",
}
try:
if name in HIDDEN or (
hasattr(obj, "__doc__") and obj.__doc__.lower().strip().startswith("deprecated")
):
print("Skipping deprecated object: {}".format(name))
return True
except:
pass
return skip
_PAPER_DATA = {
"resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"),
"fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"),
"mask r-cnn": ("1703.06870", "Mask R-CNN"),
"faster r-cnn": (
"1506.01497",
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks",
),
"deformconv": ("1703.06211", "Deformable Convolutional Networks"),
"deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"),
"panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"),
"retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"),
"cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"),
"lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"),
"rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"),
"imagenet in 1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"),
"xception": ("1610.02357", "Xception: Deep Learning with Depthwise Separable Convolutions"),
"mobilenet": (
"1704.04861",
"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications",
),
"deeplabv3+": (
"1802.02611",
"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation",
),
"dds": ("2003.13678", "Designing Network Design Spaces"),
"scaling": ("2103.06877", "Fast and Accurate Model Scaling"),
}
def paper_ref_role(
typ: str,
rawtext: str,
text: str,
lineno: int,
inliner,
options: Dict = {},
content: List[str] = [],
):
"""
Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension.
"""
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
text = utils.unescape(text)
has_explicit_title, title, link = split_explicit_title(text)
link = link.lower()
if link not in _PAPER_DATA:
inliner.reporter.warning("Cannot find paper " + link)
paper_url, paper_title = "#", link
else:
paper_url, paper_title = _PAPER_DATA[link]
if "/" not in paper_url:
paper_url = "https://arxiv.org/abs/" + paper_url
if not has_explicit_title:
title = paper_title
pnode = nodes.reference(title, title, internal=False, refuri=paper_url)
return [pnode], []
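# Illustrative usage of the role in the documentation sources (any key of
# _PAPER_DATA works the same way; the explicit-title form uses the key as the target):
#   :paper:`mask r-cnn`            -> arXiv 1703.06870, titled "Mask R-CNN"
#   :paper:`the FPN paper <fpn>`   -> explicit link text with "fpn" as the lookup key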
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect("autodoc-skip-member", autodoc_skip_member)
app.add_role("paper", paper_ref_role)
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
|
banmo-main
|
third_party/detectron2_old/docs/conf.py
|
# An example config to train a mmdetection model using detectron2.
from ..common.data.coco import dataloader
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.optim import SGD as optimizer
from ..common.train import train
from detectron2.modeling.mmdet_wrapper import MMDetDetector
from detectron2.config import LazyCall as L
model = L(MMDetDetector)(
detector=dict(
type="MaskRCNN",
pretrained="torchvision://resnet50",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
rpn_head=dict(
type="RPNHead",
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type="AnchorGenerator",
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
),
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
roi_head=dict(
type="StandardRoIHead",
bbox_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
bbox_head=dict(
type="Shared2FCBBoxHead",
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=False,
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
mask_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
mask_head=dict(
type="FCNMaskHead",
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False,
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
),
mask_size=28,
pos_weight=-1,
debug=False,
),
),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
score_thr=0.05,
nms=dict(type="nms", iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5,
),
),
),
pixel_mean=[123.675, 116.280, 103.530],
pixel_std=[58.395, 57.120, 57.375],
)
dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model
train.init_checkpoint = None # pretrained model is loaded inside backbone
|
banmo-main
|
third_party/detectron2_old/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py
|
"""
An example config file to train an ImageNet classifier with detectron2.
Model and dataloader both come from torchvision.
This shows how to use detectron2 as a general engine for any new models and tasks.
To run, use the following command:
python tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \
--num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/
"""
import torch
from torch import nn
from torch.nn import functional as F
from omegaconf import OmegaConf
import torchvision
from torchvision.transforms import transforms as T
from torchvision.models.resnet import ResNet, Bottleneck
from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.solver import WarmupParamScheduler
from detectron2.solver.build import get_default_optimizer_params
from detectron2.config import LazyCall as L
from detectron2.model_zoo import get_config
from detectron2.data.samplers import TrainingSampler, InferenceSampler
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils import comm
"""
Note: Here we put reusable code (models, evaluation, data) together with configs just as a
proof-of-concept, to easily demonstrate what's needed to train an ImageNet classifier in detectron2.
Writing code in configs offers extreme flexibility but is often not a good engineering practice.
In practice, you might want to put the code in your project and import it instead.
"""
def build_data_loader(dataset, batch_size, num_workers, training=True):
return torch.utils.data.DataLoader(
dataset,
sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)),
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
)
class ClassificationNet(nn.Module):
def __init__(self, model: nn.Module):
super().__init__()
self.model = model
@property
def device(self):
return list(self.model.parameters())[0].device
def forward(self, inputs):
image, label = inputs
pred = self.model(image.to(self.device))
if self.training:
label = label.to(self.device)
return F.cross_entropy(pred, label)
else:
return pred
class ClassificationAcc(DatasetEvaluator):
def reset(self):
self.corr = self.total = 0
def process(self, inputs, outputs):
image, label = inputs
self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item()
self.total += len(label)
def evaluate(self):
all_corr_total = comm.all_gather([self.corr, self.total])
corr = sum(x[0] for x in all_corr_total)
total = sum(x[1] for x in all_corr_total)
return {"accuracy": corr / total}
# --- End of code that could be in a project and be imported
dataloader = OmegaConf.create()
dataloader.train = L(build_data_loader)(
dataset=L(torchvision.datasets.ImageNet)(
root="/path/to/imagenet",
split="train",
transform=L(T.Compose)(
transforms=[
L(T.RandomResizedCrop)(size=224),
L(T.RandomHorizontalFlip)(),
T.ToTensor(),
L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]
),
),
batch_size=256 // 8,
num_workers=4,
training=True,
)
dataloader.test = L(build_data_loader)(
dataset=L(torchvision.datasets.ImageNet)(
root="${...train.dataset.root}",
split="val",
transform=L(T.Compose)(
transforms=[
L(T.Resize)(size=256),
L(T.CenterCrop)(size=224),
T.ToTensor(),
L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]
),
),
batch_size=256 // 8,
num_workers=4,
training=False,
)
dataloader.evaluator = L(ClassificationAcc)()
model = L(ClassificationNet)(
model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True)
)
optimizer = L(torch.optim.SGD)(
params=L(get_default_optimizer_params)(),
lr=0.1,
momentum=0.9,
weight_decay=1e-4,
)
lr_multiplier = L(WarmupParamScheduler)(
scheduler=L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100]
),
warmup_length=1 / 100,
warmup_factor=0.1,
)
train = get_config("common/train.py").train
train.init_checkpoint = None
train.max_iter = 100 * 1281167 // 256
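# A minimal sketch of materializing these lazy nodes outside tools/lazyconfig_train_net.py
# (assumes detectron2.config.instantiate and an ImageNet copy at dataloader.train.dataset.root):
#   from detectron2.config import instantiate
#   net = instantiate(model)                   # ClassificationNet wrapping a torchvision ResNet-50
#   test_loader = instantiate(dataloader.test)
#   evaluator = instantiate(dataloader.evaluator)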
|
banmo-main
|
third_party/detectron2_old/configs/Misc/torchvision_imagenet_R_50.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco_keypoint import dataloader
from ..common.models.keypoint_rcnn_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.retinanet import model
from ..common.train import train
dataloader.train.mapper.use_instance_mask = False
model.backbone.bottom_up.freeze_at = 2
optimizer.lr = 0.01
|
banmo-main
|
third_party/detectron2_old/configs/COCO-Detection/retinanet_R_50_FPN_1x.py
|
from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def default_X_scheduler(num_X):
"""
Returns the config for a default multi-step LR scheduler such as "1x", "3x",
commonly referred to in papers, where every 1x has the total length of 1440k
training images (~12 COCO epochs). LR is decayed twice at the end of training
following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.
Args:
num_X: a positive real number
Returns:
DictConfig: configs that define the multiplier for LR during training
"""
# total number of iterations assuming 16 batch size, using 1440000/16=90000
total_steps_16bs = num_X * 90000
if num_X <= 2:
scheduler = L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01],
# note that scheduler is scale-invariant. This is equivalent to
# milestones=[6, 8, 9]
milestones=[60000, 80000, 90000],
)
else:
scheduler = L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01],
milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
)
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=1000 / total_steps_16bs,
warmup_method="linear",
warmup_factor=0.001,
)
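# For example, following the branches above, default_X_scheduler(3) uses
# total_steps_16bs = 270000 and milestones = [210000, 250000, 270000], i.e. the two LR
# drops land 60k and 20k iterations before the end of training.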
lr_multiplier_1x = default_X_scheduler(1)
lr_multiplier_2x = default_X_scheduler(2)
lr_multiplier_3x = default_X_scheduler(3)
lr_multiplier_6x = default_X_scheduler(6)
lr_multiplier_9x = default_X_scheduler(9)
|
banmo-main
|
third_party/detectron2_old/configs/common/coco_schedule.py
|
# Common training-related configs that are designed for "tools/lazyconfig_train_net.py"
# You can use your own instead, together with your own train_net.py
train = dict(
output_dir="./output",
init_checkpoint="detectron2://ImageNetPretrained/MSRA/R-50.pkl",
max_iter=90000,
amp=dict(enabled=False), # options for Automatic Mixed Precision
ddp=dict( # options for DistributedDataParallel
broadcast_buffers=False,
find_unused_parameters=False,
fp16_compression=False,
),
checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer
eval_period=5000,
log_period=20,
device="cuda"
# ...
)
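# Downstream configs import this dict and mutate it directly, e.g. (as the LSJ baselines in
# this repo do):
#   train.amp.enabled = True
#   train.ddp.fp16_compression = True
#   train.max_iter = 184375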
|
banmo-main
|
third_party/detectron2_old/configs/common/train.py
|
import torch
from detectron2.config import LazyCall as L
from detectron2.solver.build import get_default_optimizer_params
SGD = L(torch.optim.SGD)(
params=L(get_default_optimizer_params)(
# params.model is meant to be set to the model object, before instantiating
# the optimizer.
weight_decay_norm=0.0
),
lr=0.02,
momentum=0.9,
weight_decay=1e-4,
)
|
banmo-main
|
third_party/detectron2_old/configs/common/optim.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling import PanopticFPN
from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead
from .mask_rcnn_fpn import model
model._target_ = PanopticFPN
model.sem_seg_head = L(SemSegFPNHead)(
input_shape={
f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}")
for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32])
},
ignore_value=255,
num_classes=54, # COCO stuff + 1
conv_dims=128,
common_stride=4,
loss_weight=0.5,
norm="GN",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/panoptic_fpn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from .mask_rcnn_fpn import model
[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
model.roi_heads.update(
num_classes=1,
keypoint_in_features=["p2", "p3", "p4", "p5"],
keypoint_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_keypoints=17,
conv_dims=[512] * 8,
loss_normalizer="visible",
),
)
# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
# 1000 proposals per-image is found to hurt box AP.
# Therefore we increase it to 1500 per-image.
model.proposal_generator.post_nms_topk = (1500, 1000)
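# (post_nms_topk is read as a (training, testing) pair of values)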
# Keypoint AP degrades (though box AP improves) when using plain L1 loss
model.roi_heads.box_predictor.smooth_l1_beta = 0.5
|
banmo-main
|
third_party/detectron2_old/configs/common/models/keypoint_rcnn_fpn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import (
FastRCNNOutputLayers,
MaskRCNNConvUpsampleHead,
Res5ROIHeads,
)
model = L(GeneralizedRCNN)(
backbone=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res4"],
),
proposal_generator=L(RPN)(
in_features=["res4"],
head=L(StandardRPNHead)(in_channels=1024, num_anchors=15),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[32, 64, 128, 256, 512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[16],
offset=0.0,
),
anchor_matcher=L(Matcher)(
thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
batch_size_per_image=256,
positive_fraction=0.5,
pre_nms_topk=(12000, 6000),
post_nms_topk=(2000, 1000),
nms_thresh=0.7,
),
roi_heads=L(Res5ROIHeads)(
num_classes=80,
batch_size_per_image=512,
positive_fraction=0.25,
proposal_matcher=L(Matcher)(
thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
),
in_features=["res4"],
pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 16,),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
res5=L(ResNet.make_stage)(
block_class=BottleneckBlock,
num_blocks=3,
stride_per_block=[2, 1, 1],
in_channels=1024,
bottleneck_channels=512,
out_channels=2048,
norm="FrozenBN",
stride_in_1x1=True,
),
box_predictor=L(FastRCNNOutputLayers)(
input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
num_classes="${..num_classes}",
),
mask_head=L(MaskRCNNConvUpsampleHead)(
input_shape=L(ShapeSpec)(
channels="${...res5.out_channels}",
width="${...pooler.output_size}",
height="${...pooler.output_size}",
),
num_classes="${..num_classes}",
conv_dims=[256],
),
),
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/mask_rcnn_c4.py
|
# -*- coding: utf-8 -*-
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import RetinaNet
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelP6P7
from detectron2.modeling.backbone import BasicStem, FPN, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
model = L(RetinaNet)(
backbone=L(FPN)(
bottom_up=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res3", "res4", "res5"],
),
in_features=["res3", "res4", "res5"],
out_channels=256,
top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"),
),
head=L(RetinaNetHead)(
input_shape=[ShapeSpec(channels=256)],
num_classes="${..num_classes}",
conv_dims=[256, 256, 256, 256],
prior_prob=0.01,
num_anchors=9,
),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128],
offset=0.0,
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
anchor_matcher=L(Matcher)(
thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True
),
num_classes=80,
head_in_features=["p3", "p4", "p5", "p6", "p7"],
focal_loss_alpha=0.25,
focal_loss_gamma=2.0,
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/retinanet.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads
from .mask_rcnn_fpn import model
# arguments that don't exist for Cascade R-CNN
[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]]
model.roi_heads.update(
_target_=CascadeROIHeads,
box_heads=[
L(FastRCNNConvFCHead)(
input_shape=ShapeSpec(channels=256, height=7, width=7),
conv_dims=[],
fc_dims=[1024, 1024],
)
for k in range(3)
],
box_predictors=[
L(FastRCNNOutputLayers)(
input_shape=ShapeSpec(channels=1024),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)),
cls_agnostic_bbox_reg=True,
num_classes="${...num_classes}",
)
for (w1, w2) in [(10, 5), (20, 10), (30, 15)]
],
proposal_matchers=[
L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False)
for th in [0.5, 0.6, 0.7]
],
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/cascade_rcnn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone import BasicStem, FPN, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import (
StandardROIHeads,
FastRCNNOutputLayers,
MaskRCNNConvUpsampleHead,
FastRCNNConvFCHead,
)
model = L(GeneralizedRCNN)(
backbone=L(FPN)(
bottom_up=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res2", "res3", "res4", "res5"],
),
in_features="${.bottom_up.out_features}",
out_channels=256,
top_block=L(LastLevelMaxPool)(),
),
proposal_generator=L(RPN)(
in_features=["p2", "p3", "p4", "p5", "p6"],
head=L(StandardRPNHead)(in_channels=256, num_anchors=3),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[32], [64], [128], [256], [512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
offset=0.0,
),
anchor_matcher=L(Matcher)(
thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
batch_size_per_image=256,
positive_fraction=0.5,
pre_nms_topk=(2000, 1000),
post_nms_topk=(1000, 1000),
nms_thresh=0.7,
),
roi_heads=L(StandardROIHeads)(
num_classes=80,
batch_size_per_image=512,
positive_fraction=0.25,
proposal_matcher=L(Matcher)(
thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
),
box_in_features=["p2", "p3", "p4", "p5"],
box_pooler=L(ROIPooler)(
output_size=7,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
box_head=L(FastRCNNConvFCHead)(
input_shape=ShapeSpec(channels=256, height=7, width=7),
conv_dims=[],
fc_dims=[1024, 1024],
),
box_predictor=L(FastRCNNOutputLayers)(
input_shape=ShapeSpec(channels=1024),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
num_classes="${..num_classes}",
),
mask_in_features=["p2", "p3", "p4", "p5"],
mask_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
mask_head=L(MaskRCNNConvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_classes="${..num_classes}",
conv_dims=[256, 256, 256, 256, 256],
),
),
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/mask_rcnn_fpn.py
|
from omegaconf import OmegaConf
import detectron2.data.transforms as T
from detectron2.config import LazyCall as L
from detectron2.data import (
DatasetMapper,
build_detection_test_loader,
build_detection_train_loader,
get_detection_dataset_dicts,
)
from detectron2.evaluation import COCOEvaluator
dataloader = OmegaConf.create()
dataloader.train = L(build_detection_train_loader)(
dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"),
mapper=L(DatasetMapper)(
is_train=True,
augmentations=[
L(T.ResizeShortestEdge)(
short_edge_length=(640, 672, 704, 736, 768, 800),
sample_style="choice",
max_size=1333,
),
L(T.RandomFlip)(horizontal=True),
],
image_format="BGR",
use_instance_mask=True,
),
total_batch_size=16,
num_workers=4,
)
dataloader.test = L(build_detection_test_loader)(
dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False),
mapper=L(DatasetMapper)(
is_train=False,
augmentations=[
L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333),
],
image_format="${...train.mapper.image_format}",
),
num_workers=4,
)
dataloader.evaluator = L(COCOEvaluator)(
dataset_name="${..test.dataset.names}",
)
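# Note: the "${...}" strings above are OmegaConf interpolations resolved when the config is
# instantiated; e.g. the test mapper reuses whatever image_format the train mapper ends up
# with, so a downstream config only needs to override it once.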
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco.py
|
from detectron2.config import LazyCall as L
from detectron2.evaluation import (
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
SemSegEvaluator,
)
from .coco import dataloader
dataloader.train.dataset.names = "coco_2017_train_panoptic_separated"
dataloader.train.dataset.filter_empty = False
dataloader.test.dataset.names = "coco_2017_val_panoptic_separated"
dataloader.evaluator = [
L(COCOEvaluator)(
dataset_name="${...test.dataset.names}",
),
L(SemSegEvaluator)(
dataset_name="${...test.dataset.names}",
),
L(COCOPanopticEvaluator)(
dataset_name="${...test.dataset.names}",
),
]
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco_panoptic_separated.py
|
from detectron2.data.detection_utils import create_keypoint_hflip_indices
from .coco import dataloader
dataloader.train.dataset.min_keypoints = 1
dataloader.train.dataset.names = "keypoints_coco_2017_train"
dataloader.test.dataset.names = "keypoints_coco_2017_val"
dataloader.train.mapper.update(
use_instance_mask=False,
use_keypoint=True,
keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
)
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco_keypoint.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco_panoptic_separated import dataloader
from ..common.models.panoptic_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
|
from .mask_rcnn_R_101_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Config source:
# https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=23,
w_a=38.65,
w_0=96,
w_m=2.43,
group_width=40,
norm="SyncBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py
|
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Config source:
# https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
norm="SyncBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
|
import detectron2.data.transforms as T
from detectron2.config.lazy import LazyCall as L
from detectron2.layers.batch_norm import NaiveSyncBatchNorm
from detectron2.solver import WarmupParamScheduler
from fvcore.common.param_scheduler import CosineParamScheduler
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.optim import SGD as optimizer
from ..common.train import train
# train from scratch
train.init_checkpoint = ""
train.amp.enabled = True
train.ddp.fp16_compression = True
model.backbone.bottom_up.freeze_at = 0
# SyncBN
# fmt: off
model.backbone.bottom_up.stem.norm = \
model.backbone.bottom_up.stages.norm = \
model.backbone.norm = "SyncBN"
# Use NaiveSyncBatchNorm because the heads may receive empty inputs, which is not supported
# by torch.nn.SyncBatchNorm. We can remove this after
# https://github.com/pytorch/pytorch/issues/36530 is fixed.
model.roi_heads.box_head.conv_norm = \
model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
stats_mode="N")
# fmt: on
# 2conv in RPN:
# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
model.proposal_generator.head.conv_dims = [-1, -1]
# 4conv1fc box head
model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
model.roi_heads.box_head.fc_dims = [1024]
# resize_and_crop_image in:
# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
image_size = 1024
dataloader.train.mapper.augmentations = [
L(T.ResizeScale)(
min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
),
L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
L(T.RandomFlip)(horizontal=True),
]
# recompute boxes due to cropping
dataloader.train.mapper.recompute_boxes = True
# larger batch-size.
dataloader.train.total_batch_size = 64
# Equivalent to 100 epochs.
# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
train.max_iter = 184375
lr_multiplier = L(WarmupParamScheduler)(
scheduler=CosineParamScheduler(1.0, 0.0),
warmup_length=500 / train.max_iter,
warmup_factor=0.067,
)
optimizer.lr = 0.1
optimizer.weight_decay = 4e-5
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_101_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
model.backbone.bottom_up.stages.depth = 101
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Replace default ResNet with RegNetX-4GF from the DDS paper. Config source:
# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=23,
w_a=38.65,
w_0=96,
w_m=2.43,
group_width=40,
freeze_at=2,
norm="FrozenBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
optimizer.weight_decay = 5e-5
train.init_checkpoint = (
"https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth"
)
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source:
# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
freeze_at=2,
norm="FrozenBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
optimizer.weight_decay = 5e-5
train.init_checkpoint = (
"https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth"
)
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
|
from ..common.train import train
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_c4 import model
model.backbone.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import argparse
template = """<details><summary> install </summary><pre><code>\
python -m pip install detectron2{d2_version} -f \\
https://dl.fbaipublicfiles.com/detectron2/wheels/{cuda}/torch{torch}/index.html
</code></pre> </details>"""
CUDA_SUFFIX = {
"11.1": "cu111",
"11.0": "cu110",
"10.2": "cu102",
"10.1": "cu101",
"10.0": "cu100",
"9.2": "cu92",
"cpu": "cpu",
}
def gen_header(torch_versions):
return '<table class="docutils"><tbody><th width="80"> CUDA </th>' + "".join(
[
'<th valign="bottom" align="left" width="100">torch {}</th>'.format(t)
for t in torch_versions
]
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--d2-version", help="detectron2 version number, default to empty")
args = parser.parse_args()
d2_version = f"=={args.d2_version}" if args.d2_version else ""
all_versions = (
[("1.6", k) for k in ["10.2", "10.1", "9.2", "cpu"]]
+ [("1.7", k) for k in ["11.0", "10.2", "10.1", "9.2", "cpu"]]
+ [("1.8", k) for k in ["11.1", "10.2", "10.1", "cpu"]]
)
torch_versions = sorted({k[0] for k in all_versions}, key=float, reverse=True)
cuda_versions = sorted(
{k[1] for k in all_versions}, key=lambda x: float(x) if x != "cpu" else 0, reverse=True
)
table = gen_header(torch_versions)
for cu in cuda_versions:
table += f""" <tr><td align="left">{cu}</td>"""
cu_suffix = CUDA_SUFFIX[cu]
for torch in torch_versions:
if (torch, cu) in all_versions:
cell = template.format(d2_version=d2_version, cuda=cu_suffix, torch=torch)
else:
cell = ""
table += f"""<td align="left">{cell} </td> """
table += "</tr>"
table += "</tbody></table>"
print(table)
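# Example invocation (a sketch; redirect the output wherever the install docs are assembled):
#   python dev/packaging/gen_install_table.py --d2-version 0.4 > install_table.html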
|
banmo-main
|
third_party/detectron2_old/dev/packaging/gen_install_table.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .utils.env import setup_environment
setup_environment()
# This line will be programmatically read/written by setup.py.
# Leave it at the bottom of this file and don't touch it.
__version__ = "0.4"
|
banmo-main
|
third_party/detectron2_old/detectron2/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from detectron2.utils.file_io import PathHandler, PathManager
class ModelCatalog(object):
"""
Store mappings from names to third-party models.
"""
S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
# MSRA models have STRIDE_IN_1X1=True. False otherwise.
# NOTE: all BN models here have fused BN into an affine layer.
# As a result, you should only load them to a model with "FrozenBN".
# Loading them to a model with regular BN or SyncBN is wrong.
# Even when loaded to FrozenBN, it is still different from affine by an epsilon,
# which should be negligible for training.
# NOTE: all models here use PIXEL_STD=[1,1,1]
# NOTE: Most of the BN models here are no longer used. We use the
# re-converted pre-trained models under detectron2 model zoo instead.
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
"FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
"FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
}
C2_DETECTRON_PATH_FORMAT = (
"{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
)
C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
# format: {model_name} -> part of the url
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
"35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
"48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
"35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
"35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
"36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog._get_c2_detectron_baseline(name)
if name.startswith("ImageNetPretrained/"):
return ModelCatalog._get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog: {}".format(name))
@staticmethod
def _get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
name = name[len("ImageNetPretrained/") :]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def _get_c2_detectron_baseline(name):
name = name[len("Caffe2Detectron/COCO/") :]
url = ModelCatalog.C2_DETECTRON_MODELS[name]
if "keypoint_rcnn" in name:
dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
else:
dataset = ModelCatalog.C2_DATASET_COCO
if "35998355/rpn_R-50-C4_1x" in name:
# this one model is somehow different from others ..
type = "rpn"
else:
type = "generalized_rcnn"
# Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
)
return url
class ModelCatalogHandler(PathHandler):
"""
Resolve URL like catalog://.
"""
PREFIX = "catalog://"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path, **kwargs):
logger = logging.getLogger(__name__)
catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
logger.info("Catalog entry {} points to {}".format(path, catalog_path))
return PathManager.get_local_path(catalog_path, **kwargs)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
PathManager.register_handler(ModelCatalogHandler())
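# With the handler registered above, "catalog://" URIs resolve through ModelCatalog, e.g.
# (an illustrative lookup, not exercised in this file):
#   PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")
#   # -> fetches https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl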
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/catalog.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
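# An illustrative trace of the renaming above (not taken from a real checkpoint):
#   "res2_0_branch2a_bn_s" -> "res2.0.branch2a.bn.s"         (underscores to dots)
#                          -> "res2.0.branch2a.norm.weight"   ("bn.s" -> "norm.weight")
#                          -> "res2.0.conv1.norm.weight"      (".branch2a." -> ".conv1.")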
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note that the current matching is not symmetric:
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict whose names are
converted to match model_state_dict using heuristics. The returned dict can later be
loaded with the fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
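# Minimal usage sketch (hypothetical call site; in this repo DetectionCheckpointer invokes it
# from _load_model):
#   converted = align_and_update_state_dicts(model.state_dict(), caffe2_weights, c2_conversion=True)
#   model.load_state_dict(converted, strict=False)  # unmatched checkpoint keys remain in the dict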
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/c2_model_loading.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# File:
from . import catalog as _UNUSED # register the handler
from .detection_checkpoint import DetectionCheckpointer
from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import pickle
import torch
from fvcore.common.checkpoint import Checkpointer
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
class DetectionCheckpointer(Checkpointer):
"""
Same as :class:`Checkpointer`, but is able to:
1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
2. correctly load checkpoints that are only available on the master worker
"""
def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
is_main_process = comm.is_main_process()
super().__init__(
model,
save_dir,
save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
**checkpointables,
)
self.path_manager = PathManager
def load(self, path, *args, **kwargs):
need_sync = False
if path and isinstance(self.model, DistributedDataParallel):
logger = logging.getLogger(__name__)
path = self.path_manager.get_local_path(path)
has_file = os.path.isfile(path)
all_has_file = comm.all_gather(has_file)
if not all_has_file[0]:
raise OSError(f"File {path} not found on main worker.")
if not all(all_has_file):
logger.warning(
f"Not all workers can read checkpoint {path}. "
"Training may fail to fully resume."
)
# TODO: broadcast the checkpoint file contents from main
# worker, and load from it instead.
need_sync = True
if not has_file:
path = None # don't load if not readable
ret = super().load(path, *args, **kwargs)
if need_sync:
logger.info("Broadcasting model states from main worker ...")
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
return ret
def _load_file(self, filename):
if filename.endswith(".pkl"):
with PathManager.open(filename, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "model" in data and "__author__" in data:
# file is in Detectron2 model zoo format
self.logger.info("Reading a file from '{}'".format(data["__author__"]))
return data
else:
# assume file is from Caffe2 / Detectron1 model zoo
if "blobs" in data:
# Detection models have "blobs", but ImageNet models don't
data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
elif filename.endswith(".pyth"):
# assume file is from pycls; no one else seems to use the ".pyth" extension
with PathManager.open(filename, "rb") as f:
data = torch.load(f)
assert (
"model_state" in data
), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
model_state = {
k: v
for k, v in data["model_state"].items()
if not k.endswith("num_batches_tracked")
}
return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return loaded
def _load_model(self, checkpoint):
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
checkpoint["model"] = align_and_update_state_dicts(
self.model.state_dict(),
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
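# Hedged usage sketch (illustrative only; the save_dir and weights path below
# are hypothetical). DetectionCheckpointer follows fvcore's Checkpointer API:
# resume_or_load() either resumes from the last checkpoint in save_dir or
# loads the given .pth/.pkl weights, applying the conversions above for
# Caffe2/Detectron1 and pycls files.
def _example_detection_checkpointer(model, weights="output/model_final.pth"):
    checkpointer = DetectionCheckpointer(model, save_dir="output")
    checkpointer.resume_or_load(weights, resume=True)
    return model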
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/detection_checkpoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from functools import lru_cache
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from torchvision.ops import deform_conv2d
from detectron2 import _C
from .wrappers import _NewEmptyTensorOp
class _DeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
weight,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
im2col_step=64,
):
if input is not None and input.dim() != 4:
raise ValueError(
"Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
)
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.im2col_step = im2col_step
ctx.save_for_backward(input, offset, weight)
output = input.new_empty(
_DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
)
ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
if not input.is_cuda:
if deformable_groups != 1:
raise NotImplementedError(
"Deformable Conv with deformable_groups != 1 is not supported on CPUs!"
)
return deform_conv2d(
input, offset, weight, stride=stride, padding=padding, dilation=dilation
)
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
_C.deform_conv_forward(
input,
weight,
offset,
output,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset, weight = ctx.saved_tensors
grad_input = grad_offset = grad_weight = None
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
_C.deform_conv_backward_input(
input,
offset,
grad_output,
grad_input,
grad_offset,
weight,
ctx.bufs_[0],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
if ctx.needs_input_grad[2]:
grad_weight = torch.zeros_like(weight)
_C.deform_conv_backward_filter(
input,
offset,
grad_output,
grad_weight,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
1,
cur_im2col_step,
)
return grad_input, grad_offset, grad_weight, None, None, None, None, None, None
@staticmethod
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range(input.dim() - 2):
in_size = input.size(d + 2)
pad = padding[d]
kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
stride_ = stride[d]
output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
if not all(map(lambda s: s > 0, output_size)):
raise ValueError(
"convolution input is too small (output would be {})".format(
"x".join(map(str, output_size))
)
)
return output_size
@staticmethod
@lru_cache(maxsize=128)
def _cal_im2col_step(input_size, default_size):
"""
        Calculate a proper im2col step size: the largest divisor of ``input_size``
        that is not larger than ``default_size``. A larger step is more efficient,
        but it must evenly divide the batch size.
        :param input_size: input batch size.
        :param default_size: default preferred im2col step size.
        :return: the largest proper step size.
"""
if input_size <= default_size:
return input_size
best_step = 1
for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
if input_size % step == 0:
if input_size // step <= default_size:
return input_size // step
best_step = step
return best_step
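# Illustrative check of _cal_im2col_step (added for clarity, not part of the
# original file): the returned step always divides the batch size and never
# exceeds the preferred size.
def _example_cal_im2col_step():
    assert _DeformConv._cal_im2col_step(10, 64) == 10   # batch already fits the preferred step
    assert _DeformConv._cal_im2col_step(100, 64) == 50  # largest divisor of 100 that is <= 64
    assert _DeformConv._cal_im2col_step(97, 64) == 1    # prime batch size falls back to 1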
class _ModulatedDeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
mask,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
):
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.with_bias = bias is not None
if not ctx.with_bias:
bias = input.new_empty(1) # fake tensor
if not input.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
if (
weight.requires_grad
or mask.requires_grad
or offset.requires_grad
or input.requires_grad
):
ctx.save_for_backward(input, offset, mask, weight, bias)
output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
ctx._bufs = [input.new_empty(0), input.new_empty(0)]
_C.modulated_deform_conv_forward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
output,
ctx._bufs[1],
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
grad_mask = torch.zeros_like(mask)
grad_weight = torch.zeros_like(weight)
grad_bias = torch.zeros_like(bias)
_C.modulated_deform_conv_backward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
ctx._bufs[1],
grad_input,
grad_weight,
grad_bias,
grad_offset,
grad_mask,
grad_output,
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
if not ctx.with_bias:
grad_bias = None
return (
grad_input,
grad_offset,
grad_mask,
grad_weight,
grad_bias,
None,
None,
None,
None,
None,
)
@staticmethod
def _infer_shape(ctx, input, weight):
n = input.size(0)
channels_out = weight.size(0)
height, width = input.shape[2:4]
kernel_h, kernel_w = weight.shape[2:4]
height_out = (
height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
) // ctx.stride + 1
width_out = (
width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
) // ctx.stride + 1
return n, channels_out, height_out, width_out
deform_conv = _DeformConv.apply
modulated_deform_conv = _ModulatedDeformConv.apply
class DeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=False,
norm=None,
activation=None,
):
"""
Deformable convolution from :paper:`deformconv`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(DeformConv, self).__init__()
assert not bias
        assert in_channels % groups == 0, "in_channels {} is not divisible by groups {}".format(
            in_channels, groups
        )
        assert (
            out_channels % groups == 0
        ), "out_channels {} is not divisible by groups {}".format(out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
)
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
def forward(self, x, offset):
if x.numel() == 0:
            # When input is empty, we want to return an empty tensor with "correct" shape,
            # so that the following operations will not panic
            # if they check for the shape of the tensor.
# This computes the height and width of the output tensor
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = deform_conv(
x,
offset,
self.weight,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=False"
return tmpstr
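# Hedged usage sketch for DeformConv (illustrative only; it assumes a CUDA
# build of detectron2's _C extension is available). The offsets are predicted
# by an ordinary conv and must have 2 * deformable_groups * kH * kW channels.
def _example_deform_conv():
    import torch
    from torch import nn

    if not torch.cuda.is_available():
        return
    x = torch.randn(2, 64, 32, 32, device="cuda")
    offset_conv = nn.Conv2d(64, 2 * 1 * 3 * 3, kernel_size=3, padding=1).cuda()
    dconv = DeformConv(64, 128, kernel_size=3, padding=1, deformable_groups=1).cuda()
    out = dconv(x, offset_conv(x))
    assert out.shape == (2, 128, 32, 32)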
class ModulatedDeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=True,
norm=None,
activation=None,
):
"""
Modulated deformable convolution from :paper:`deformconv2`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.deformable_groups = deformable_groups
self.with_bias = bias
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
)
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x, offset, mask):
if x.numel() == 0:
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = modulated_deform_conv(
x,
offset,
mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=" + str(self.with_bias)
return tmpstr
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/deform_conv.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
"""
A simple structure that contains basic shape specification about a tensor.
It is often used as the auxiliary inputs/outputs of models,
to complement the lack of shape inference ability among pytorch modules.
Attributes:
channels:
height:
width:
stride:
"""
def __new__(cls, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
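# Tiny illustrative sketch (not part of the original file): backbones report
# their output shapes with ShapeSpec, e.g. a stride-4 feature map with 256
# channels whose spatial size is unknown at build time.
def _example_shape_spec():
    spec = ShapeSpec(channels=256, stride=4)
    assert spec.channels == 256 and spec.stride == 4
    assert spec.height is None and spec.width is None  # spatial size left unspecified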
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/shape_spec.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torchvision.ops import roi_align
# NOTE: torchvision's RoIAlign has a different default aligned=False
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of input samples to take for each output
sample. 0 to take samples densely.
aligned (bool): if False, use the legacy implementation in
Detectron. If True, align the results more perfectly.
Note:
The meaning of aligned=True:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
pixel indices and therefore it uses pixels with a slightly incorrect alignment
(relative to our pixel model) when performing bilinear interpolation.
With `aligned=True`,
we first appropriately scale the ROI and then shift it by -0.5
prior to calling roi_align. This produces the correct neighbors; see
detectron2/tests/test_roi_align.py for verification.
            This difference does not affect the model's performance when
            ROIAlign is used together with conv layers.
"""
super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
from torchvision import __version__
version = tuple(int(x) for x in __version__.split(".")[:2])
# https://github.com/pytorch/vision/pull/2438
assert version >= (0, 7), "Require torchvision >= 0.7"
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
"""
assert rois.dim() == 2 and rois.size(1) == 5
if input.is_quantized:
input = input.dequantize()
return roi_align(
input,
rois.to(dtype=input.dtype),
self.output_size,
self.spatial_scale,
self.sampling_ratio,
self.aligned,
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
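# Hedged usage sketch for ROIAlign (illustrative only; the feature stride and
# box coordinates below are hypothetical). Boxes are given in image
# coordinates; spatial_scale maps them onto the feature map, and the first
# ROI column is the batch index.
def _example_roi_align():
    import torch

    feat = torch.randn(1, 256, 50, 50)  # e.g. stride-16 features of an 800x800 image
    rois = torch.tensor([[0.0, 100.0, 100.0, 300.0, 300.0]])  # (batch_idx, x0, y0, x1, y1)
    pooler = ROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16, sampling_ratio=0, aligned=True)
    out = pooler(feat, rois)
    assert out.shape == (1, 256, 7, 7)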
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/roi_align.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from detectron2 import _C
class _ROIAlignRotated(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = _C.roi_align_rotated_forward(
input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois,) = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_align_rotated_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None, None
roi_align_rotated = _ROIAlignRotated.apply
class ROIAlignRotated(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of input samples to take for each output
sample. 0 to take samples densely.
Note:
ROIAlignRotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
super(ROIAlignRotated, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx6 boxes. First column is the index into N.
The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
"""
assert rois.dim() == 2 and rois.size(1) == 6
orig_dtype = input.dtype
if orig_dtype == torch.float16:
input = input.float()
rois = rois.float()
return roi_align_rotated(
input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
).to(dtype=orig_dtype)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
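# Hedged usage sketch for ROIAlignRotated (illustrative only; it assumes
# detectron2's compiled _C ops are available). Each ROI is
# (batch_idx, x_ctr, y_ctr, width, height, angle_degrees).
def _example_roi_align_rotated():
    import torch

    feat = torch.randn(1, 16, 32, 32)
    rois = torch.tensor([[0.0, 16.0, 16.0, 10.0, 6.0, 30.0]])
    pooler = ROIAlignRotated(output_size=(5, 5), spatial_scale=1.0, sampling_ratio=0)
    assert pooler(feat, rois).shape == (1, 16, 5, 5)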
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/roi_align_rotated.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import torch
from torchvision.ops import boxes as box_ops
from torchvision.ops import nms # BC-compat
from detectron2.utils.env import TORCH_VERSION
if TORCH_VERSION < (1, 7):
from detectron2 import _C
nms_rotated_func = _C.nms_rotated
else:
nms_rotated_func = torch.ops.detectron2.nms_rotated
def batched_nms(
boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float
):
"""
Same as torchvision.ops.boxes.batched_nms, but safer.
"""
assert boxes.shape[-1] == 4
# TODO may need better strategy.
# Investigate after having a fully-cuda NMS op.
if len(boxes) < 40000:
# fp16 does not have enough range for batched NMS
return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)
result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
for id in torch.jit.annotate(List[int], torch.unique(idxs).cpu().tolist()):
mask = (idxs == id).nonzero().view(-1)
keep = nms(boxes[mask], scores[mask], iou_threshold)
result_mask[mask[keep]] = True
keep = result_mask.nonzero().view(-1)
keep = keep[scores[keep].argsort(descending=True)]
return keep
# Note: this function (nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def nms_rotated(boxes, scores, iou_threshold):
"""
Performs non-maximum suppression (NMS) on the rotated boxes according
to their intersection-over-union (IoU).
Rotated NMS iteratively removes lower scoring rotated boxes which have an
IoU greater than iou_threshold with another (higher scoring) rotated box.
Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
can be representing completely different objects in certain tasks, e.g., OCR.
As for the question of whether rotated-NMS should treat them as faraway boxes
even though their IOU is 1, it depends on the application and/or ground truth annotation.
As an extreme example, consider a single character v and the square box around it.
If the angle is 0 degree, the object (text) would be read as 'v';
If the angle is 90 degrees, the object (text) would become '>';
If the angle is 180 degrees, the object (text) would become '^';
If the angle is 270/-90 degrees, the object (text) would become '<'
All of these cases have IoU of 1 to each other, and rotated NMS that only
uses IoU as criterion would only keep one of them with the highest score -
which, practically, still makes sense in most cases because typically
    only one of these orientations is the correct one. Also, it does not matter
as much if the box is only used to classify the object (instead of transcribing
them with a sequential OCR recognition model) later.
On the other hand, when we use IoU to filter proposals that are close to the
ground truth during training, we should definitely take the angle into account if
we know the ground truth is labeled with the strictly correct orientation (as in,
upside-down words are annotated with -180 degrees even though they can be covered
with a 0/90/-90 degree box, etc.)
The way the original dataset is annotated also matters. For example, if the dataset
is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
we can estimate a minimum rotated bounding box to this polygon, but there's no way
we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
same region). In that case we have to just use IoU to determine the box
    proximity (as many detection benchmarks (even for text) do) unless there are other
assumptions we can make (like width is always larger than height, or the object is not
rotated by more than 90 degrees CCW/CW, etc.)
In summary, not considering angles in rotated NMS seems to be a good option for now,
but we should be aware of its implications.
Args:
boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
(x_center, y_center, width, height, angle_degrees) format.
scores (Tensor[N]): Scores for each one of the rotated boxes
        iou_threshold (float): Discards all overlapping rotated boxes with IoU > iou_threshold
Returns:
keep (Tensor): int64 tensor with the indices of the elements that have been kept
by Rotated NMS, sorted in decreasing order of scores
"""
return nms_rotated_func(boxes, scores, iou_threshold)
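# Illustrative sketch of the behavior discussed above (assumes detectron2's
# compiled rotated ops are available): the two boxes cover the same region
# with angles -90 and 90, so their IoU is 1 and angle-agnostic rotated NMS
# keeps only the higher-scoring one.
def _example_nms_rotated():
    import torch

    boxes = torch.tensor([[5.0, 3.0, 4.0, 2.0, -90.0],
                          [5.0, 3.0, 4.0, 2.0, 90.0]])
    scores = torch.tensor([0.9, 0.8])
    keep = nms_rotated(boxes, scores, iou_threshold=0.5)
    assert keep.tolist() == [0]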
# Note: this function (batched_nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
"""
Performs non-maximum suppression in a batched fashion.
Each index value correspond to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 5]):
boxes where NMS will be performed. They
are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
iou_threshold (float):
            discards all overlapping boxes
            with IoU > iou_threshold
Returns:
Tensor:
int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores
"""
assert boxes.shape[-1] == 5
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
boxes = boxes.float() # fp16 does not have enough range for batched NMS
# Strategy: in order to perform NMS independently per class,
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
# Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
# which won't handle negative coordinates correctly.
# Here by using min_coordinate we can make sure the negative coordinates are
# correctly handled.
max_coordinate = (
torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
).max()
min_coordinate = (
torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2
).min()
offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes
boxes_for_nms[:, :2] += offsets[:, None]
keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
return keep
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/nms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm
from .deform_conv import DeformConv, ModulatedDeformConv
from .mask_ops import paste_masks_in_image
from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated
from .roi_align import ROIAlign, roi_align
from .roi_align_rotated import ROIAlignRotated, roi_align_rotated
from .shape_spec import ShapeSpec
from .wrappers import (
BatchNorm2d,
Conv2d,
ConvTranspose2d,
cat,
interpolate,
Linear,
nonzero_tuple,
cross_entropy,
)
from .blocks import CNNBlockBase, DepthwiseSeparableConv2d
from .aspp import ASPP
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from copy import deepcopy
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from .batch_norm import get_norm
from .blocks import DepthwiseSeparableConv2d
from .wrappers import Conv2d
class ASPP(nn.Module):
"""
Atrous Spatial Pyramid Pooling (ASPP).
"""
def __init__(
self,
in_channels,
out_channels,
dilations,
*,
norm,
activation,
pool_kernel_size=None,
dropout: float = 0.0,
use_depthwise_separable_conv=False,
):
"""
Args:
in_channels (int): number of input channels for ASPP.
out_channels (int): number of output channels.
dilations (list): a list of 3 dilations in ASPP.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format. norm is
applied to all conv layers except the conv following
global average pooling.
activation (callable): activation function.
pool_kernel_size (tuple, list): the average pooling size (kh, kw)
for image pooling layer in ASPP. If set to None, it always
                performs global average pooling. If not None, the input shape in
                forward() must be divisible by it. It is recommended
to use a fixed input feature size in training, and set this
option to match this size, so that it performs global average
pooling in training, and the size of the pooling window stays
consistent in inference.
dropout (float): apply dropout on the output of ASPP. It is used in
the official DeepLab implementation with a rate of 0.1:
https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa
use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
"""
super(ASPP, self).__init__()
assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
self.pool_kernel_size = pool_kernel_size
self.dropout = dropout
use_bias = norm == ""
self.convs = nn.ModuleList()
# conv 1x1
self.convs.append(
Conv2d(
in_channels,
out_channels,
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
)
weight_init.c2_xavier_fill(self.convs[-1])
# atrous convs
for dilation in dilations:
if use_depthwise_separable_conv:
self.convs.append(
DepthwiseSeparableConv2d(
in_channels,
out_channels,
kernel_size=3,
padding=dilation,
dilation=dilation,
norm1=norm,
activation1=deepcopy(activation),
norm2=norm,
activation2=deepcopy(activation),
)
)
else:
self.convs.append(
Conv2d(
in_channels,
out_channels,
kernel_size=3,
padding=dilation,
dilation=dilation,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
)
weight_init.c2_xavier_fill(self.convs[-1])
# image pooling
        # We do not add BatchNorm because the spatial resolution is 1x1,
        # even though the original TF implementation uses BatchNorm here.
if pool_kernel_size is None:
image_pooling = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
)
else:
image_pooling = nn.Sequential(
nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
)
weight_init.c2_xavier_fill(image_pooling[1])
self.convs.append(image_pooling)
self.project = Conv2d(
5 * out_channels,
out_channels,
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
weight_init.c2_xavier_fill(self.project)
def forward(self, x):
size = x.shape[-2:]
if self.pool_kernel_size is not None:
if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
                raise ValueError(
                    "Input shape must be divisible by `pool_kernel_size`. "
                    "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
                )
res = []
for conv in self.convs:
res.append(conv(x))
res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
res = torch.cat(res, dim=1)
res = self.project(res)
res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
return res
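# Hedged usage sketch for ASPP (illustrative only; the channel sizes and
# dilations are typical DeepLab settings, not values mandated by this module).
def _example_aspp():
    import torch
    from torch import nn

    aspp = ASPP(
        in_channels=512,
        out_channels=256,
        dilations=[6, 12, 18],
        norm="",                # "" disables norm and enables conv bias
        activation=nn.ReLU(),
        pool_kernel_size=None,  # global average pooling branch
    )
    x = torch.randn(1, 512, 32, 32)
    assert aspp(x).shape == (1, 256, 32, 32)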
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/aspp.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Tuple
import torch
from PIL import Image
from torch.nn import functional as F
from detectron2.structures import Boxes
__all__ = ["paste_masks_in_image"]
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit
def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True):
"""
Args:
masks: N, 1, H, W
boxes: N, 4
img_h, img_w (int):
        skip_empty (bool): only paste masks within the region that
            tightly bounds all boxes, and return the results for this region only.
An important optimization for CPU.
Returns:
if skip_empty == False, a mask of shape (N, img_h, img_w)
if skip_empty == True, a mask of shape (N, h', w'), and the slice
object for the corresponding region.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty and not torch.jit.is_scripting():
x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(
dtype=torch.int32
)
x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
if not torch.jit.is_scripting():
if not masks.dtype.is_floating_point:
masks = masks.float()
img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False)
if skip_empty and not torch.jit.is_scripting():
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
def paste_masks_in_image(
masks: torch.Tensor, boxes: Boxes, image_shape: Tuple[int, int], threshold: float = 0.5
):
"""
Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Note:
This is a complicated but more accurate implementation. In actual deployment, it is
often enough to use a faster but less accurate implementation.
See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
Args:
masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
            detected object instances in the image and Hmask, Wmask are the mask
            height and width of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
boxes[i] and masks[i] correspond to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
            number of detected object instances and Himage, Wimage are the image
            height and width. img_masks[i] is a binary mask for object instance i.
"""
assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
N = len(masks)
if N == 0:
return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
if not isinstance(boxes, torch.Tensor):
boxes = boxes.tensor
device = boxes.device
assert len(boxes) == N, boxes.shape
img_h, img_w = image_shape
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == "cpu" or torch.jit.is_scripting():
# CPU is most efficient when they are pasted one by one with skip_empty=True
# so that it performs minimal number of operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks, but may have memory issue
# int(img_h) because shape may be tensors in tracing
num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (
num_chunks <= N
), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
img_masks = torch.zeros(
N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
if torch.jit.is_scripting(): # Scripting does not use the optimized codepath
img_masks[inds] = masks_chunk
else:
img_masks[(inds,) + spatial_inds] = masks_chunk
return img_masks
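# Hedged usage sketch (illustrative only; the boxes and image size below are
# hypothetical): paste two 28x28 soft masks into a 480x640 image plane.
def _example_paste_masks():
    import torch
    from detectron2.structures import Boxes

    masks = torch.rand(2, 28, 28)  # soft masks in [0, 1]
    boxes = Boxes(torch.tensor([[10.0, 20.0, 110.0, 220.0],
                                [300.0, 100.0, 400.0, 180.0]]))
    img_masks = paste_masks_in_image(masks, boxes, (480, 640), threshold=0.5)
    assert img_masks.shape == (2, 480, 640) and img_masks.dtype == torch.bool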
# The below are the original paste function (from Detectron1) which has
# larger quantization error.
# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample.
def paste_mask_in_image_old(mask, box, img_h, img_w, threshold):
"""
Paste a single mask in an image.
This is a per-box implementation of :func:`paste_masks_in_image`.
This function has larger quantization error due to incorrect pixel
modeling and is not used any more.
Args:
mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single
object instance. Values are in [0, 1].
box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners
of the object instance.
img_h, img_w (int): Image height and width.
threshold (float): Mask binarization threshold in [0, 1].
Returns:
im_mask (Tensor):
The resized and binarized object mask pasted into the original
image plane (a tensor of shape (img_h, img_w)).
"""
# Conversion from continuous box coordinates to discrete pixel coordinates
# via truncation (cast to int32). This determines which pixels to paste the
# mask onto.
box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion
# An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to
# a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1
# pixels (not x1 - x0 pixels).
samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width
samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height
    # Resample the mask from its original grid to the new samples_w x samples_h grid
mask = Image.fromarray(mask.cpu().numpy())
mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR)
mask = np.array(mask, copy=False)
if threshold >= 0:
mask = np.array(mask > threshold, dtype=np.uint8)
mask = torch.from_numpy(mask)
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = torch.from_numpy(mask * 255).to(torch.uint8)
im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, img_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, img_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
]
return im_mask
# Our pixel modeling requires extrapolation for any continuous
# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks,
# we would like this extrapolation to be an interpolation between boundary values and zero,
# instead of using absolute zero or boundary values.
# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this:
# masks, scale = pad_masks(masks[:, 0, :, :], 1)
# boxes = scale_boxes(boxes.tensor, scale)
def pad_masks(masks, padding):
"""
Args:
masks (tensor): A tensor of shape (B, M, M) representing B masks.
padding (int): Number of cells to pad on all sides.
Returns:
The padded masks and the scale factor of the padding size / original size.
"""
B = masks.shape[0]
M = masks.shape[-1]
pad2 = 2 * padding
scale = float(M + pad2) / M
padded_masks = masks.new_zeros((B, M + pad2, M + pad2))
padded_masks[:, padding:-padding, padding:-padding] = masks
return padded_masks, scale
def scale_boxes(boxes, scale):
"""
Args:
boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4
coords representing the corners x0, y0, x1, y1,
scale (float): The box scaling factor.
Returns:
Scaled boxes.
"""
w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
w_half *= scale
h_half *= scale
scaled_boxes = torch.zeros_like(boxes)
scaled_boxes[:, 0] = x_c - w_half
scaled_boxes[:, 2] = x_c + w_half
scaled_boxes[:, 1] = y_c - h_half
scaled_boxes[:, 3] = y_c + h_half
return scaled_boxes
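# Hedged sketch of the padding pattern referenced in the comment before
# pad_masks (illustrative only): pad the masks by one cell and scale the boxes
# by the same factor before calling paste_mask_in_image_old.
def _example_pad_and_scale():
    import torch

    masks = torch.rand(4, 28, 28)
    boxes = torch.tensor([[10.0, 10.0, 50.0, 60.0]]).repeat(4, 1)
    padded_masks, scale = pad_masks(masks, padding=1)
    scaled_boxes = scale_boxes(boxes, scale)
    assert padded_masks.shape == (4, 30, 30)
    assert abs(scale - 30.0 / 28.0) < 1e-6
    assert scaled_boxes.shape == boxes.shape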
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/mask_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Wrappers around some nn functions, mainly to support empty tensors.
Ideally, add support directly in PyTorch to empty tensors in those functions.
These can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
from typing import List
import torch
from torch.nn import functional as F
def cat(tensors: List[torch.Tensor], dim: int = 0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def cross_entropy(input, target, *, reduction="mean", **kwargs):
"""
Same as `torch.nn.functional.cross_entropy`, but returns 0 (instead of nan)
for empty inputs.
"""
if target.numel() == 0 and reduction == "mean":
return input.sum() * 0.0 # connect the gradient
return F.cross_entropy(input, target, **kwargs)
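# Illustrative check of the empty-input behavior described above (not part of
# the original file): with an empty target and the default "mean" reduction,
# the wrapper returns 0 instead of NaN while keeping the graph connected.
def _example_cross_entropy_empty():
    import torch

    logits = torch.zeros(0, 10, requires_grad=True)
    target = torch.zeros(0, dtype=torch.long)
    loss = cross_entropy(logits, target)
    assert loss.item() == 0.0 and loss.requires_grad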
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
        It assumes that the norm layer is used before the activation.
"""
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
# torchscript does not support SyncBatchNorm yet
# https://github.com/pytorch/pytorch/issues/40507
# and we skip these codes in torchscript since:
# 1. currently we only support torchscript in evaluation mode
        # 2. the features needed to export a module to torchscript were added in PyTorch 1.6 or
        #    later, and `Conv2d` in those versions already supports empty inputs.
if not torch.jit.is_scripting():
if x.numel() == 0 and self.training:
# https://github.com/pytorch/pytorch/issues/12013
assert not isinstance(
self.norm, torch.nn.SyncBatchNorm
), "SyncBatchNorm does not support empty inputs!"
x = F.conv2d(
x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
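# Hedged usage sketch for the Conv2d wrapper (illustrative only): the norm and
# activation passed as extra kwargs are applied, in that order, after the
# convolution itself.
def _example_conv2d_wrapper():
    import torch
    from torch import nn

    conv = Conv2d(3, 16, kernel_size=3, padding=1, bias=False,
                  norm=nn.GroupNorm(4, 16), activation=F.relu)
    x = torch.randn(2, 3, 8, 8)
    assert conv(x).shape == (2, 16, 8, 8)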
ConvTranspose2d = torch.nn.ConvTranspose2d
BatchNorm2d = torch.nn.BatchNorm2d
interpolate = F.interpolate
Linear = torch.nn.Linear
def nonzero_tuple(x):
"""
    An 'as_tuple=True' version of torch.nonzero to support torchscript,
    needed because of https://github.com/pytorch/pytorch/issues/38718
"""
if torch.jit.is_scripting():
if x.dim() == 0:
return x.unsqueeze(0).nonzero().unbind(1)
return x.nonzero().unbind(1)
else:
return x.nonzero(as_tuple=True)
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/wrappers.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
from torch import nn
from .batch_norm import FrozenBatchNorm2d, get_norm
from .wrappers import Conv2d
"""
CNN building blocks.
"""
class CNNBlockBase(nn.Module):
"""
A CNN block is assumed to have input channels, output channels and a stride.
The input and output of `forward()` method must be NCHW tensors.
The method can perform arbitrary computation but must match the given
channels and stride specification.
    Attributes:
in_channels (int):
out_channels (int):
stride (int):
"""
def __init__(self, in_channels, out_channels, stride):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
in_channels (int):
out_channels (int):
stride (int):
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def freeze(self):
"""
Make this block not trainable.
        This method sets all parameters to `requires_grad=False`,
        and converts all BatchNorm layers to FrozenBatchNorm.
Returns:
the block itself
"""
for p in self.parameters():
p.requires_grad = False
FrozenBatchNorm2d.convert_frozen_batchnorm(self)
return self
class DepthwiseSeparableConv2d(nn.Module):
"""
A kxk depthwise convolution + a 1x1 convolution.
In :paper:`xception`, norm & activation are applied on the second conv.
:paper:`mobilenet` uses norm & activation on both convs.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
padding=1,
dilation=1,
*,
norm1=None,
activation1=None,
norm2=None,
activation2=None,
):
"""
Args:
norm1, norm2 (str or callable): normalization for the two conv layers.
activation1, activation2 (callable(Tensor) -> Tensor): activation
function for the two conv layers.
"""
super().__init__()
self.depthwise = Conv2d(
in_channels,
in_channels,
kernel_size=kernel_size,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=not norm1,
norm=get_norm(norm1, in_channels),
activation=activation1,
)
self.pointwise = Conv2d(
in_channels,
out_channels,
kernel_size=1,
bias=not norm2,
norm=get_norm(norm2, out_channels),
activation=activation2,
)
# default initialization
weight_init.c2_msra_fill(self.depthwise)
weight_init.c2_msra_fill(self.pointwise)
def forward(self, x):
return self.pointwise(self.depthwise(x))
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/blocks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
import torch.distributed as dist
from fvcore.nn.distributed import differentiable_all_reduce
from torch import nn
from torch.nn import functional as F
from detectron2.utils import comm, env
from .wrappers import BatchNorm2d
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
It contains non-trainable buffers called
"weight" and "bias", "running_mean", "running_var",
initialized to perform identity transformation.
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
which are computed from the original four parameters of BN.
The affine transform `x * weight + bias` will perform the equivalent
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
When loading a backbone model from Caffe2, "running_mean" and "running_var"
will be left unchanged as identity transformation.
Other pre-trained backbone models may contain all 4 parameters.
The forward is implemented by `F.batch_norm(..., training=False)`.
"""
_version = 3
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features) - eps)
def forward(self, x):
if x.requires_grad:
# When gradients are needed, F.batch_norm will use extra memory
# because its backward op computes gradients for weight/bias as well.
scale = self.weight * (self.running_var + self.eps).rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
out_dtype = x.dtype # may be half
return x * scale.to(out_dtype) + bias.to(out_dtype)
else:
# When gradients are not needed, F.batch_norm is a single fused op
# and provide more optimization opportunities.
return F.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
training=False,
eps=self.eps,
)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# No running_mean/var in early versions
            # This will silence the warnings
if prefix + "running_mean" not in state_dict:
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
if prefix + "running_var" not in state_dict:
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
# NOTE: if a checkpoint is trained with BatchNorm and loaded (together with
# version number) to FrozenBatchNorm, running_var will be wrong. One solution
# is to remove the version number from the checkpoint.
if version is not None and version < 3:
logger = logging.getLogger(__name__)
logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
# In version < 3, running_var are used without +eps.
state_dict[prefix + "running_var"] -= self.eps
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __repr__(self):
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
@classmethod
def convert_frozen_batchnorm(cls, module):
"""
Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
Args:
module (torch.nn.Module):
Returns:
If module is BatchNorm/SyncBatchNorm, returns a new module.
Otherwise, in-place convert module and return it.
Similar to convert_sync_batchnorm in
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
"""
bn_module = nn.modules.batchnorm
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
res = module
if isinstance(module, bn_module):
res = cls(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = cls.convert_frozen_batchnorm(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def get_norm(norm, out_channels):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
Returns:
nn.Module or None: the normalization layer
"""
if norm is None:
return None
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
# Fixed in https://github.com/pytorch/pytorch/pull/36382
"SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
# for debugging:
"nnSyncBN": nn.SyncBatchNorm,
"naiveSyncBN": NaiveSyncBatchNorm,
}[norm]
return norm(out_channels)
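# Hedged usage sketch (illustrative only): get_norm builds a norm layer from a
# short name, and convert_frozen_batchnorm replaces every BatchNorm in a model
# with a frozen copy of its statistics and affine parameters.
def _example_norm_helpers():
    from torch import nn

    assert isinstance(get_norm("GN", 64), nn.GroupNorm)
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    frozen = FrozenBatchNorm2d.convert_frozen_batchnorm(model)
    assert isinstance(frozen[1], FrozenBatchNorm2d)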
class NaiveSyncBatchNorm(BatchNorm2d):
"""
In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
when the batch size on each worker is different.
    (e.g., when scale augmentation is used, or when it is applied to the mask head).
This is a slower but correct alternative to `nn.SyncBatchNorm`.
Note:
There isn't a single definition of Sync BatchNorm.
When ``stats_mode==""``, this module computes overall statistics by using
statistics of each worker with equal weight. The result is true statistics
of all samples (as if they are all on one worker) only when all workers
have the same (N, H, W). This mode does not support inputs with zero batch size.
When ``stats_mode=="N"``, this module computes overall statistics by weighting
the statistics of each worker by their ``N``. The result is true statistics
of all samples (as if they are all on one worker) only when all workers
have the same (H, W). It is slower than ``stats_mode==""``.
Even though the result of this module may not be the true statistics of all samples,
    it may still be reasonable because it might be preferable to assign equal weights
to all workers, regardless of their (H, W) dimension, instead of putting larger weight
on larger images. From preliminary experiments, little difference is found between such
a simplified implementation and an accurate computation of overall mean & variance.
"""
def __init__(self, *args, stats_mode="", **kwargs):
super().__init__(*args, **kwargs)
assert stats_mode in ["", "N"]
self._stats_mode = stats_mode
def forward(self, input):
if comm.get_world_size() == 1 or not self.training:
return super().forward(input)
B, C = input.shape[0], input.shape[1]
half_input = input.dtype == torch.float16
if half_input:
# fp16 does not have good enough numerics for the reduction here
input = input.float()
mean = torch.mean(input, dim=[0, 2, 3])
meansqr = torch.mean(input * input, dim=[0, 2, 3])
if self._stats_mode == "":
assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
vec = torch.cat([mean, meansqr], dim=0)
vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
momentum = self.momentum
else:
if B == 0:
vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
vec = vec + input.sum() # make sure there is gradient w.r.t input
else:
vec = torch.cat(
[mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
)
vec = differentiable_all_reduce(vec * B)
total_batch = vec[-1].detach()
momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero
var = meansqr - mean * mean
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
self.running_mean += momentum * (mean.detach() - self.running_mean)
self.running_var += momentum * (var.detach() - self.running_var)
ret = input * scale + bias
if half_input:
ret = ret.half()
return ret
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/batch_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
from detectron2 import _C
def pairwise_iou_rotated(boxes1, boxes2):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in
(x_center, y_center, width, height, angle) format.
Arguments:
boxes1 (Tensor[N, 5])
boxes2 (Tensor[M, 5])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
return _C.box_iou_rotated(boxes1, boxes2)
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/rotated_boxes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
       Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def set(self, name: str, value: Any) -> None:
"""
Set the field named `name` to `value`.
The length of `value` must be the number of instances,
and must agree with other existing fields in this object.
"""
data_len = len(value)
if len(self._fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self._fields[name] = value
def has(self, name: str) -> bool:
"""
Returns:
bool: whether the field called `name` exists.
"""
return name in self._fields
def remove(self, name: str) -> None:
"""
Remove the field called `name`.
"""
del self._fields[name]
def get(self, name: str) -> Any:
"""
Returns the field called `name`.
"""
return self._fields[name]
def get_fields(self) -> Dict[str, Any]:
"""
Returns:
dict: a dict which maps names (str) to data of the fields
Modifying the returned dict will modify this instance.
"""
return self._fields
# Tensor-like methods
def to(self, *args: Any, **kwargs: Any) -> "Instances":
"""
Returns:
Instances: all fields are called with a `to(device)`, if the field has this method.
"""
ret = Instances(self._image_size)
for k, v in self._fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
ret.set(k, v)
return ret
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
def __len__(self) -> int:
for v in self._fields.values():
# use __len__ because len() has to be int and is not friendly to tracing
return v.__len__()
raise NotImplementedError("Empty Instances does not support __len__!")
def __iter__(self):
raise NotImplementedError("`Instances` object is not iterable!")
@staticmethod
def cat(instance_lists: List["Instances"]) -> "Instances":
"""
Args:
instance_lists (list[Instances])
Returns:
Instances
"""
assert all(isinstance(i, Instances) for i in instance_lists)
assert len(instance_lists) > 0
if len(instance_lists) == 1:
return instance_lists[0]
image_size = instance_lists[0].image_size
for i in instance_lists[1:]:
assert i.image_size == image_size
ret = Instances(image_size)
for k in instance_lists[0]._fields.keys():
values = [i.get(k) for i in instance_lists]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
elif hasattr(type(v0), "cat"):
values = type(v0).cat(values)
else:
raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
ret.set(k, values)
return ret
def __str__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self))
s += "image_height={}, ".format(self._image_size[0])
s += "image_width={}, ".format(self._image_size[1])
s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
return s
__repr__ = __str__
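# Hedged usage sketch (illustrative addition, not part of the original file):
# a minimal round trip through Instances using plain tensors as fields, so it
# does not depend on Boxes or other structures defined elsewhere.
def _example_instances():
    import torch

    inst = Instances((480, 640))                      # image height, width
    inst.scores = torch.tensor([0.9, 0.2, 0.75])
    inst.pred_classes = torch.tensor([3, 1, 3])
    assert len(inst) == 3 and inst.has("scores")
    confident = inst[inst.scores > 0.5]               # boolean-mask indexing
    assert len(confident) == 2
    merged = Instances.cat([inst, inst])              # concatenate along instances
    assert len(merged) == 6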
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/instances.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa
from .image_list import ImageList
from .instances import Instances
from .keypoints import Keypoints, heatmaps_to_keypoints
from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks
from .rotated_boxes import RotatedBoxes
from .rotated_boxes import pairwise_iou as pairwise_iou_rotated
__all__ = [k for k in globals().keys() if not k.startswith("_")]
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
from detectron2.utils.env import TORCH_VERSION
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
if TORCH_VERSION < (1, 8):
_maybe_jit_unused = torch.jit.unused
else:
def _maybe_jit_unused(x):
return x
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
    (x0, y0, x1, y1) in absolute floating-point coordinates.
    The coordinates are in the range [0, width] for x and [0, height] for y.
"""
XYWH_ABS = 1
"""
    (x0, y0, w, h) in absolute floating-point coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
    (xc, yc, w, h, a) in absolute floating-point coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
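# Hedged worked example (illustrative addition, not part of the original file):
# converting a 45-degree XYWHA_ABS box to XYXY_ABS yields its horizontal
# bounding rectangle, following the new_w = c*w + s*h, new_h = c*h + s*w
# derivation in BoxMode.convert above.
def _example_boxmode_convert():
    import math

    box = [10.0, 10.0, 4.0, 2.0, 45.0]                # 4x2 box at (10, 10), rotated 45 deg CCW
    x0, y0, x1, y1 = BoxMode.convert(box, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
    half = (4.0 + 2.0) * math.cos(math.pi / 4) / 2.0  # half side of the bounding square
    assert abs(x0 - (10.0 - half)) < 1e-4 and abs(x1 - (10.0 + half)) < 1e-4
    assert abs(y0 - (10.0 - half)) < 1e-4 and abs(y1 - (10.0 + half)) < 1e-4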
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
@_maybe_jit_unused
def to(self, device: torch.device):
        # Boxes are assumed float32 and do not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
        A box is considered empty if any of its sides is no larger than the threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
        The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
@_maybe_jit_unused
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
    Similar to :func:`pairwise_iou` but computes the IoA (intersection over boxes2 area).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoA, sized [N,M].
"""
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa
def matched_boxlist_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes. The box order must be (xmin, ymin, xmax, ymax).
Similar to boxlist_iou, but computes only diagonal elements of the matrix
Args:
boxes1: (Boxes) bounding boxes, sized [N,4].
boxes2: (Boxes) bounding boxes, sized [N,4].
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
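# Hedged usage sketch (illustrative addition, not part of the original file):
# pairwise_iou on two small Boxes objects; the expected values follow directly
# from the area / intersection formulas defined above.
def _example_pairwise_iou():
    import torch

    b1 = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
    b2 = Boxes(torch.tensor([[5.0, 0.0, 15.0, 10.0], [20.0, 20.0, 30.0, 30.0]]))
    iou = pairwise_iou(b1, b2)                        # shape [1, 2]
    # first pair: intersection 50, union 150 -> 1/3; second pair: no overlap -> 0
    assert torch.allclose(iou, torch.tensor([[1.0 / 3.0, 0.0]]), atol=1e-5)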
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/boxes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Any, List, Tuple, Union
import torch
from torch.nn import functional as F
from detectron2.utils.env import TORCH_VERSION
if TORCH_VERSION < (1, 8):
def script_if_tracing(fn):
return fn
else:
script_if_tracing = torch.jit.script_if_tracing
class Keypoints:
"""
Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property
containing the x,y location and visibility flag of each keypoint. This tensor has shape
(N, K, 3) where N is the number of instances and K is the number of keypoints per instance.
The visibility flag follows the COCO format and must be one of three integers:
* v=0: not labeled (in which case x=y=0)
* v=1: labeled but not visible
* v=2: labeled and visible
"""
def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
"""
Arguments:
keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
The shape should be (N, K, 3) where N is the number of
instances, and K is the number of keypoints per instance.
"""
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu")
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
self.tensor = keypoints
def __len__(self) -> int:
return self.tensor.size(0)
def to(self, *args: Any, **kwargs: Any) -> "Keypoints":
return type(self)(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
"""
Convert keypoint annotations to a heatmap of one-hot labels for training,
as described in :paper:`Mask R-CNN`.
Arguments:
            boxes: Nx4 tensor, the boxes to draw the keypoints into.
            heatmap_size: integer side length of the square heatmap.
Returns:
heatmaps:
A tensor of shape (N, K), each element is integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid:
A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
"""
return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
"""
Create a new `Keypoints` by indexing on this `Keypoints`.
        The following usages are allowed:
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
2. `new_kpts = kpts[2:10]`: return a slice of key points.
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
Note that the returned Keypoints might share storage with this Keypoints,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Keypoints([self.tensor[item]])
return Keypoints(self.tensor[item])
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.
Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
Arguments:
keypoints: tensor of keypoint locations in of shape (N, K, 3).
rois: Nx4 tensor of rois in xyxy format
heatmap_size: integer side length of square heatmap.
Returns:
heatmaps: A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid: A tensor of shape (N, K) containing whether each keypoint is in
the roi or not.
"""
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
@script_if_tracing
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
# The decorator use of torch.no_grad() was not supported by torchscript.
# https://github.com/pytorch/pytorch/issues/44768
maps = maps.detach()
rois = rois.detach()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = F.interpolate(
maps[[i]], size=outsize, mode="bicubic", align_corners=False
).squeeze(
0
) # #keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
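# Hedged worked example (illustrative addition, not part of the original file):
# mapping one visible keypoint into a heatmap_size x heatmap_size grid. With an
# ROI of [0, 0, 10, 10] and heatmap_size = 5, a keypoint at (4, 6) falls into
# cell (x=2, y=3), i.e. linear index 3 * 5 + 2 = 17.
def _example_keypoints_to_heatmap():
    import torch

    kpts = torch.tensor([[[4.0, 6.0, 2.0]]])          # (N=1, K=1, 3), visibility 2
    rois = torch.tensor([[0.0, 0.0, 10.0, 10.0]])     # (N=1, 4) in xyxy format
    heatmaps, valid = _keypoints_to_heatmap(kpts, rois, heatmap_size=5)
    assert heatmaps.tolist() == [[17]] and valid.tolist() == [[1]]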
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/keypoints.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
assert len(polygons) > 0, "COCOAPI does not support empty polygons"
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
    return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
        The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].view(1, -1))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
        However, we observe no difference in accuracy,
        and BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
            If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
                level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
        return torch.from_numpy(np.asarray(keep, dtype=bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
           corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
            Each ndarray is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
        The following usages are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
        Args:
            boxes (Tensor): Nx4 boxes that locate each mask within the image.
            height, width (int): the size of the output image.
            threshold (float): mask values above this threshold are binarized to True.
"""
from detectron2.layers import paste_masks_in_image
paste = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste(
self.tensor,
boxes,
(height, width),
threshold=threshold,
)
return BitMasks(bitmasks)
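# Hedged usage sketch (illustrative addition, not part of the original file):
# a PolygonMasks holding a single square instance. get_bounding_boxes() and
# area() only need numpy/torch, so this sketch avoids pycocotools; the shoelace
# area of the 4x4 square is 16 and its tight box is (1, 1, 5, 5).
def _example_polygon_masks():
    import numpy as np
    import torch

    square = np.array([1.0, 1.0, 5.0, 1.0, 5.0, 5.0, 1.0, 5.0])  # x0, y0, ..., xn, yn
    masks = PolygonMasks([[square]])                              # one instance, one polygon
    assert abs(float(masks.area()[0]) - 16.0) < 1e-6
    assert torch.allclose(
        masks.get_bounding_boxes().tensor, torch.tensor([[1.0, 1.0, 5.0, 5.0]])
    )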
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/masks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.utils.env import TORCH_VERSION
def _as_tensor(x: Tuple[int, int]) -> torch.Tensor:
"""
An equivalent of `torch.as_tensor`, but works under tracing if input
    is a list of tensors. `torch.as_tensor` will record a constant in tracing,
but this function will use `torch.stack` instead.
"""
if torch.jit.is_scripting():
return torch.as_tensor(x)
if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
return torch.stack(x)
return torch.as_tensor(x)
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def device(self) -> device:
return self.tensor.device
@staticmethod
def from_tensors(
tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [_as_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)) // stride * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
# https://github.com/pytorch/pytorch/issues/42448
if TORCH_VERSION >= (1, 7) and torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
batched_imgs = tensors[0].new_full(batch_shape, pad_value)
for img, pad_img in zip(tensors, batched_imgs):
pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes)
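# Hedged usage sketch (illustrative addition, not part of the original file):
# batching two differently sized images with size_divisibility=32 pads both to
# a common 64x96 canvas, while image_sizes keeps each original (h, w).
def _example_image_list():
    import torch

    imgs = [torch.rand(3, 50, 70), torch.rand(3, 33, 92)]
    batched = ImageList.from_tensors(imgs, size_divisibility=32)
    assert batched.tensor.shape == (2, 3, 64, 96)
    assert batched.image_sizes == [(50, 70), (33, 92)]
    assert batched[0].shape == (3, 50, 70)            # original-size view of image 0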
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/image_list.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Tuple
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from .boxes import Boxes, _maybe_jit_unused
class RotatedBoxes(Boxes):
"""
This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx5 matrix. Each row is
(x_center, y_center, width, height, angle),
in which angle is represented in degrees.
While there's no strict range restriction for it,
the recommended principal range is between [-180, 180) degrees.
Assume we have a horizontal box B = (x_center, y_center, width, height),
where width is along the x-axis and height is along the y-axis.
The rotated box B_rot (x_center, y_center, width, height, angle)
can be seen as:
1. When angle == 0:
B_rot == B
2. When angle > 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
3. When angle < 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
Mathematically, since the right-handed coordinate system for image space
is (y, x), where y is top->down and x is left->right, the 4 vertices of the
rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
:math:`(y_c, x_c)` is the center of the rectangle):
.. math::
yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
which is the standard rigid-body rotation transformation.
Intuitively, the angle is
(1) the rotation angle from y-axis in image space
to the height vector (top->down in the box's local coordinate system)
of the box in CCW, and
(2) the rotation angle from x-axis in image space
to the width vector (left->right in the box's local coordinate system)
of the box in CCW.
More intuitively, consider the following horizontal box ABCD represented
in (x1, y1, x2, y2): (3, 2, 7, 4),
covering the [3, 7] x [2, 4] region of the continuous coordinate system
which looks like this:
.. code:: none
O--------> x
|
| A---B
| | |
| D---C
|
v y
Note that each capital letter represents one 0-dimensional geometric point
instead of a 'square pixel' here.
In the example above, using (x, y) to represent a point we have:
.. math::
O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
We name vector AB = vector DC as the width vector in box's local coordinate system, and
vector AD = vector BC as the height vector in box's local coordinate system. Initially,
when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
in the image space, respectively.
For better illustration, we denote the center of the box as E,
.. code:: none
O--------> x
|
| A---B
| | E |
| D---C
|
v y
where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
Also,
.. math::
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Therefore, the corresponding representation for the same shape in rotated box in
(x_center, y_center, width, height, angle) format is:
(5, 3, 4, 2, 0),
Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
CCW (counter-clockwise) by definition. It looks like this:
.. code:: none
O--------> x
| B-C
| | |
| |E|
| | |
| A-D
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CCW with regard to E:
A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
vector AD or vector BC (the top->down height vector in box's local coordinate system),
or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
width vector in box's local coordinate system).
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
by definition? It looks like this:
.. code:: none
O--------> x
| D-A
| | |
| |E|
| | |
| C-B
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CW with regard to E:
A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
will be 1. However, these two will generate different RoI Pooling results and
        should not be treated as identical boxes.
On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
(X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
equivalent to rotating the same shape 90 degrees CW.
We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
.. code:: none
O--------> x
|
| C---D
| | E |
| B---A
|
v y
.. math::
A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Finally, this is a very inaccurate (heavily quantized) illustration of
how (5, 3, 4, 2, 60) looks like in case anyone wonders:
.. code:: none
O--------> x
| B\
| / C
| /E /
| A /
| `D
v y
It's still a rectangle with center of (5, 3), width of 4 and height of 2,
but its angle (and thus orientation) is somewhere between
(5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
self.tensor = tensor
def clone(self) -> "RotatedBoxes":
"""
Clone the RotatedBoxes.
Returns:
RotatedBoxes
"""
return RotatedBoxes(self.tensor.clone())
@_maybe_jit_unused
def to(self, device: torch.device):
        # Boxes are assumed float32 and do not support to(dtype)
return RotatedBoxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = box[:, 2] * box[:, 3]
return area
def normalize_angles(self) -> None:
"""
Restrict angles to the range of [-180, 180) degrees
"""
self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
For RRPN:
Only clip boxes that are almost horizontal with a tolerance of
clip_angle_threshold to maintain backward compatibility.
Rotated boxes beyond this threshold are not clipped for two reasons:
1. There are potentially multiple ways to clip a rotated box to make it
fit within the image.
2. It's tricky to make the entire rectangular box fit within the image
and still be able to not leave out pixels of interest.
Therefore we rely on ops like RoIAlignRotated to safely handle this.
Args:
box_size (height, width): The clipping box's size.
clip_angle_threshold:
Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
we do the clipping as horizontal boxes.
"""
h, w = box_size
        # normalize angles to be within [-180, 180) degrees
self.normalize_angles()
idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
# convert to (x1, y1, x2, y2)
x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
# clip
x1.clamp_(min=0, max=w)
y1.clamp_(min=0, max=h)
x2.clamp_(min=0, max=w)
y2.clamp_(min=0, max=h)
# convert back to (xc, yc, w, h)
self.tensor[idx, 0] = (x1 + x2) / 2.0
self.tensor[idx, 1] = (y1 + y2) / 2.0
# make sure widths and heights do not increase due to numerical errors
self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
        A box is considered empty if any of its sides is no larger than the threshold.
Returns:
Tensor: a binary vector which represents
whether each box is empty (False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2]
heights = box[:, 3]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
        The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "RotatedBoxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box covering
[0, width] x [0, height]
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
For RRPN, it might not be necessary to call this function since it's common
for rotated box to extend to outside of the image boundaries
(the clip function only clips the near-horizontal boxes)
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
cnt_x = self.tensor[..., 0]
cnt_y = self.tensor[..., 1]
half_w = self.tensor[..., 2] / 2.0
half_h = self.tensor[..., 3] / 2.0
a = self.tensor[..., 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
max_rect_dx = c * half_w + s * half_h
max_rect_dy = c * half_h + s * half_w
inds_inside = (
(cnt_x - max_rect_dx >= -boundary_threshold)
& (cnt_y - max_rect_dy >= -boundary_threshold)
& (cnt_x + max_rect_dx < width + boundary_threshold)
& (cnt_y + max_rect_dy < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return self.tensor[:, :2]
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the rotated box with horizontal and vertical scaling factors
Note: when scale_factor_x != scale_factor_y,
the rotated box does not preserve the rectangular shape when the angle
is not a multiple of 90 degrees under resize transformation.
        Instead, the shape becomes a parallelogram (with skew).
Here we make an approximation by fitting a rotated rectangle to the parallelogram.
"""
self.tensor[:, 0] *= scale_x
self.tensor[:, 1] *= scale_y
theta = self.tensor[:, 4] * math.pi / 180.0
c = torch.cos(theta)
s = torch.sin(theta)
# In image space, y is top->down and x is left->right
        # Consider the local coordinate system for the rotated box,
# where the box center is located at (0, 0), and the four vertices ABCD are
# A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
# the midpoint of the left edge AD of the rotated box E is:
# E = (A+D)/2 = (-w / 2, 0)
# the midpoint of the top edge AB of the rotated box F is:
# F(0, -h / 2)
# To get the old coordinates in the global system, apply the rotation transformation
# (Note: the right-handed coordinate system for image space is yOx):
# (old_x, old_y) = (s * y + c * x, c * y - s * x)
# E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
# F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
# After applying the scaling factor (sfx, sfy):
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
# F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
        # The new width after scaling transformation becomes:
# w(new) = |E(new) - O| * 2
# = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
# = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
# i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
# h(new) = |F(new) - O| * 2
# = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
# = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
# i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
# The angle is the rotation angle from y-axis in image space to the height
# vector (top->down in the box's local coordinate system) of the box in CCW.
#
# angle(new) = angle_yOx(O - F(new))
# = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
# = atan2(sfx * s * h / 2, sfy * c * h / 2)
# = atan2(sfx * s, sfy * c)
#
# For example,
# when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
@classmethod
@_maybe_jit_unused
def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
"""
Concatenates a list of RotatedBoxes into a single RotatedBoxes
Arguments:
boxes_list (list[RotatedBoxes])
Returns:
RotatedBoxes: the concatenated RotatedBoxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
# use torch.cat (vs. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (5,) at a time.
"""
yield from self.tensor
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor:
"""
Given two lists of rotated boxes of size N and M,
compute the IoU (intersection over union)
between **all** N x M pairs of boxes.
The box order must be (x_center, y_center, width, height, angle).
Args:
boxes1, boxes2 (RotatedBoxes):
two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
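# --- Illustrative usage sketch (appended example, not part of the original module) ---
# A minimal sketch of scale() and pairwise_iou(), assuming detectron2 is installed
# together with its compiled rotated-box ops (pairwise_iou_rotated); run e.g. via
# `python -m detectron2.structures.rotated_boxes`.
if __name__ == "__main__":
    boxes = RotatedBoxes(torch.tensor([[10.0, 10.0, 4.0, 2.0, 30.0]]))
    boxes.scale(2.0, 1.0)  # anisotropic scaling refits width, height and angle
    other = RotatedBoxes(torch.tensor([[20.0, 10.0, 8.0, 2.0, 0.0]]))
    print(pairwise_iou(boxes, other))  # IoU matrix of shape [1, 1]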
|
banmo-main
|
third_party/detectron2_old/detectron2/structures/rotated_boxes.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY". After calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
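# --- Illustrative usage sketch (appended example, not part of the original module) ---
# A minimal sketch of @configurable on a class __init__. The _Demo class and the
# DEMO_A / DEMO_B keys are hypothetical; omegaconf (a detectron2 dependency) is
# assumed to be installed. Run e.g. via `python -m detectron2.config.config`.
if __name__ == "__main__":

    class _Demo:
        @configurable
        def __init__(self, a, b=2):
            self.a, self.b = a, b

        @classmethod
        def from_config(cls, cfg):  # 'cfg' must be the first argument
            return {"a": cfg.DEMO_A, "b": cfg.DEMO_B}

    _demo_cfg = CfgNode({"DEMO_A": 1, "DEMO_B": 3})
    d1 = _Demo(a=1)             # regular construction -> b == 2
    d2 = _Demo(_demo_cfg)       # construction from a config -> b == 3
    d3 = _Demo(_demo_cfg, b=5)  # config plus explicit overwrite -> b == 5
    print(d1.b, d2.b, d3.b)     # 2 3 5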
|
banmo-main
|
third_party/detectron2_old/detectron2/config/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Backward compatibility of configs.
Instructions to bump version:
+ It is not necessary to bump the version if new keys are added.
It's only needed when backward-incompatible changes happen
(i.e., some existing keys disappear, or the meaning of a key changes)
+ To bump version, do the following:
1. Increment _C.VERSION in defaults.py
2. Add a converter in this file.
Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
and a function "downgrade" which in-place downgrades config from X to X-1
In each function, VERSION is left unchanged.
Each converter assumes that its input has the relevant keys
(i.e., the input is not a partial config).
3. Run the tests (test_config.py) to make sure the upgrade & downgrade
functions are consistent.
"""
import logging
from typing import List, Optional, Tuple
from .config import CfgNode as CN
from .defaults import _C
__all__ = ["upgrade_config", "downgrade_config"]
def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
"""
Upgrade a config from its current version to a newer version.
Args:
cfg (CfgNode):
to_version (int): defaults to the latest version.
"""
cfg = cfg.clone()
if to_version is None:
to_version = _C.VERSION
assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version):
converter = globals()["ConverterV" + str(k + 1)]
converter.upgrade(cfg)
cfg.VERSION = k + 1
return cfg
def downgrade_config(cfg: CN, to_version: int) -> CN:
"""
Downgrade a config from its current version to an older version.
Args:
cfg (CfgNode):
to_version (int):
Note:
A general downgrade of arbitrary configs is not always possible due to the
different functionalities in different versions.
The purpose of downgrade is only to recover the defaults in old versions,
allowing it to load an old partial yaml config.
Therefore, the implementation only needs to fill in the default values
in the old version when a general downgrade is not possible.
"""
cfg = cfg.clone()
assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version, -1):
converter = globals()["ConverterV" + str(k)]
converter.downgrade(cfg)
cfg.VERSION = k - 1
return cfg
def guess_version(cfg: CN, filename: str) -> int:
"""
Guess the version of a partial config where the VERSION field is not specified.
Returns the version, or the latest version if it cannot make a guess.
This makes it easier for users to migrate.
"""
logger = logging.getLogger(__name__)
def _has(name: str) -> bool:
cur = cfg
for n in name.split("."):
if n not in cur:
return False
cur = cur[n]
return True
# Most users' partial configs have "MODEL.WEIGHT", so guess on it
ret = None
if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
ret = 1
if ret is not None:
logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
else:
ret = _C.VERSION
logger.warning(
"Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
filename, ret
)
)
return ret
def _rename(cfg: CN, old: str, new: str) -> None:
old_keys = old.split(".")
new_keys = new.split(".")
def _set(key_seq: List[str], val: str) -> None:
cur = cfg
for k in key_seq[:-1]:
if k not in cur:
cur[k] = CN()
cur = cur[k]
cur[key_seq[-1]] = val
def _get(key_seq: List[str]) -> CN:
cur = cfg
for k in key_seq:
cur = cur[k]
return cur
def _del(key_seq: List[str]) -> None:
cur = cfg
for k in key_seq[:-1]:
cur = cur[k]
del cur[key_seq[-1]]
if len(cur) == 0 and len(key_seq) > 1:
_del(key_seq[:-1])
_set(new_keys, _get(old_keys))
_del(old_keys)
class _RenameConverter:
"""
A converter that handles simple rename.
"""
RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
@classmethod
def upgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME:
_rename(cfg, old, new)
@classmethod
def downgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME[::-1]:
_rename(cfg, new, old)
class ConverterV1(_RenameConverter):
RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
class ConverterV2(_RenameConverter):
"""
A large bulk of rename, before public release.
"""
RENAME = [
("MODEL.WEIGHT", "MODEL.WEIGHTS"),
("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
(
"MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
"MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
),
("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
("TEST.AUG_ON", "TEST.AUG.ENABLED"),
("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
]
@classmethod
def upgrade(cls, cfg: CN) -> None:
super().upgrade(cfg)
if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
_rename(
cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
)
_rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
else:
_rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
_rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
@classmethod
def downgrade(cls, cfg: CN) -> None:
super().downgrade(cfg)
_rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
_rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version
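# --- Illustrative sketch (appended example, not part of the original module) ---
# A toy demonstration of the _rename helper used by the converters above; the key
# names MODEL.OLD_KEY / MODEL.NEW.KEY are made up for illustration. Run e.g. via
# `python -m detectron2.config.compat`.
if __name__ == "__main__":
    _demo = CN({"MODEL": CN({"OLD_KEY": 1})})
    _rename(_demo, "MODEL.OLD_KEY", "MODEL.NEW.KEY")
    print(_demo)  # MODEL.OLD_KEY is gone; MODEL.NEW.KEY == 1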
|
banmo-main
|
third_party/detectron2_old/detectron2/config/compat.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .compat import downgrade_config, upgrade_config
from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
from .instantiate import instantiate
from .lazy import LazyCall, LazyConfig
__all__ = [
"CfgNode",
"get_cfg",
"global_cfg",
"set_global_cfg",
"downgrade_config",
"upgrade_config",
"configurable",
"instantiate",
"LazyCall",
"LazyConfig",
]
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata(__name__, globals(), __all__)
del fixup_module_metadata
|
banmo-main
|
third_party/detectron2_old/detectron2/config/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg  # return as-is if we don't know what to do
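# --- Illustrative usage sketch (appended example, not part of the original module) ---
# A minimal round trip through dump_dataclass() and instantiate(); the _Point
# dataclass is made up for illustration, and omegaconf (a detectron2 dependency)
# is assumed to be installed. Run e.g. via `python -m detectron2.config.instantiate`.
if __name__ == "__main__":

    @dataclasses.dataclass
    class _Point:
        x: int = 0
        y: int = 0

    dumped = dump_dataclass(_Point(x=1, y=2))  # {"_target_": "..._Point", "x": 1, "y": 2}
    # "_target_" may be a dotted path or, as here, the callable itself.
    rebuilt = instantiate({"_target_": _Point, "x": 3, "y": 4})
    print(dumped, rebuilt)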
|
banmo-main
|
third_party/detectron2_old/detectron2/config/instantiate.py
|