# Dataset dump: rows of (python_code | repo_name | file_path) from the
# banmo-main repository. Each source file below ends with a marker line
# giving its repo name and file path.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import multiprocessing as mp
import numpy as np
import os
import time
from fvcore.common.download import download
from panopticapi.utils import rgb2id
from PIL import Image
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
panoptic = rgb2id(panoptic)
output = np.zeros_like(panoptic, dtype=np.uint8) + 255
for seg in segments:
cat_id = seg["category_id"]
new_cat_id = id_map[cat_id]
output[panoptic == seg["id"]] = new_cat_id
Image.fromarray(output).save(output_semantic)
def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
"""
Create semantic segmentation annotations from panoptic segmentation
annotations, to be used by PanopticFPN.
    It maps all thing categories to class 0, all stuff categories to contiguous
    ids starting from 1, and all unlabeled pixels to class 255.
Args:
panoptic_json (str): path to the panoptic json file, in COCO's format.
panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
sem_seg_root (str): a directory to output semantic annotation files
categories (list[dict]): category metadata. Each dict needs to have:
"id": corresponds to the "category_id" in the json annotations
"isthing": 0 or 1
"""
os.makedirs(sem_seg_root, exist_ok=True)
stuff_ids = [k["id"] for k in categories if k["isthing"] == 0]
thing_ids = [k["id"] for k in categories if k["isthing"] == 1]
id_map = {} # map from category id to id in the output semantic annotation
assert len(stuff_ids) <= 254
for i, stuff_id in enumerate(stuff_ids):
id_map[stuff_id] = i + 1
for thing_id in thing_ids:
id_map[thing_id] = 0
id_map[0] = 255
with open(panoptic_json) as f:
obj = json.load(f)
pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
def iter_annotations():
for anno in obj["annotations"]:
file_name = anno["file_name"]
segments = anno["segments_info"]
input = os.path.join(panoptic_root, file_name)
output = os.path.join(sem_seg_root, file_name)
yield input, output, segments
print("Start writing to {} ...".format(sem_seg_root))
start = time.time()
pool.starmap(
functools.partial(_process_panoptic_to_semantic, id_map=id_map),
iter_annotations(),
chunksize=100,
)
print("Finished. time: {:.2f}s".format(time.time() - start))
if __name__ == "__main__":
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
for s in ["val2017", "train2017"]:
separate_coco_semantic_from_panoptic(
os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
os.path.join(dataset_dir, "panoptic_{}".format(s)),
os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)),
COCO_CATEGORIES,
)
# Prepare val2017_100 for quick testing:
dest_dir = os.path.join(dataset_dir, "annotations/")
URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir)
with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f:
obj = json.load(f)
def link_val100(dir_full, dir_100):
print("Creating " + dir_100 + " ...")
os.makedirs(dir_100, exist_ok=True)
for img in obj["images"]:
basename = os.path.splitext(img["file_name"])[0]
src = os.path.join(dir_full, basename + ".png")
dst = os.path.join(dir_100, basename + ".png")
src = os.path.relpath(src, start=dir_100)
os.symlink(src, dst)
link_val100(
os.path.join(dataset_dir, "panoptic_val2017"),
os.path.join(dataset_dir, "panoptic_val2017_100"),
)
link_val100(
os.path.join(dataset_dir, "panoptic_stuff_val2017"),
os.path.join(dataset_dir, "panoptic_stuff_val2017_100"),
)

# === end of file: third_party/detectron2_old/datasets/prepare_panoptic_fpn.py (repo: banmo-main) ===
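
# A minimal sketch (not from the repo) of the id_map that
# separate_coco_semantic_from_panoptic builds above, on a toy category list:
# thing ids collapse to 0, stuff ids get contiguous ids from 1, and the
# unlabeled id 0 maps to the ignore value 255.
toy_categories = [
    {"id": 1, "isthing": 1},   # a "thing" category -> 0
    {"id": 92, "isthing": 0},  # a "stuff" category -> 1
    {"id": 93, "isthing": 0},  # a "stuff" category -> 2
]
stuff_ids = [c["id"] for c in toy_categories if c["isthing"] == 0]
thing_ids = [c["id"] for c in toy_categories if c["isthing"] == 1]
id_map = {stuff_id: i + 1 for i, stuff_id in enumerate(stuff_ids)}
id_map.update({thing_id: 0 for thing_id in thing_ids})
id_map[0] = 255
assert id_map == {92: 1, 93: 2, 1: 0, 0: 255}
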
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import os
from pathlib import Path
import tqdm
from PIL import Image
def convert(input, output):
img = np.asarray(Image.open(input))
assert img.dtype == np.uint8
    img = img - 1  # uint8 wraparound: the ignore label 0 becomes 255; all others shift down by 1
Image.fromarray(img).save(output)
if __name__ == "__main__":
dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
for name in ["training", "validation"]:
annotation_dir = dataset_dir / "annotations" / name
output_dir = dataset_dir / "annotations_detectron2" / name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file)

# === end of file: third_party/detectron2_old/datasets/prepare_ade20k_sem_seg.py (repo: banmo-main) ===
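
# A minimal sketch (not from the repo) of the uint8 wraparound that the
# `img = img - 1` line in convert() above relies on: the ADE20k ignore label
# 0 underflows to 255, while labels 1..150 shift down to 0..149.
import numpy as np

labels = np.array([0, 1, 150], dtype=np.uint8)
assert ((labels - 1) == np.array([255, 0, 149], dtype=np.uint8)).all()
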
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.utils.registry import _convert_target_to_string, locate
class A:
class B:
pass
class TestLocate(unittest.TestCase):
def _test_obj(self, obj):
name = _convert_target_to_string(obj)
newobj = locate(name)
self.assertIs(obj, newobj)
def test_basic(self):
self._test_obj(GeneralizedRCNN)
def test_inside_class(self):
# requires using __qualname__ instead of __name__
self._test_obj(A.B)
def test_builtin(self):
self._test_obj(len)
self._test_obj(dict)
def test_pytorch_optim(self):
# pydoc.locate does not work for it
self._test_obj(torch.optim.SGD)
def test_failure(self):
with self.assertRaises(ImportError):
locate("asdf")
def test_compress_target(self):
from detectron2.data.transforms import RandomCrop
name = _convert_target_to_string(RandomCrop)
# name shouldn't contain 'augmentation_impl'
self.assertEqual(name, "detectron2.data.transforms.RandomCrop")
self.assertIs(RandomCrop, locate(name))

# === end of file: third_party/detectron2_old/tests/test_registry.py (repo: banmo-main) ===
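
# A minimal sketch (not from the repo) of why test_inside_class above needs
# __qualname__: for a nested class, __name__ drops the enclosing scope, so a
# dotted module path built from it could not be resolved back to the object.
class Outer:
    class Inner:
        pass

assert Outer.Inner.__name__ == "Inner"
assert Outer.Inner.__qualname__ == "Outer.Inner"
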
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import time
import unittest
from unittest import mock
import torch
from fvcore.common.checkpoint import Checkpointer
from torch import nn
from detectron2.config import configurable, get_cfg
from detectron2.engine import DefaultTrainer, SimpleTrainer, hooks
from detectron2.modeling.meta_arch import META_ARCH_REGISTRY
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
@META_ARCH_REGISTRY.register()
class _SimpleModel(nn.Module):
@configurable
def __init__(self, sleep_sec=0):
super().__init__()
self.mod = nn.Linear(10, 20)
self.sleep_sec = sleep_sec
@classmethod
def from_config(cls, cfg):
return {}
def forward(self, x):
if self.sleep_sec > 0:
time.sleep(self.sleep_sec)
return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])}
class TestTrainer(unittest.TestCase):
def _data_loader(self, device):
device = torch.device(device)
while True:
yield torch.rand(3, 3).to(device)
def test_simple_trainer(self, device="cpu"):
model = _SimpleModel().to(device=device)
trainer = SimpleTrainer(
model, self._data_loader(device), torch.optim.SGD(model.parameters(), 0.1)
)
trainer.train(0, 10)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_simple_trainer_cuda(self):
self.test_simple_trainer(device="cuda")
def test_writer_hooks(self):
model = _SimpleModel(sleep_sec=0.1)
trainer = SimpleTrainer(
model, self._data_loader("cpu"), torch.optim.SGD(model.parameters(), 0.1)
)
max_iter = 50
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
json_file = os.path.join(d, "metrics.json")
writers = [CommonMetricPrinter(max_iter), JSONWriter(json_file)]
trainer.register_hooks(
[hooks.EvalHook(0, lambda: {"metric": 100}), hooks.PeriodicWriter(writers)]
)
with self.assertLogs(writers[0].logger) as logs:
trainer.train(0, max_iter)
with open(json_file, "r") as f:
data = [json.loads(line.strip()) for line in f]
self.assertEqual([x["iteration"] for x in data], [19, 39, 49, 50])
# the eval metric is in the last line with iter 50
self.assertIn("metric", data[-1], "Eval metric must be in last line of JSON!")
# test logged messages from CommonMetricPrinter
self.assertEqual(len(logs.output), 3)
for log, iter in zip(logs.output, [19, 39, 49]):
self.assertIn(f"iter: {iter}", log)
self.assertIn("eta: 0:00:00", logs.output[-1], "Last ETA must be 0!")
@unittest.skipIf(os.environ.get("CI"), "Require COCO data.")
def test_default_trainer(self):
# TODO: this test requires manifold access, so changed device to CPU. see: T88318502
cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
cfg.DATASETS.TRAIN = ("coco_2017_val_100",)
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
cfg.OUTPUT_DIR = d
trainer = DefaultTrainer(cfg)
# test property
self.assertIs(trainer.model, trainer._trainer.model)
trainer.model = _SimpleModel()
self.assertIs(trainer.model, trainer._trainer.model)
def test_checkpoint_resume(self):
model = _SimpleModel()
dataloader = self._data_loader("cpu")
opt = torch.optim.SGD(model.parameters(), 0.1)
scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
trainer = SimpleTrainer(model, dataloader, opt)
checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
trainer.register_hooks(
[
hooks.LRScheduler(scheduler=scheduler),
# checkpoint after scheduler to properly save the state of scheduler
hooks.PeriodicCheckpointer(checkpointer, 10),
]
)
trainer.train(0, 12)
self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
self.assertEqual(scheduler.last_epoch, 12)
del trainer
opt = torch.optim.SGD(model.parameters(), 999) # lr will be loaded
trainer = SimpleTrainer(model, dataloader, opt)
scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
trainer.register_hooks(
[
hooks.LRScheduler(scheduler=scheduler),
]
)
checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
checkpointer.resume_or_load("non_exist.pth")
self.assertEqual(trainer.iter, 11) # last finished iter number (0-based in Trainer)
# number of times `scheduler.step()` was called (1-based)
self.assertEqual(scheduler.last_epoch, 12)
self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
def test_eval_hook(self):
model = _SimpleModel()
dataloader = self._data_loader("cpu")
opt = torch.optim.SGD(model.parameters(), 0.1)
for total_iter, period, eval_count in [(30, 15, 2), (31, 15, 3), (20, 0, 1)]:
test_func = mock.Mock(return_value={"metric": 3.0})
trainer = SimpleTrainer(model, dataloader, opt)
trainer.register_hooks([hooks.EvalHook(period, test_func)])
trainer.train(0, total_iter)
self.assertEqual(test_func.call_count, eval_count)

# === end of file: third_party/detectron2_old/tests/test_engine.py (repo: banmo-main) ===
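
# A minimal sketch (not from the repo) of the scheduler arithmetic behind
# test_checkpoint_resume above: StepLR(opt, 3) multiplies the lr by its
# default gamma=0.1 every 3 steps, so after 12 steps 0.1 * 0.1**4 == 1e-5.
import torch
from torch import nn

p = nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([p], lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
for _ in range(12):
    opt.step()
    scheduler.step()
assert abs(opt.param_groups[0]["lr"] - 1e-5) < 1e-12
assert scheduler.last_epoch == 12
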
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import os
import tempfile
import unittest
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode, Instances, RotatedBoxes
from detectron2.utils.visualizer import ColorMode, Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
        mask = np.zeros_like(img[:, :, 0], dtype=bool)
mask[:40, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {
"annotations": [
{
"bbox": [
368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685,
],
"bbox_mode": BoxMode.XYWH_ABS,
"category_id": 0,
"iscrowd": 1,
"segmentation": {
"counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2",
"size": [512, 512],
},
}
],
"height": 512,
"image_id": 1,
"width": 512,
}
v = Visualizer(img)
v.draw_dataset_dict(dic)
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_draw_rotated_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {
"annotations": [
{
"bbox": [
368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685,
45.0,
],
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 0,
"iscrowd": 1,
}
],
"height": 512,
"image_id": 1,
"width": 512,
}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
# Test 2x scaling
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
# Test overlay masks
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img)
v.draw_instance_predictions(inst)
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_BWmode_nomask(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
def test_draw_binary_mask(self):
img, boxes, _, _, masks = self._random_data()
img[:, :, 0] = 0 # remove red color
mask = masks[0]
mask_with_hole = np.zeros_like(mask).astype("uint8")
mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5)
for m in [mask, mask_with_hole]:
for save in [True, False]:
v = Visualizer(img)
o = v.draw_binary_mask(m, color="red", text="test")
if save:
with tempfile.TemporaryDirectory(prefix="detectron2_viz") as d:
path = os.path.join(d, "output.png")
o.save(path)
o = cv2.imread(path)[:, :, ::-1]
else:
o = o.get_image().astype("float32")
# red color is drawn on the image
self.assertTrue(o[:, :, 0].sum() > 0)
def test_border_mask_with_holes(self):
H, W = 200, 200
img = np.zeros((H, W, 3))
img[:, :, 0] = 255.0
v = Visualizer(img, scale=3)
mask = np.zeros((H, W))
mask[:, 100:150] = 1
        # create a hole, to trigger the imshow codepath used for masks with holes
mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1)
output = v.draw_binary_mask(mask, color="blue")
output = output.get_image()[:, :, ::-1]
first_row = {tuple(x.tolist()) for x in output[0]}
last_row = {tuple(x.tolist()) for x in output[-1]}
# Check quantization / off-by-1 error: the first and last row must have two colors
self.assertEqual(len(last_row), 2)
self.assertEqual(len(first_row), 2)
self.assertIn((0, 0, 255), last_row)
self.assertIn((0, 0, 255), first_row)
def test_border_polygons(self):
H, W = 200, 200
img = np.zeros((H, W, 3))
img[:, :, 0] = 255.0
v = Visualizer(img, scale=3)
mask = np.zeros((H, W))
mask[:, 100:150] = 1
output = v.draw_binary_mask(mask, color="blue")
output = output.get_image()[:, :, ::-1]
first_row = {tuple(x.tolist()) for x in output[0]}
last_row = {tuple(x.tolist()) for x in output[-1]}
# Check quantization / off-by-1 error:
# the first and last row must have >=2 colors, because the polygon
# touches both rows
self.assertGreaterEqual(len(last_row), 2)
self.assertGreaterEqual(len(first_row), 2)
self.assertIn((0, 0, 255), last_row)
self.assertIn((0, 0, 255), first_row)
if __name__ == "__main__":
unittest.main()

# === end of file: third_party/detectron2_old/tests/test_visualizer.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from detectron2.utils.collect_env import collect_env_info
class TestProjects(unittest.TestCase):
def test_import(self):
from detectron2.projects import point_rend
_ = point_rend.add_pointrend_config
import detectron2.projects.deeplab as deeplab
_ = deeplab.add_deeplab_config
# import detectron2.projects.panoptic_deeplab as panoptic_deeplab
# _ = panoptic_deeplab.add_panoptic_deeplab_config
class TestCollectEnv(unittest.TestCase):
def test(self):
_ = collect_env_info()

# === end of file: third_party/detectron2_old/tests/test_packaging.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.utils.analysis import flop_count_operators, parameter_count
from detectron2.utils.testing import get_model_no_weights
class RetinaNetTest(unittest.TestCase):
def setUp(self):
self.model = get_model_no_weights("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
def test_flop(self):
# RetinaNet supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800), "test_unused": "abcd"}]
res = flop_count_operators(self.model, inputs)
self.assertTrue(int(res["conv"]), 146) # 146B flops
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 37915572)
self.assertTrue(res["backbone"], 31452352)
class FasterRCNNTest(unittest.TestCase):
def setUp(self):
self.model = get_model_no_weights("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
def test_flop(self):
# Faster R-CNN supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800)}]
res = flop_count_operators(self.model, inputs)
# This only checks flops for backbone & proposal generator
# Flops for box head is not conv, and depends on #proposals, which is
# almost 0 for random inputs.
self.assertTrue(int(res["conv"]), 117)
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 41699936)
self.assertTrue(res["backbone"], 26799296)

# === end of file: third_party/detectron2_old/tests/test_model_analysis.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.

# === end of file: third_party/detectron2_old/tests/__init__.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from unittest import TestCase
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from torch import nn
from detectron2.solver import LRMultiplier, WarmupParamScheduler
class TestScheduler(TestCase):
def test_warmup_multistep(self):
p = nn.Parameter(torch.zeros(0))
opt = torch.optim.SGD([p], lr=5)
multiplier = WarmupParamScheduler(
MultiStepParamScheduler(
[1, 0.1, 0.01, 0.001],
milestones=[10, 15, 20],
num_updates=30,
),
0.001,
5 / 30,
)
sched = LRMultiplier(opt, multiplier, 30)
# This is an equivalent of:
# sched = WarmupMultiStepLR(
# opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5)
p.sum().backward()
opt.step()
lrs = [0.005]
for _ in range(30):
sched.step()
lrs.append(opt.param_groups[0]["lr"])
self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
self.assertTrue(np.allclose(lrs[5:10], 5.0))
self.assertTrue(np.allclose(lrs[10:15], 0.5))
self.assertTrue(np.allclose(lrs[15:20], 0.05))
self.assertTrue(np.allclose(lrs[20:], 0.005))
def test_warmup_cosine(self):
p = nn.Parameter(torch.zeros(0))
opt = torch.optim.SGD([p], lr=5)
multiplier = WarmupParamScheduler(
CosineParamScheduler(1, 0),
0.001,
5 / 30,
)
sched = LRMultiplier(opt, multiplier, 30)
p.sum().backward()
opt.step()
self.assertEqual(opt.param_groups[0]["lr"], 0.005)
lrs = [0.005]
for _ in range(30):
sched.step()
lrs.append(opt.param_groups[0]["lr"])
for idx, lr in enumerate(lrs):
expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30))
if idx >= 5:
self.assertAlmostEqual(lr, expected_cosine)
else:
self.assertNotAlmostEqual(lr, expected_cosine)

# === end of file: third_party/detectron2_old/tests/test_scheduler.py (repo: banmo-main) ===
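
# A minimal sketch (not from the repo) of the warmup values asserted in
# test_warmup_multistep above: during the first 5 of 30 updates the
# multiplier interpolates linearly from warmup_factor=0.001 to the scheduled
# value 1, and is scaled by the base lr of 5.
warmup_factor, warmup_iters, base_lr = 0.001, 5, 5.0
lrs = [
    base_lr * (warmup_factor + (1 - warmup_factor) * t / warmup_iters)
    for t in range(warmup_iters)
]
expected = [0.005, 1.004, 2.003, 3.002, 4.001]
assert all(abs(a - b) < 1e-9 for a, b in zip(lrs, expected))
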
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import copy
import os
import tempfile
import unittest
import torch
from detectron2 import model_zoo
from detectron2.utils.logger import setup_logger
from detectron2.utils.testing import get_sample_coco_image
@unittest.skipIf(os.environ.get("CI"), "Require COCO data and model zoo.")
class TestCaffe2Export(unittest.TestCase):
def setUp(self):
setup_logger()
def _test_model(self, config_path, device="cpu"):
# requires extra dependencies
from detectron2.export import Caffe2Model, add_export_config, Caffe2Tracer
cfg = model_zoo.get_config(config_path)
add_export_config(cfg)
cfg.MODEL.DEVICE = device
model = model_zoo.get(config_path, trained=True, device=device)
inputs = [{"image": get_sample_coco_image()}]
tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs))
c2_model = tracer.export_caffe2()
with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
c2_model.save_protobuf(d)
c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))
c2_model = Caffe2Model.load_protobuf(d)
c2_model(inputs)[0]["instances"]
ts_model = tracer.export_torchscript()
ts_model.save(os.path.join(d, "model.ts"))
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNNGPU(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
def testPanopticFPN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml")

# === end of file: third_party/detectron2_old/tests/test_export_caffe2.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import unittest
from detectron2.utils.events import CommonMetricPrinter, EventStorage, JSONWriter
class TestEventWriter(unittest.TestCase):
def testScalar(self):
with tempfile.TemporaryDirectory(
prefix="detectron2_tests"
) as dir, EventStorage() as storage:
json_file = os.path.join(dir, "test.json")
writer = JSONWriter(json_file)
for k in range(60):
storage.put_scalar("key", k, smoothing_hint=False)
if (k + 1) % 20 == 0:
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue([int(k["key"]) for k in data] == [19, 39, 59])
def testScalarMismatchedPeriod(self):
with tempfile.TemporaryDirectory(
prefix="detectron2_tests"
) as dir, EventStorage() as storage:
json_file = os.path.join(dir, "test.json")
writer = JSONWriter(json_file)
for k in range(60):
                if k % 17 == 0:  # write at a different period
storage.put_scalar("key2", k, smoothing_hint=False)
storage.put_scalar("key", k, smoothing_hint=False)
if (k + 1) % 20 == 0:
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue([int(k.get("key2", 0)) for k in data] == [17, 0, 34, 0, 51, 0])
self.assertTrue([int(k.get("key", 0)) for k in data] == [0, 19, 0, 39, 0, 59])
self.assertTrue([int(k["iteration"]) for k in data] == [17, 19, 34, 39, 51, 59])
def testPrintETA(self):
with EventStorage() as s:
p1 = CommonMetricPrinter(10)
p2 = CommonMetricPrinter()
s.put_scalar("time", 1.0)
s.step()
s.put_scalar("time", 1.0)
s.step()
with self.assertLogs("detectron2.utils.events") as logs:
p1.write()
self.assertIn("eta", logs.output[0])
with self.assertLogs("detectron2.utils.events") as logs:
p2.write()
self.assertNotIn("eta", logs.output[0])

# === end of file: third_party/detectron2_old/tests/test_events.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import unittest
import torch
from torch import Tensor, nn
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.config.instantiate import dump_dataclass, instantiate
from detectron2.export import dump_torchscript_IR, scripting_with_instances
from detectron2.export.flatten import TracingAdapter, flatten_to_tuple
from detectron2.export.torchscript_patch import patch_builtin_len
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import (
assert_instances_allclose,
convert_scripted_instances,
get_sample_coco_image,
random_boxes,
)
"""
https://detectron2.readthedocs.io/tutorials/deployment.html
contains some explanations of this file.
"""
@unittest.skipIf(os.environ.get("CI") or TORCH_VERSION < (1, 8), "Insufficient Pytorch version")
class TestScripting(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
self._test_retinanet_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
def _test_rcnn_model(self, config_path):
model = model_zoo.get(config_path, trained=True)
model.eval()
fields = {
"proposal_boxes": Boxes,
"objectness_logits": Tensor,
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
"pred_masks": Tensor,
}
script_model = scripting_with_instances(model, fields)
inputs = [{"image": get_sample_coco_image()}] * 2
with torch.no_grad():
instance = model.inference(inputs, do_postprocess=False)[0]
scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
assert_instances_allclose(instance, scripted_instance)
def _test_retinanet_model(self, config_path):
model = model_zoo.get(config_path, trained=True)
model.eval()
fields = {
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
}
script_model = scripting_with_instances(model, fields)
img = get_sample_coco_image()
inputs = [{"image": img}] * 2
with torch.no_grad():
instance = model(inputs)[0]["instances"]
scripted_instance = convert_scripted_instances(script_model(inputs)[0])
scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2])
assert_instances_allclose(instance, scripted_instance)
# Note that the model currently cannot be saved and loaded into a new process:
# https://github.com/pytorch/pytorch/issues/46944
@unittest.skipIf(os.environ.get("CI") or TORCH_VERSION < (1, 8), "Insufficient Pytorch version")
class TestTracing(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
def inference_func(model, image):
inputs = [{"image": image}]
return model.inference(inputs, do_postprocess=False)[0]
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
def inference_func(model, image):
return model.forward([{"image": image}])[0]["instances"]
self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml", inference_func)
def _test_model(self, config_path, inference_func):
model = model_zoo.get(config_path, trained=True)
image = get_sample_coco_image()
wrapper = TracingAdapter(model, image, inference_func)
wrapper.eval()
with torch.no_grad():
small_image = nn.functional.interpolate(image, scale_factor=0.5)
# trace with a different image, and the trace must still work
traced_model = torch.jit.trace(wrapper, (small_image,))
output = inference_func(model, image)
traced_output = wrapper.outputs_schema(traced_model(image))
assert_instances_allclose(output, traced_output, size_as_tensor=True)
def testKeypointHead(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.model = KRCNNConvDeconvUpsampleHead(
ShapeSpec(channels=4, height=14, width=14), num_keypoints=17, conv_dims=(4,)
)
def forward(self, x, predbox1, predbox2):
inst = [
Instances((100, 100), pred_boxes=Boxes(predbox1)),
Instances((100, 100), pred_boxes=Boxes(predbox2)),
]
ret = self.model(x, inst)
return tuple(x.pred_keypoints for x in ret)
model = M()
model.eval()
def gen_input(num1, num2):
feat = torch.randn((num1 + num2, 4, 14, 14))
box1 = random_boxes(num1)
box2 = random_boxes(num2)
return feat, box1, box2
with torch.no_grad(), patch_builtin_len():
trace = torch.jit.trace(model, gen_input(15, 15), check_trace=False)
inputs = gen_input(12, 10)
trace_outputs = trace(*inputs)
true_outputs = model(*inputs)
for trace_output, true_output in zip(trace_outputs, true_outputs):
self.assertTrue(torch.allclose(trace_output, true_output))
class TestTorchscriptUtils(unittest.TestCase):
# TODO: add test to dump scripting
def test_dump_IR_tracing(self):
cfg = get_cfg()
cfg.MODEL.RESNETS.DEPTH = 18
cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64
class Mod(nn.Module):
def forward(self, x):
return tuple(self.m(x).values())
model = Mod()
model.m = build_backbone(cfg)
model.eval()
with torch.no_grad():
ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224),))
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
dump_torchscript_IR(ts_model, d)
# check that the files are created
for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined", "model"]:
fname = os.path.join(d, name + ".txt")
self.assertTrue(os.stat(fname).st_size > 0, fname)
def test_dump_IR_function(self):
@torch.jit.script
def gunc(x, y):
return x + y
def func(x, y):
return x + y + gunc(x, y)
ts_model = torch.jit.trace(func, (torch.rand(3), torch.rand(3)))
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
dump_torchscript_IR(ts_model, d)
for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined"]:
fname = os.path.join(d, name + ".txt")
self.assertTrue(os.stat(fname).st_size > 0, fname)
def test_flatten_basic(self):
obj = [3, ([5, 6], {"name": [7, 9], "name2": 3})]
res, schema = flatten_to_tuple(obj)
self.assertEqual(res, (3, 5, 6, 7, 9, 3))
new_obj = schema(res)
self.assertEqual(new_obj, obj)
_, new_schema = flatten_to_tuple(new_obj)
self.assertEqual(schema, new_schema) # test __eq__
self._check_schema(schema)
def _check_schema(self, schema):
dumped_schema = dump_dataclass(schema)
# Check that the schema is json-serializable
        # In practice you might prefer YAML, since the schema can be deeply nested
json.dumps(dumped_schema)
# Check that the schema can be deserialized
new_schema = instantiate(dumped_schema)
self.assertEqual(schema, new_schema)
def test_flatten_instances_boxes(self):
inst = Instances(
torch.tensor([5, 8]), pred_masks=torch.tensor([3]), pred_boxes=Boxes(torch.ones((1, 4)))
)
obj = [3, ([5, 6], inst)]
res, schema = flatten_to_tuple(obj)
self.assertEqual(res[:3], (3, 5, 6))
for r, expected in zip(res[3:], (inst.pred_boxes.tensor, inst.pred_masks, inst.image_size)):
self.assertIs(r, expected)
new_obj = schema(res)
assert_instances_allclose(new_obj[1][1], inst, rtol=0.0, size_as_tensor=True)
self._check_schema(schema)

# === end of file: third_party/detectron2_old/tests/test_export_torchscript.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
from detectron2 import model_zoo
from detectron2.config import instantiate
from detectron2.modeling import FPN, GeneralizedRCNN
logger = logging.getLogger(__name__)
class TestModelZoo(unittest.TestCase):
def test_get_returns_model(self):
model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False)
self.assertIsInstance(model, GeneralizedRCNN)
self.assertIsInstance(model.backbone, FPN)
def test_get_invalid_model(self):
self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml")
def test_get_url(self):
url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
self.assertEqual(
url,
"https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa
)
url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py")
self.assertEqual(url, url2)
def _build_lazy_model(self, name):
cfg = model_zoo.get_config("common/models/" + name)
instantiate(cfg.model)
def test_mask_rcnn_fpn(self):
self._build_lazy_model("mask_rcnn_fpn.py")
def test_mask_rcnn_c4(self):
self._build_lazy_model("mask_rcnn_c4.py")
def test_panoptic_fpn(self):
self._build_lazy_model("panoptic_fpn.py")
def test_schedule(self):
cfg = model_zoo.get_config("common/coco_schedule.py")
for _, v in cfg.items():
instantiate(v)
if __name__ == "__main__":
unittest.main()

# === end of file: third_party/detectron2_old/tests/test_model_zoo.py (repo: banmo-main) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from collections import OrderedDict
import torch
from torch import nn
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.utils.logger import setup_logger
class TestCheckpointer(unittest.TestCase):
def setUp(self):
setup_logger()
def create_complex_model(self):
m = nn.Module()
m.block1 = nn.Module()
m.block1.layer1 = nn.Linear(2, 3)
m.layer2 = nn.Linear(3, 2)
m.res = nn.Module()
m.res.layer2 = nn.Linear(3, 2)
state_dict = OrderedDict()
state_dict["layer1.weight"] = torch.rand(3, 2)
state_dict["layer1.bias"] = torch.rand(3)
state_dict["layer2.weight"] = torch.rand(2, 3)
state_dict["layer2.bias"] = torch.rand(2)
state_dict["res.layer2.weight"] = torch.rand(2, 3)
state_dict["res.layer2.bias"] = torch.rand(2)
return m, state_dict
def test_complex_model_loaded(self):
for add_data_parallel in [False, True]:
model, state_dict = self.create_complex_model()
if add_data_parallel:
model = nn.DataParallel(model)
model_sd = model.state_dict()
sd_to_load = align_and_update_state_dicts(model_sd, state_dict)
model.load_state_dict(sd_to_load)
for loaded, stored in zip(model_sd.values(), state_dict.values()):
# different tensor references
self.assertFalse(id(loaded) == id(stored))
# same content
self.assertTrue(loaded.to(stored).equal(stored))
if __name__ == "__main__":
unittest.main()

# === end of file: third_party/detectron2_old/tests/test_checkpoint.py (repo: banmo-main) ===
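
# A minimal sketch (not from the repo) of the name-matching idea behind
# align_and_update_state_dicts as exercised above: a checkpoint key such as
# "layer1.weight" can populate the model key "block1.layer1.weight" because
# it matches as a dot-delimited suffix. The real function does much more
# (ambiguity resolution, shape checks, logging); this shows only the rule.
def match_by_suffix(model_keys, ckpt_keys):
    mapping = {}
    for mk in model_keys:
        candidates = [ck for ck in ckpt_keys if mk == ck or mk.endswith("." + ck)]
        if candidates:
            mapping[mk] = max(candidates, key=len)  # prefer the longest match
    return mapping

assert match_by_suffix(
    ["block1.layer1.weight", "layer2.weight", "res.layer2.weight"],
    ["layer1.weight", "layer2.weight", "res.layer2.weight"],
) == {
    "block1.layer1.weight": "layer1.weight",
    "layer2.weight": "layer2.weight",
    "res.layer2.weight": "res.layer2.weight",
}
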
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from detectron2.layers import batched_nms
from detectron2.utils.testing import random_boxes
class TestNMS(unittest.TestCase):
def _create_tensors(self, N):
boxes = random_boxes(N, 200)
scores = torch.rand(N)
return boxes, scores
def test_nms_scriptability(self):
N = 2000
num_classes = 50
boxes, scores = self._create_tensors(N)
idxs = torch.randint(0, num_classes, (N,))
scripted_batched_nms = torch.jit.script(batched_nms)
err_msg = "NMS is incompatible with jit-scripted NMS for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = batched_nms(boxes, scores, idxs, iou)
backup = boxes.clone()
scripted_keep = scripted_batched_nms(boxes, scores, idxs, iou)
assert torch.allclose(boxes, backup), "boxes modified by jit-scripted batched_nms"
self.assertTrue(torch.equal(keep_ref, scripted_keep), err_msg.format(iou))
if __name__ == "__main__":
unittest.main()

# === end of file: third_party/detectron2_old/tests/layers/test_nms.py (repo: banmo-main) ===
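
# A minimal sketch (not from the repo) of the standard trick usually behind
# batched_nms as tested above: offset each class's boxes by a class-dependent
# amount so boxes of different classes can never overlap, then run plain NMS
# once. Whether detectron2's version does exactly this is an assumption here.
import torch
from torchvision.ops import nms

def batched_nms_via_offsets(boxes, scores, idxs, iou_threshold):
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
    offsets = idxs.to(boxes) * (boxes.max() + 1)  # one coordinate "lane" per class
    return nms(boxes + offsets[:, None], scores, iou_threshold)
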
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch import nn
from detectron2.layers import ASPP, DepthwiseSeparableConv2d, FrozenBatchNorm2d
from detectron2.modeling.backbone.resnet import BasicStem, ResNet
"""
Test for misc layers.
"""
class TestBlocks(unittest.TestCase):
def test_separable_conv(self):
DepthwiseSeparableConv2d(3, 10, norm1="BN", activation1=nn.PReLU())
def test_aspp(self):
m = ASPP(3, 10, [2, 3, 4], norm="", activation=nn.PReLU())
self.assertIsNot(m.convs[0].activation.weight, m.convs[1].activation.weight)
self.assertIsNot(m.convs[0].activation.weight, m.project.activation.weight)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_frozen_batchnorm_fp16(self):
from torch.cuda.amp import autocast
C = 10
input = torch.rand(1, C, 10, 10).cuda()
m = FrozenBatchNorm2d(C).cuda()
with autocast():
output = m(input.half())
self.assertEqual(output.dtype, torch.float16)
# requires_grad triggers a different codepath
input.requires_grad_()
with autocast():
output = m(input.half())
self.assertEqual(output.dtype, torch.float16)
def test_resnet_unused_stages(self):
resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2"])
self.assertTrue(hasattr(resnet, "res2"))
self.assertFalse(hasattr(resnet, "res3"))
self.assertFalse(hasattr(resnet, "res5"))
resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2", "res5"])
self.assertTrue(hasattr(resnet, "res2"))
self.assertTrue(hasattr(resnet, "res4"))
self.assertTrue(hasattr(resnet, "res5"))

# === end of file: third_party/detectron2_old/tests/layers/test_blocks.py (repo: banmo-main) ===

# === third_party/detectron2_old/tests/layers/__init__.py (repo: banmo-main, empty file) ===
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
from copy import copy
import cv2
import torch
from fvcore.common.benchmark import benchmark
from torch.nn import functional as F
from detectron2.layers.roi_align import ROIAlign, roi_align
class ROIAlignTest(unittest.TestCase):
def test_forward_output(self):
input = np.arange(25).reshape(5, 5).astype("float32")
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False)
output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True)
# without correction:
old_results = [
[7.5, 8, 8.5, 9],
[10, 10.5, 11, 11.5],
[12.5, 13, 13.5, 14],
[15, 15.5, 16, 16.5],
]
# with 0.5 correction:
correct_results = [
[4.5, 5.0, 5.5, 6.0],
[7.0, 7.5, 8.0, 8.5],
[9.5, 10.0, 10.5, 11.0],
[12.0, 12.5, 13.0, 13.5],
]
# This is an upsampled version of [[6, 7], [11, 12]]
self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten()))
self.assertTrue(
np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten())
)
# Also see similar issues in tensorflow at
# https://github.com/tensorflow/tensorflow/issues/26278
def test_resize(self):
H, W = 30, 30
input = np.random.rand(H, W).astype("float32") * 100
box = [10, 10, 20, 20]
output = self._simple_roialign(input, box, (5, 5), aligned=True)
input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR)
box2x = [x / 2 for x in box]
output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True)
diff = np.abs(output2x - output)
self.assertTrue(diff.max() < 1e-4)
def test_grid_sample_equivalence(self):
H, W = 30, 30
input = np.random.rand(H, W).astype("float32") * 100
box = [10, 10, 20, 20]
for ratio in [1, 2, 3]:
output = self._simple_roialign(input, box, (5, 5), sampling_ratio=ratio)
output_grid_sample = grid_sample_roi_align(
torch.from_numpy(input[None, None, :, :]).float(),
torch.as_tensor(box).float()[None, :],
5,
1.0,
ratio,
)
self.assertTrue(torch.allclose(output, output_grid_sample))
def _simple_roialign(self, img, box, resolution, sampling_ratio=0, aligned=True):
"""
RoiAlign with scale 1.0.
"""
if isinstance(resolution, int):
resolution = (resolution, resolution)
op = ROIAlign(resolution, 1.0, sampling_ratio, aligned=aligned)
input = torch.from_numpy(img[None, None, :, :].astype("float32"))
rois = [0] + list(box)
rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
output = op.forward(input, rois)
if torch.cuda.is_available():
output_cuda = op.forward(input.cuda(), rois.cuda()).cpu()
self.assertTrue(torch.allclose(output, output_cuda))
return output[0, 0]
def _simple_roialign_with_grad(self, img, box, resolution, device):
if isinstance(resolution, int):
resolution = (resolution, resolution)
op = ROIAlign(resolution, 1.0, 0, aligned=True)
input = torch.from_numpy(img[None, None, :, :].astype("float32"))
rois = [0] + list(box)
rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32"))
input = input.to(device=device)
rois = rois.to(device=device)
input.requires_grad = True
output = op.forward(input, rois)
return input, output
def test_empty_box(self):
img = np.random.rand(5, 5)
box = [3, 4, 5, 4]
o = self._simple_roialign(img, box, 7)
self.assertTrue(o.shape == (7, 7))
self.assertTrue((o == 0).all())
for dev in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []:
input, output = self._simple_roialign_with_grad(img, box, 7, torch.device(dev))
output.sum().backward()
self.assertTrue(torch.allclose(input.grad, torch.zeros_like(input)))
def test_empty_batch(self):
input = torch.zeros(0, 3, 10, 10, dtype=torch.float32)
rois = torch.zeros(0, 5, dtype=torch.float32)
op = ROIAlign((7, 7), 1.0, 0, aligned=True)
output = op.forward(input, rois)
self.assertTrue(output.shape == (0, 3, 7, 7))
def grid_sample_roi_align(input, boxes, output_size, scale, sampling_ratio):
# unlike true roi_align, this does not support different batch_idx
from detectron2.projects.point_rend.point_features import (
generate_regular_grid_point_coords,
get_point_coords_wrt_image,
point_sample,
)
N, _, H, W = input.shape
R = len(boxes)
assert N == 1
boxes = boxes * scale
grid = generate_regular_grid_point_coords(R, output_size * sampling_ratio, device=boxes.device)
coords = get_point_coords_wrt_image(boxes, grid)
coords = coords / torch.as_tensor([W, H], device=coords.device) # R, s^2, 2
res = point_sample(input, coords.unsqueeze(0), align_corners=False) # 1,C, R,s^2
res = (
res.squeeze(0)
.permute(1, 0, 2)
.reshape(R, -1, output_size * sampling_ratio, output_size * sampling_ratio)
)
res = F.avg_pool2d(res, sampling_ratio)
return res
def benchmark_roi_align():
def random_boxes(mean_box, stdev, N, maxsize):
ret = torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float)
ret.clamp_(min=0, max=maxsize)
return ret
def func(shape, nboxes_per_img, sampling_ratio, device, box_size="large"):
N, _, H, _ = shape
input = torch.rand(*shape)
boxes = []
batch_idx = []
for k in range(N):
if box_size == "large":
b = random_boxes([80, 80, 130, 130], 24, nboxes_per_img, H)
else:
b = random_boxes([100, 100, 110, 110], 4, nboxes_per_img, H)
boxes.append(b)
batch_idx.append(torch.zeros(nboxes_per_img, 1, dtype=torch.float32) + k)
boxes = torch.cat(boxes, axis=0)
batch_idx = torch.cat(batch_idx, axis=0)
boxes = torch.cat([batch_idx, boxes], axis=1)
input = input.to(device=device)
boxes = boxes.to(device=device)
def bench():
if False and sampling_ratio > 0 and N == 1:
# enable to benchmark grid_sample (slower)
grid_sample_roi_align(input, boxes[:, 1:], 7, 1.0, sampling_ratio)
else:
roi_align(input, boxes, 7, 1.0, sampling_ratio, True)
if device == "cuda":
torch.cuda.synchronize()
return bench
def gen_args(arg):
args = []
for size in ["small", "large"]:
for ratio in [0, 2]:
args.append(copy(arg))
args[-1]["sampling_ratio"] = ratio
args[-1]["box_size"] = size
return args
arg = dict(shape=(1, 512, 256, 256), nboxes_per_img=512, device="cuda")
benchmark(func, "cuda_roialign", gen_args(arg), num_iters=20, warmup_iters=1)
arg.update({"device": "cpu", "shape": (1, 256, 128, 128)})
benchmark(func, "cpu_roialign", gen_args(arg), num_iters=5, warmup_iters=1)
if __name__ == "__main__":
if torch.cuda.is_available():
benchmark_roi_align()
unittest.main()

# === end of file: third_party/detectron2_old/tests/layers/test_roi_align.py (repo: banmo-main) ===
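
# A minimal sketch (not from the repo) of where correct_results[0][0] == 4.5
# in test_forward_output above comes from. With aligned=True, the first bin
# of the 4x4 grid over box [1, 1, 3, 3] has its center at (1.25, 1.25); with
# pixel (i, j) of the 5x5 arange image treated as a point sample at
# (j + 0.5, i + 0.5), bilinear interpolation there mixes pixels 0, 1, 5, 6.
vals = {(0, 0): 0.0, (0, 1): 1.0, (1, 0): 5.0, (1, 1): 6.0}
wy, wx = 0.75, 0.75  # fractional offsets of (1.25, 1.25) past the (0.5, 0.5) centers
sample = (
    (1 - wy) * (1 - wx) * vals[(0, 0)]
    + (1 - wy) * wx * vals[(0, 1)]
    + wy * (1 - wx) * vals[(1, 0)]
    + wy * wx * vals[(1, 1)]
)
assert sample == 4.5
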
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
import torch
from detectron2.layers import DeformConv, ModulatedDeformConv
class DeformableTest(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_forward_output(self):
device = torch.device("cuda")
N, C, H, W = shape = 1, 1, 5, 5
kernel_size = 3
padding = 1
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
# Test DCN v1
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
output = deform(inputs, offset)
output = output.detach().cpu().numpy()
deform_results = np.array(
[
[30, 41.25, 48.75, 45, 28.75],
[62.25, 81, 90, 80.25, 50.25],
[99.75, 126, 135, 117.75, 72.75],
[105, 131.25, 138.75, 120, 73.75],
[71.75, 89.25, 93.75, 80.75, 49.5],
]
)
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
# Test DCN v2
mask_channels = kernel_size * kernel_size
mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
device
)
modulate_deform.weight = deform.weight
output = modulate_deform(inputs, offset, mask)
output = output.detach().cpu().numpy()
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5))
def test_forward_output_on_cpu(self):
device = torch.device("cpu")
N, C, H, W = shape = 1, 1, 5, 5
kernel_size = 3
padding = 1
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
# Test DCN v1 on cpu
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
output = deform(inputs, offset)
output = output.detach().cpu().numpy()
deform_results = np.array(
[
[30, 41.25, 48.75, 45, 28.75],
[62.25, 81, 90, 80.25, 50.25],
[99.75, 126, 135, 117.75, 72.75],
[105, 131.25, 138.75, 120, 73.75],
[71.75, 89.25, 93.75, 80.75, 49.5],
]
)
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
@unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access")
def test_forward_output_on_cpu_equals_output_on_gpu(self):
N, C, H, W = shape = 2, 4, 10, 10
kernel_size = 3
padding = 1
for groups in [1, 2]:
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape)
offset_channels = kernel_size * kernel_size * 2
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32)
deform_gpu = DeformConv(
C, C, kernel_size=kernel_size, padding=padding, groups=groups
).to("cuda")
deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight))
output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy()
deform_cpu = DeformConv(
C, C, kernel_size=kernel_size, padding=padding, groups=groups
).to("cpu")
deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight))
output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy()
self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten()))
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_small_input(self):
device = torch.device("cuda")
for kernel_size in [3, 5]:
padding = kernel_size // 2
N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1)
inputs = torch.rand(shape).to(device) # input size is smaller than kernel size
offset_channels = kernel_size * kernel_size * 2
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
output = deform(inputs, offset)
self.assertTrue(output.shape == inputs.shape)
mask_channels = kernel_size * kernel_size
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(
C, C, kernel_size, padding=padding, bias=False
).to(device)
output = modulate_deform(inputs, offset, mask)
self.assertTrue(output.shape == inputs.shape)
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
def test_raise_exception(self):
device = torch.device("cuda")
N, C, H, W = shape = 1, 1, 3, 3
kernel_size = 3
padding = 1
inputs = torch.rand(shape, dtype=torch.float32).to(device)
offset_channels = kernel_size * kernel_size # This is wrong channels for offset
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
self.assertRaises(RuntimeError, deform, inputs, offset)
offset_channels = kernel_size * kernel_size * 2
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
device
)
self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask)
def test_repr(self):
module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
correct_string = (
"DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
"stride=(1, 1), padding=(1, 1), dilation=(1, 1), "
"groups=1, deformable_groups=2, bias=False)"
)
self.assertEqual(repr(module), correct_string)
module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
correct_string = (
"ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
"stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)"
)
self.assertEqual(repr(module), correct_string)
if __name__ == "__main__":
unittest.main()

# === end of file: third_party/detectron2_old/tests/layers/test_deformable.py (repo: banmo-main) ===
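
# A minimal sketch (not from the repo) of why test_forward_output above
# expects the DCNv2 result to be exactly half the DCNv1 result: modulated
# deformable conv computes y(p) = sum_k w_k * m_k * x(p + p_k + dp_k), which
# is linear in the mask, so a constant mask m_k = 0.5 with shared weights and
# offsets halves every output. Toy 1-D version with three taps:
w = [1.0, 1.0, 1.0]          # all-ones kernel, as in the test
x_sampled = [2.0, 7.0, 4.0]  # bilinearly sampled inputs at the offset taps
v1 = sum(wk * xk for wk, xk in zip(w, x_sampled))        # DCNv1
v2 = sum(wk * 0.5 * xk for wk, xk in zip(w, x_sampled))  # DCNv2, mask = 0.5
assert v2 == 0.5 * v1
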
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import numpy as np
import unittest
from collections import defaultdict
import torch
import tqdm
from fvcore.common.benchmark import benchmark
from pycocotools.coco import COCO
from tabulate import tabulate
from torch.nn import functional as F
from detectron2.data import MetadataCatalog
from detectron2.layers.mask_ops import (
pad_masks,
paste_mask_in_image_old,
paste_masks_in_image,
scale_boxes,
)
from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks
from detectron2.structures.masks import polygons_to_bitmask
from detectron2.utils.file_io import PathManager
from detectron2.utils.testing import random_boxes
def iou_between_full_image_bit_masks(a, b):
intersect = (a & b).sum()
union = (a | b).sum()
return intersect / union
def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5):
x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
img_h, img_w = full_image_bit_mask.shape
mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5]
mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5]
mask_y = mask_y / mask_size * (y1 - y0) + y0
mask_x = mask_x / mask_size * (x1 - x0) + x0
mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1
mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1
gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x))
ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32)
full_image_bit_mask = torch.from_numpy(full_image_bit_mask)
mask = F.grid_sample(
full_image_bit_mask[None, None, :, :].to(dtype=torch.float32),
ind[None, :, :, :],
align_corners=True,
)
return mask[0, 0] >= threshold
class TestMaskCropPaste(unittest.TestCase):
def setUp(self):
json_file = MetadataCatalog.get("coco_2017_val_100").json_file
if not PathManager.isfile(json_file):
raise unittest.SkipTest("{} not found".format(json_file))
with contextlib.redirect_stdout(io.StringIO()):
json_file = PathManager.get_local_path(json_file)
self.coco = COCO(json_file)
def test_crop_paste_consistency(self):
"""
rasterize_polygons_within_box (used in training)
and
paste_masks_in_image (used in inference)
should be inverse operations to each other.
This function runs several implementation of the above two operations and prints
the reconstruction error.
"""
anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations
selected_anns = anns[:100]
ious = []
for ann in tqdm.tqdm(selected_anns):
results = self.process_annotation(ann)
ious.append([k[2] for k in results])
ious = np.array(ious)
mean_ious = ious.mean(axis=0)
table = []
res_dic = defaultdict(dict)
for row, iou in zip(results, mean_ious):
table.append((row[0], row[1], iou))
res_dic[row[0]][row[1]] = iou
print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple"))
# assert that the reconstruction is good:
self.assertTrue(res_dic["polygon"]["aligned"] > 0.94)
self.assertTrue(res_dic["roialign"]["aligned"] > 0.95)
def process_annotation(self, ann, mask_side_len=28):
# Parse annotation data
img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0]
height, width = img_info["height"], img_info["width"]
gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]]
gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width)
# Run rasterize ..
torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4)
box_bitmasks = {
"polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0],
"gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len),
"roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize(
torch_gt_bbox, mask_side_len
)[0],
}
# Run paste ..
results = defaultdict(dict)
for k, box_bitmask in box_bitmasks.items():
padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1)
scaled_boxes = scale_boxes(torch_gt_bbox, scale)
r = results[k]
r["old"] = paste_mask_in_image_old(
padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5
)
r["aligned"] = paste_masks_in_image(
box_bitmask[None, :, :], Boxes(torch_gt_bbox), (height, width)
)[0]
table = []
for rasterize_method, r in results.items():
for paste_method, mask in r.items():
mask = np.asarray(mask)
iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask)
table.append((rasterize_method, paste_method, iou))
return table
def test_polygon_area(self):
# Draw polygon boxes
for d in [5.0, 10.0, 1000.0]:
polygon = PolygonMasks([[[0, 0, 0, d, d, d, d, 0]]])
area = polygon.area()[0]
target = d ** 2
self.assertEqual(area, target)
# Draw polygon triangles
for d in [5.0, 10.0, 1000.0]:
polygon = PolygonMasks([[[0, 0, 0, d, d, d]]])
area = polygon.area()[0]
target = d ** 2 / 2
self.assertEqual(area, target)
def test_paste_mask_scriptable(self):
scripted_f = torch.jit.script(paste_masks_in_image)
N = 10
masks = torch.rand(N, 28, 28)
boxes = Boxes(random_boxes(N, 100))
image_shape = (150, 150)
out = paste_masks_in_image(masks, boxes, image_shape)
scripted_out = scripted_f(masks, boxes, image_shape)
self.assertTrue(torch.equal(out, scripted_out))
def benchmark_paste():
S = 800
H, W = image_shape = (S, S)
N = 64
torch.manual_seed(42)
masks = torch.rand(N, 28, 28)
center = torch.rand(N, 2) * 600 + 100
wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50)
x0y0 = torch.clamp(center - wh * 0.5, min=0.0)
x1y1 = torch.clamp(center + wh * 0.5, max=S)
    boxes = Boxes(torch.cat([x0y0, x1y1], dim=1))
def func(device, n=3):
m = masks.to(device=device)
b = boxes.to(device=device)
def bench():
for _ in range(n):
paste_masks_in_image(m, b, image_shape)
if device.type == "cuda":
torch.cuda.synchronize()
return bench
specs = [{"device": torch.device("cpu"), "n": 3}]
if torch.cuda.is_available():
specs.append({"device": torch.device("cuda"), "n": 3})
benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2)
if __name__ == "__main__":
benchmark_paste()
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/layers/test_mask_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import cv2
import torch
from torch.autograd import Variable, gradcheck
from detectron2.layers.roi_align import ROIAlign
from detectron2.layers.roi_align_rotated import ROIAlignRotated
logger = logging.getLogger(__name__)
class ROIAlignRotatedTest(unittest.TestCase):
def _box_to_rotated_box(self, box, angle):
return [
(box[0] + box[2]) / 2.0,
(box[1] + box[3]) / 2.0,
box[2] - box[0],
box[3] - box[1],
angle,
]
def _rot90(self, img, num):
num = num % 4 # note: -1 % 4 == 3
for _ in range(num):
img = img.transpose(0, 1).flip(0)
return img
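    # For example, img = [[1, 2], [3, 4]] gives _rot90(img, 1) ==
    # [[2, 4], [1, 3]], i.e. a 90-degree counter-clockwise rotation.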
def test_forward_output_0_90_180_270(self):
for i in range(4):
# i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees
img = torch.arange(25, dtype=torch.float32).reshape(5, 5)
"""
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
"""
box = [1, 1, 3, 3]
rotated_box = self._box_to_rotated_box(box=box, angle=90 * i)
result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4))
            # Here's an explanation for the 0-degree case:
# point 0 in the original input lies at [0.5, 0.5]
# (the center of bin [0, 1] x [0, 1])
# point 1 in the original input lies at [1.5, 0.5], etc.
# since the resolution is (4, 4) that divides [1, 3] x [1, 3]
# into 4 x 4 equal bins,
# the top-left bin is [1, 1.5] x [1, 1.5], and its center
# (1.25, 1.25) lies at the 3/4 position
# between point 0 and point 1, point 5 and point 6,
# point 0 and point 5, point 1 and point 6, so it can be calculated as
# 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5
result_expected = torch.tensor(
[
[4.5, 5.0, 5.5, 6.0],
[7.0, 7.5, 8.0, 8.5],
[9.5, 10.0, 10.5, 11.0],
[12.0, 12.5, 13.0, 13.5],
]
)
# This is also an upsampled version of [[6, 7], [11, 12]]
# When the box is rotated by 90 degrees CCW,
# the result would be rotated by 90 degrees CW, thus it's -i here
result_expected = self._rot90(result_expected, -i)
assert torch.allclose(result, result_expected)
def test_resize(self):
H, W = 30, 30
input = torch.rand(H, W) * 100
box = [10, 10, 20, 20]
rotated_box = self._box_to_rotated_box(box, angle=0)
output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5))
input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR)
input2x = torch.from_numpy(input2x)
box2x = [x / 2 for x in box]
rotated_box2x = self._box_to_rotated_box(box2x, angle=0)
output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5))
assert torch.allclose(output2x, output)
def _simple_roi_align_rotated(self, img, box, resolution):
"""
RoiAlignRotated with scale 1.0 and 0 sample ratio.
"""
op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0)
input = img[None, None, :, :]
rois = [0] + list(box)
rois = torch.tensor(rois, dtype=torch.float32)[None, :]
result_cpu = op.forward(input, rois)
if torch.cuda.is_available():
result_cuda = op.forward(input.cuda(), rois.cuda())
assert torch.allclose(result_cpu, result_cuda.cpu())
return result_cpu[0, 0]
def test_empty_box(self):
img = torch.rand(5, 5)
out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7))
self.assertTrue((out == 0).all())
def test_roi_align_rotated_gradcheck_cpu(self):
dtype = torch.float64
device = torch.device("cpu")
roi_align_rotated_op = ROIAlignRotated(
output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1
).to(dtype=dtype, device=device)
x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True)
# roi format is (batch index, x_center, y_center, width, height, angle)
rois = torch.tensor(
[[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]],
dtype=dtype,
device=device,
)
def func(input):
return roi_align_rotated_op(input, rois)
assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU"
assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU"
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_roi_align_rotated_gradient_cuda(self):
"""
Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU,
and compare the result with ROIAlign
"""
# torch.manual_seed(123)
dtype = torch.float64
device = torch.device("cuda")
pool_h, pool_w = (5, 5)
roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to(
device=device
)
roi_align_rotated = ROIAlignRotated(
output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2
).to(device=device)
x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True)
        # x_rotated = x.clone() won't work (will lead to grad_fn=CloneBackward)!
x_rotated = Variable(x.data.clone(), requires_grad=True)
# roi_rotated format is (batch index, x_center, y_center, width, height, angle)
rois_rotated = torch.tensor(
[[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]],
dtype=dtype,
device=device,
)
y_rotated = roi_align_rotated(x_rotated, rois_rotated)
s_rotated = y_rotated.sum()
s_rotated.backward()
# roi format is (batch index, x1, y1, x2, y2)
rois = torch.tensor(
[[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device
)
y = roi_align(x, rois)
s = y.sum()
s.backward()
assert torch.allclose(
x.grad, x_rotated.grad
), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA"
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/layers/test_roi_align_rotated.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import unittest
from copy import deepcopy
import torch
from torchvision import ops
from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
def nms_edit_distance(keep1, keep2):
"""
Compare the "keep" result of two nms call.
They are allowed to be different in terms of edit distance
due to floating point precision issues, e.g.,
if a box happen to have an IoU of 0.5 with another box,
one implentation may choose to keep it while another may discard it.
"""
keep1, keep2 = keep1.cpu(), keep2.cpu()
if torch.equal(keep1, keep2):
# they should be equal most of the time
return 0
keep1, keep2 = tuple(keep1), tuple(keep2)
m, n = len(keep1), len(keep2)
# edit distance with DP
f = [np.arange(n + 1), np.arange(n + 1)]
for i in range(m):
cur_row = i % 2
other_row = (i + 1) % 2
f[other_row][0] = i + 1
for j in range(n):
f[other_row][j + 1] = (
f[cur_row][j]
if keep1[i] == keep2[j]
else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1
)
return f[m % 2][n]
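# For example (editor's illustration): keep1 = (0, 2, 3) and keep2 = (0, 3)
# have an edit distance of 1, since deleting the extra index 2 aligns them.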
class TestNMSRotated(unittest.TestCase):
def reference_horizontal_nms(self, boxes, scores, iou_threshold):
"""
Args:
            boxes (N, 4): boxes in (x1, y1, x2, y2) corner form.
            scores (N,): per-box probabilities.
            iou_threshold: intersection over union threshold.
        Returns:
            picked: a tensor of indexes of the kept boxes
"""
picked = []
_, indexes = scores.sort(descending=True)
while len(indexes) > 0:
current = indexes[0]
picked.append(current.item())
if len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[1:]
rest_boxes = boxes[indexes, :]
iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
indexes = indexes[iou <= iou_threshold]
return torch.as_tensor(picked)
def _create_tensors(self, N, device="cpu"):
boxes = random_boxes(N, 200, device=device)
scores = torch.rand(N, device=device)
return boxes, scores
def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"):
N = 2000
num_classes = 50
boxes, scores = self._create_tensors(N, device=device)
idxs = torch.randint(0, num_classes, (N,))
rotated_boxes = torch.zeros(N, 5, device=device)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
for iou in [0.2, 0.5, 0.8]:
backup = boxes.clone()
keep_ref = batched_nms(boxes, scores, idxs, iou)
assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
backup = rotated_boxes.clone()
keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou)
assert torch.allclose(
rotated_boxes, backup
), "rotated_boxes modified by batched_nms_rotated"
            # Occasionally the gap can be large if many IoU values lie exactly on the threshold boundary
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batched_nms_rotated_0_degree_cuda(self):
self.test_batched_nms_rotated_0_degree_cpu(device="cuda")
def test_nms_rotated_0_degree_cpu(self, device="cpu"):
N = 1000
boxes, scores = self._create_tensors(N, device=device)
rotated_boxes = torch.zeros(N, 5, device=device)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_nms_rotated_0_degree_cuda(self):
self.test_nms_rotated_0_degree_cpu(device="cuda")
def test_nms_rotated_90_degrees_cpu(self):
N = 1000
boxes, scores = self._create_tensors(N)
rotated_boxes = torch.zeros(N, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
# Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]:
# widths and heights are intentionally swapped here for 90 degrees case
# so that the reference horizontal nms could be used
rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1]
rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 4] = torch.ones(N) * 90
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
def test_nms_rotated_180_degrees_cpu(self):
N = 1000
boxes, scores = self._create_tensors(N)
rotated_boxes = torch.zeros(N, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
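        # A 180-degree rotation maps the rectangle onto itself, so unlike the
        # 90-degree test above there is no need to swap widths and heights.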
rotated_boxes[:, 4] = torch.ones(N) * 180
err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
keep = nms_rotated(rotated_boxes, scores, iou)
self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
class TestScriptable(unittest.TestCase):
def setUp(self):
class TestingModule(torch.nn.Module):
def forward(self, boxes, scores, threshold):
return nms_rotated(boxes, scores, threshold)
self.module = TestingModule()
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptable_cpu(self):
m = deepcopy(self.module).cpu()
_ = torch.jit.script(m)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptable_cuda(self):
m = deepcopy(self.module).cuda()
_ = torch.jit.script(m)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/layers/test_nms_rotated.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List, Sequence, Tuple
import torch
from detectron2.structures import ImageList
from detectron2.utils.env import TORCH_VERSION
class TestImageList(unittest.TestCase):
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_padding_tracing(self):
# test that the trace does not contain hard-coded constant sizes
def to_imagelist(tensors: Sequence[torch.Tensor]):
image_list = ImageList.from_tensors(tensors, 4)
return image_list.tensor, image_list.image_sizes
def _tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
# test CHW (inputs needs padding vs. no padding)
for shape in [(3, 10, 10), (3, 12, 12)]:
func = torch.jit.trace(to_imagelist, ([_tensor(*shape)],))
tensor, image_sizes = func([_tensor(3, 15, 20)])
self.assertEqual(tensor.shape, (1, 3, 16, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0])
# test HW
func = torch.jit.trace(to_imagelist, ([_tensor(10, 10)],))
tensor, image_sizes = func([_tensor(15, 20)])
self.assertEqual(tensor.shape, (1, 16, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0])
# test 2x CHW
func = torch.jit.trace(
to_imagelist,
([_tensor(3, 16, 10), _tensor(3, 13, 11)],),
)
tensor, image_sizes = func([_tensor(3, 25, 20), _tensor(3, 10, 10)])
self.assertEqual(tensor.shape, (2, 3, 28, 20), tensor.shape)
self.assertEqual(image_sizes[0].tolist(), [25, 20], image_sizes[0])
self.assertEqual(image_sizes[1].tolist(), [10, 10], image_sizes[1])
        # supports calling with different spatial sizes, but not with a different number of images
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_scriptability(self):
image_nums = 2
image_tensor = torch.randn((image_nums, 10, 20), dtype=torch.float32)
image_shape = [(10, 20)] * image_nums
def f(image_tensor, image_shape: List[Tuple[int, int]]):
return ImageList(image_tensor, image_shape)
ret = f(image_tensor, image_shape)
ret_script = torch.jit.script(f)(image_tensor, image_shape)
self.assertEqual(len(ret), len(ret_script))
for i in range(image_nums):
self.assertTrue(torch.equal(ret[i], ret_script[i]))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_imagelist_from_tensors_scriptability(self):
image_tensor_0 = torch.randn(10, 20, dtype=torch.float32)
image_tensor_1 = torch.randn(12, 22, dtype=torch.float32)
inputs = [image_tensor_0, image_tensor_1]
def f(image_tensor: List[torch.Tensor]):
return ImageList.from_tensors(image_tensor, 10)
ret = f(inputs)
ret_script = torch.jit.script(f)(inputs)
self.assertEqual(len(ret), len(ret_script))
self.assertTrue(torch.equal(ret.tensor, ret_script.tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_imagelist.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask
class TestBitMask(unittest.TestCase):
def test_get_bounding_box(self):
masks = torch.tensor(
[
[
[False, False, False, True],
[False, False, True, True],
[False, True, True, False],
[False, True, True, False],
],
[
[False, False, False, False],
[False, False, True, False],
[False, True, True, False],
[False, True, True, False],
],
torch.zeros(4, 4),
]
)
bitmask = BitMasks(masks)
box_true = torch.tensor([[1, 0, 4, 4], [1, 1, 3, 4], [0, 0, 0, 0]], dtype=torch.float32)
box = bitmask.get_bounding_boxes()
self.assertTrue(torch.all(box.tensor == box_true).item())
for box in box_true:
poly = box[[0, 1, 2, 1, 2, 3, 0, 3]].numpy()
mask = polygons_to_bitmask([poly], 4, 4)
reconstruct_box = BitMasks(mask[None, :, :]).get_bounding_boxes()[0].tensor
self.assertTrue(torch.all(box == reconstruct_box).item())
reconstruct_box = PolygonMasks([[poly]]).get_bounding_boxes()[0].tensor
self.assertTrue(torch.all(box == reconstruct_box).item())
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_masks.py
|
banmo-main
|
third_party/detectron2_old/tests/structures/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import random
import unittest
import torch
from fvcore.common.benchmark import benchmark
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from detectron2.structures.boxes import Boxes
from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import reload_script_model
logger = logging.getLogger(__name__)
class TestRotatedBoxesLayer(unittest.TestCase):
def test_iou_0_dim_cpu(self):
boxes1 = torch.rand(0, 5, dtype=torch.float32)
boxes2 = torch.rand(10, 5, dtype=torch.float32)
expected_ious = torch.zeros(0, 10, dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
boxes1 = torch.rand(10, 5, dtype=torch.float32)
boxes2 = torch.rand(0, 5, dtype=torch.float32)
expected_ious = torch.zeros(10, 0, dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_0_dim_cuda(self):
boxes1 = torch.rand(0, 5, dtype=torch.float32)
boxes2 = torch.rand(10, 5, dtype=torch.float32)
expected_ious = torch.zeros(0, 10, dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
boxes1 = torch.rand(10, 5, dtype=torch.float32)
boxes2 = torch.rand(0, 5, dtype=torch.float32)
expected_ious = torch.zeros(10, 0, dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
def test_iou_half_overlap_cpu(self):
boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious, expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_half_overlap_cuda(self):
boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
def test_iou_precision(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device)
boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device)
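            # boxes2 shares center, width and angle with boxes1 and is fully
            # contained in it, so IoU = (10 * 8.3) / (10 * 10) = 8.3 / 10.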
iou = 8.3 / 10.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_iou_too_many_boxes_cuda(self):
s1, s2 = 5, 1289035
boxes1 = torch.zeros(s1, 5)
boxes2 = torch.zeros(s2, 5)
ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2))
def test_iou_extreme(self):
# Cause floating point issues in cuda kernels (#1266)
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
boxes2 = torch.tensor(
[
[
-1.117407639806935e17,
1.3858420478349148e18,
1000.0000610351562,
1000.0000610351562,
1612.0,
]
],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
self.assertTrue(ious.min() >= 0, ious)
def test_iou_issue_2154(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[
296.6620178222656,
458.73883056640625,
23.515729904174805,
47.677001953125,
0.08795166015625,
]
],
device=device,
)
boxes2 = torch.tensor(
[[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
def test_iou_issue_2167(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[
2563.74462890625000000000,
1436.79016113281250000000,
2174.70336914062500000000,
214.09500122070312500000,
115.11834716796875000000,
]
],
device=device,
)
boxes2 = torch.tensor(
[
[
2563.74462890625000000000,
1436.79028320312500000000,
2174.70288085937500000000,
214.09495544433593750000,
115.11835479736328125000,
]
],
device=device,
)
ious = pairwise_iou_rotated(boxes1, boxes2)
expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
class TestRotatedBoxesStructure(unittest.TestCase):
def test_clip_area_0_degree(self):
for _ in range(50):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
# Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2)
boxes_4d = torch.zeros(num_boxes, 4)
boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0
boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0
boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0
boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0
image_size = (500, 600)
test_boxes_4d = Boxes(boxes_4d)
test_boxes_5d = RotatedBoxes(boxes_5d)
# Before clip
areas_4d = test_boxes_4d.area()
areas_5d = test_boxes_5d.area()
self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
# After clip
test_boxes_4d.clip(image_size)
test_boxes_5d.clip(image_size)
areas_4d = test_boxes_4d.area()
areas_5d = test_boxes_5d.area()
self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
def test_clip_area_arbitrary_angle(self):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
clip_angle_threshold = random.uniform(0, 180)
image_size = (500, 600)
test_boxes_5d = RotatedBoxes(boxes_5d)
# Before clip
areas_before = test_boxes_5d.area()
# After clip
test_boxes_5d.clip(image_size, clip_angle_threshold)
areas_diff = test_boxes_5d.area() - areas_before
# the areas should only decrease after clipping
self.assertTrue(torch.all(areas_diff <= 0))
# whenever the box is clipped (thus the area shrinks),
# the angle for the box must be within the clip_angle_threshold
# Note that the clip function will normalize the angle range
# to be within (-180, 180]
self.assertTrue(
torch.all(torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold)
)
def test_normalize_angles(self):
# torch.manual_seed(0)
for _ in range(50):
num_boxes = 100
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
normalized_boxes = rotated_boxes.clone()
normalized_boxes.normalize_angles()
self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180))
self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180))
# x, y, w, h should not change
self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4]))
# the cos/sin values of the angles should stay the same
self.assertTrue(
torch.allclose(
torch.cos(boxes_5d[:, 4] * math.pi / 180),
torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180),
atol=1e-5,
)
)
self.assertTrue(
torch.allclose(
torch.sin(boxes_5d[:, 4] * math.pi / 180),
torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180),
atol=1e-5,
)
)
def test_pairwise_iou_0_degree(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor(
[
[0.5, 0.5, 1.0, 1.0, 0.0],
[0.25, 0.5, 0.5, 1.0, 0.0],
[0.5, 0.25, 1.0, 0.5, 0.0],
[0.25, 0.25, 0.5, 0.5, 0.0],
[0.75, 0.75, 0.5, 0.5, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
dtype=torch.float32,
device=device,
)
expected_ious = torch.tensor(
[
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
],
dtype=torch.float32,
device=device,
)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_45_degrees(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[
[1, 1, math.sqrt(2), math.sqrt(2), 45],
[1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45],
],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device)
expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_orthogonal(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device)
boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device)
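            # The two 10 x 6 rectangles are 90 degrees apart about a shared
            # center, so they intersect in a 6 x 6 square:
            # IoU = 36 / (60 + 60 - 36).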
iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0)
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_large_close_boxes(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
boxes1 = torch.tensor(
[[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]],
dtype=torch.float32,
device=device,
)
boxes2 = torch.tensor(
[[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]],
dtype=torch.float32,
device=device,
)
iou = 364.259155 / 364.259186
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_many_boxes(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
num_boxes1 = 100
num_boxes2 = 200
boxes1 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 10, 0],
dtype=torch.float32,
device=device,
)
for i in range(num_boxes1)
]
)
boxes2 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
dtype=torch.float32,
device=device,
)
for i in range(num_boxes2)
]
)
expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device)
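            # The i-th boxes in the two sets share center and width, and the
            # second box is shorter, hence fully contained:
            # IoU = h2 / h1 = (1 + 9 * i / num_boxes2) / 10.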
for i in range(min(num_boxes1, num_boxes2)):
expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_issue1207_simplified(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
# Simplified test case of D2-issue-1207
boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device)
boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device)
iou = 0.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_iou_issue1207(self):
for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
# The original test case in D2-issue-1207
boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device)
iou = 0.0
expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_empty_cat(self):
x = RotatedBoxes.cat([])
        self.assertEqual(tuple(x.tensor.shape), (0, 5))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_scriptability(self):
def func(x):
boxes = RotatedBoxes(x)
test = boxes.to(torch.device("cpu")).tensor
return boxes.area(), test
f = torch.jit.script(func)
f = reload_script_model(f)
f(torch.rand((3, 5)))
data = torch.rand((3, 5))
def func_cat(x: torch.Tensor):
boxes1 = RotatedBoxes(x)
boxes2 = RotatedBoxes(x)
# this is not supported by torchscript for now.
# boxes3 = RotatedBoxes.cat([boxes1, boxes2])
boxes3 = boxes1.cat([boxes1, boxes2])
return boxes3
f = torch.jit.script(func_cat)
script_box = f(data)
self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))
def benchmark_rotated_iou():
num_boxes1 = 200
num_boxes2 = 500
boxes1 = torch.stack(
[
torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32)
for i in range(num_boxes1)
]
)
boxes2 = torch.stack(
[
torch.tensor(
[5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
dtype=torch.float32,
)
for i in range(num_boxes2)
]
)
def func(dev, n=1):
b1 = boxes1.to(device=dev)
b2 = boxes2.to(device=dev)
def bench():
for _ in range(n):
pairwise_iou_rotated(b1, b2)
if dev.type == "cuda":
torch.cuda.synchronize()
return bench
# only run it once per timed loop, since it's slow
args = [{"dev": torch.device("cpu"), "n": 1}]
if torch.cuda.is_available():
args.append({"dev": torch.device("cuda"), "n": 10})
benchmark(func, "rotated_iou", args, warmup_iters=3)
if __name__ == "__main__":
unittest.main()
benchmark_rotated_iou()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_rotated_boxes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import math
import numpy as np
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, pairwise_ioa, pairwise_iou
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import reload_script_model
class TestBoxMode(unittest.TestCase):
def _convert_xy_to_wh(self, x):
return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
def _convert_xywha_to_xyxy(self, x):
return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
def _convert_xywh_to_xywha(self, x):
return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
def test_convert_int_mode(self):
BoxMode.convert([1, 2, 3, 4], 0, 1)
def test_box_convert_list(self):
for tp in [list, tuple]:
box = tp([5.0, 5.0, 10.0, 10.0])
output = self._convert_xy_to_wh(box)
self.assertIsInstance(output, tp)
self.assertIsInstance(output[0], float)
self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0]))
with self.assertRaises(Exception):
self._convert_xy_to_wh([box])
def test_box_convert_array(self):
box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_cpu_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
output = output.numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_box_convert_cuda_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda()
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertEqual(output.device, box.device)
output = output.cpu().numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_xywha_to_xyxy_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20, 0])
output = self._convert_xywha_to_xyxy(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([35, 40, 65, 60]))
with self.assertRaises(Exception):
self._convert_xywha_to_xyxy([box])
def test_box_convert_xywha_to_xyxy_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray(
[
[50, 50, 30, 20, 0],
[50, 50, 30, 20, 90],
[1, 1, math.sqrt(2), math.sqrt(2), -45],
],
dtype=dtype,
)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywha_to_xyxy_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor(
[
[50, 50, 30, 20, 0],
[50, 50, 30, 20, 90],
[1, 1, math.sqrt(2), math.sqrt(2), -45],
],
dtype=dtype,
)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywh_to_xywha_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20])
output = self._convert_xywh_to_xywha(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([65, 60, 30, 20, 0]))
with self.assertRaises(Exception):
self._convert_xywh_to_xywha([box])
def test_box_convert_xywh_to_xywha_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray(
[[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
)
self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_box_convert_xywh_to_xywha_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor(
[[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
)
self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
def test_json_serializable(self):
payload = {"box_mode": BoxMode.XYWH_REL}
try:
json.dumps(payload)
except Exception:
self.fail("JSON serialization failed")
def test_json_deserializable(self):
payload = '{"box_mode": 2}'
obj = json.loads(payload)
try:
obj["box_mode"] = BoxMode(obj["box_mode"])
except Exception:
self.fail("JSON deserialization failed")
class TestBoxIOU(unittest.TestCase):
def create_boxes(self):
boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
boxes2 = torch.tensor(
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 1.0, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.5, 1.5, 1.5],
]
)
return boxes1, boxes2
def test_pairwise_iou(self):
boxes1, boxes2 = self.create_boxes()
expected_ious = torch.tensor(
[
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
[1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
]
)
ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2))
self.assertTrue(torch.allclose(ious, expected_ious))
def test_pairwise_ioa(self):
boxes1, boxes2 = self.create_boxes()
expected_ioas = torch.tensor(
[[1.0, 1.0, 1.0, 1.0, 1.0, 0.25], [1.0, 1.0, 1.0, 1.0, 1.0, 0.25]]
)
ioas = pairwise_ioa(Boxes(boxes1), Boxes(boxes2))
self.assertTrue(torch.allclose(ioas, expected_ioas))
class TestBoxes(unittest.TestCase):
def test_empty_cat(self):
x = Boxes.cat([])
        self.assertEqual(tuple(x.tensor.shape), (0, 4))
def test_to(self):
x = Boxes(torch.rand(3, 4))
self.assertEqual(x.to(device="cpu").tensor.device.type, "cpu")
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_scriptability(self):
def func(x):
boxes = Boxes(x)
test = boxes.to(torch.device("cpu")).tensor
return boxes.area(), test
f = torch.jit.script(func)
f = reload_script_model(f)
f(torch.rand((3, 4)))
data = torch.rand((3, 4))
def func_cat(x: torch.Tensor):
boxes1 = Boxes(x)
boxes2 = Boxes(x)
            # boxes3 = Boxes.cat([boxes1, boxes2]) # this is not supported by torchscript for now.
boxes3 = boxes1.cat([boxes1, boxes2])
return boxes3
f = torch.jit.script(func_cat)
script_box = f(data)
self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_boxes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch import Tensor
from detectron2.export.torchscript import patch_instances
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import convert_scripted_instances
class TestInstances(unittest.TestCase):
def test_int_indexing(self):
attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]])
attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4])
instances = Instances((100, 100))
instances.attr1 = attr1
instances.attr2 = attr2
for i in range(-len(instances), len(instances)):
inst = instances[i]
self.assertEqual((inst.attr1 == attr1[i]).all(), True)
self.assertEqual((inst.attr2 == attr2[i]).all(), True)
self.assertRaises(IndexError, lambda: instances[len(instances)])
self.assertRaises(IndexError, lambda: instances[-len(instances) - 1])
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_new_fields(self):
def get_mask(x: Instances) -> torch.Tensor:
return x.mask
class f(torch.nn.Module):
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes # noqa F841
objectness_logits = x.objectness_logits # noqa F841
return x
class g(torch.nn.Module):
def forward(self, x: Instances):
return get_mask(x)
class g2(torch.nn.Module):
def __init__(self):
super().__init__()
self.g = g()
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes # noqa F841
return x, self.g(x)
fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor}
with patch_instances(fields):
torch.jit.script(f())
# can't script anymore after exiting the context
with self.assertRaises(Exception):
# will create a ConcreteType for g
torch.jit.script(g2())
new_fields = {"mask": Tensor}
with patch_instances(new_fields):
# will compile g with a different Instances; this should pass
torch.jit.script(g())
with self.assertRaises(Exception):
torch.jit.script(g2())
new_fields = {"mask": Tensor, "proposal_boxes": Boxes}
with patch_instances(new_fields) as NewInstances:
# get_mask will be compiled with a different Instances; this should pass
scripted_g2 = torch.jit.script(g2())
x = NewInstances((3, 4))
x.mask = torch.rand(3)
x.proposal_boxes = Boxes(torch.rand(3, 4))
scripted_g2(x) # it should accept the new Instances object and run successfully
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_access_fields(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
proposal_boxes = x.proposal_boxes
objectness_logits = x.objectness_logits
return proposal_boxes.tensor + objectness_logits
fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor}
with patch_instances(fields):
torch.jit.script(f())
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_len(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return len(x)
class g(torch.nn.Module):
def forward(self, x: Instances):
return len(x)
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
with self.assertRaises(Exception):
script_module(x)
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
length = script_module(x)
self.assertEqual(length, 2)
fields = {"objectness_logits": Tensor}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(g())
x = new_instance(image_shape)
objectness_logits = torch.tensor([1.0]).reshape(1, 1)
x.objectness_logits = objectness_logits
length = script_module(x)
self.assertEqual(length, 1)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_has(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return x.has("proposal_boxes")
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
self.assertFalse(script_module(x))
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
self.assertTrue(script_module(x))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_script_to(self):
class f(torch.nn.Module):
def forward(self, x: Instances):
return x.to(torch.device("cpu"))
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes, "a": Tensor}
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
x = new_instance(image_shape)
script_module(x)
box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
x.proposal_boxes = Boxes(box_tensors)
x.a = box_tensors
script_module(x)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_script_getitem(self):
class f(torch.nn.Module):
def forward(self, x: Instances, idx):
return x[idx]
image_shape = (15, 15)
fields = {"proposal_boxes": Boxes, "a": Tensor}
inst = Instances(image_shape)
inst.proposal_boxes = Boxes(torch.rand(4, 4))
inst.a = torch.rand(4, 10)
idx = torch.tensor([True, False, True, False])
with patch_instances(fields) as new_instance:
script_module = torch.jit.script(f())
out = f()(inst, idx)
out_scripted = script_module(new_instance.from_instances(inst), idx)
self.assertTrue(
torch.equal(out.proposal_boxes.tensor, out_scripted.proposal_boxes.tensor)
)
self.assertTrue(torch.equal(out.a, out_scripted.a))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_from_to_instances(self):
orig = Instances((30, 30))
orig.proposal_boxes = Boxes(torch.rand(3, 4))
fields = {"proposal_boxes": Boxes, "a": Tensor}
with patch_instances(fields) as NewInstances:
# convert to NewInstances and back
new1 = NewInstances.from_instances(orig)
new2 = convert_scripted_instances(new1)
self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new1.proposal_boxes.tensor))
self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new2.proposal_boxes.tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/structures/test_instances.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import yaml
from omegaconf import OmegaConf
from omegaconf import __version__ as oc_version
from detectron2.config import instantiate, LazyCall as L
from detectron2.layers import ShapeSpec
OC_VERSION = tuple(int(x) for x in oc_version.split(".")[:2])
class TestClass:
def __init__(self, int_arg, list_arg=None, dict_arg=None, extra_arg=None):
self.int_arg = int_arg
self.list_arg = list_arg
self.dict_arg = dict_arg
self.extra_arg = extra_arg
def __call__(self, call_arg):
return call_arg + self.int_arg
@unittest.skipIf(OC_VERSION < (2, 1), "omegaconf version too old")
class TestConstruction(unittest.TestCase):
def test_basic_construct(self):
objconf = L(TestClass)(
int_arg=3,
list_arg=[10],
dict_arg={},
extra_arg=L(TestClass)(int_arg=4, list_arg="${..list_arg}"),
)
obj = instantiate(objconf)
self.assertIsInstance(obj, TestClass)
self.assertEqual(obj.int_arg, 3)
self.assertEqual(obj.extra_arg.int_arg, 4)
self.assertEqual(obj.extra_arg.list_arg, obj.list_arg)
objconf.extra_arg.list_arg = [5]
obj = instantiate(objconf)
self.assertIsInstance(obj, TestClass)
self.assertEqual(obj.extra_arg.list_arg, [5])
def test_instantiate_other_obj(self):
# do nothing for other obj
self.assertEqual(instantiate(5), 5)
x = [3, 4, 5]
self.assertEqual(instantiate(x), x)
x = TestClass(1)
self.assertIs(instantiate(x), x)
x = {"xx": "yy"}
self.assertIs(instantiate(x), x)
def test_instantiate_lazy_target(self):
# _target_ is result of instantiate
objconf = L(L(len)(int_arg=3))(call_arg=4)
objconf._target_._target_ = TestClass
self.assertEqual(instantiate(objconf), 7)
def test_instantiate_lst(self):
lst = [1, 2, L(TestClass)(int_arg=1)]
x = L(TestClass)(int_arg=lst) # list as an argument should be recursively instantiated
x = instantiate(x).int_arg
self.assertEqual(x[:2], [1, 2])
self.assertIsInstance(x[2], TestClass)
self.assertEqual(x[2].int_arg, 1)
def test_instantiate_namedtuple(self):
x = L(TestClass)(int_arg=ShapeSpec(channels=1, width=3))
# test serialization
with tempfile.TemporaryDirectory() as d:
fname = os.path.join(d, "d2_test.yaml")
OmegaConf.save(x, fname)
with open(fname) as f:
x = yaml.unsafe_load(f)
x = instantiate(x)
self.assertIsInstance(x.int_arg, ShapeSpec)
self.assertEqual(x.int_arg.channels, 1)
def test_bad_lazycall(self):
with self.assertRaises(Exception):
L(3)
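# A hypothetical usage sketch (editor's addition, not part of the original
# tests): LazyCall records the target and its kwargs without calling it, and
# instantiate() resolves the whole tree recursively, e.g.:
#   conf = L(TestClass)(int_arg=1, extra_arg=L(TestClass)(int_arg=2))
#   obj = instantiate(conf)  # obj.extra_arg is an already-built TestClass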
|
banmo-main
|
third_party/detectron2_old/tests/config/test_instantiate_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from itertools import count
from detectron2.config import LazyCall as L
from .dir1.dir1_a import dir1a_dict, dir1a_str
dir1a_dict.a = "modified"
# modification above won't affect future imports
from .dir1.dir1_b import dir1b_dict, dir1b_str
lazyobj = L(count)(x=dir1a_str, y=dir1b_str)
|
banmo-main
|
third_party/detectron2_old/tests/config/root_cfg.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import torch
from omegaconf import OmegaConf
from detectron2 import model_zoo
from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_model
_V0_CFG = """
MODEL:
RPN_HEAD:
NAME: "TEST"
VERSION: 0
"""
_V1_CFG = """
MODEL:
WEIGHT: "/path/to/weight"
"""
class TestConfigVersioning(unittest.TestCase):
def test_upgrade_downgrade_consistency(self):
cfg = get_cfg()
# check that custom is preserved
cfg.USER_CUSTOM = 1
down = downgrade_config(cfg, to_version=0)
up = upgrade_config(down)
self.assertTrue(up == cfg)
def _merge_cfg_str(self, cfg, merge_str):
f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False)
try:
f.write(merge_str)
f.close()
cfg.merge_from_file(f.name)
finally:
os.remove(f.name)
return cfg
def test_auto_upgrade(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
cfg.USER_CUSTOM = 1
self._merge_cfg_str(cfg, _V0_CFG)
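        # The V0 key MODEL.RPN_HEAD.NAME is renamed to MODEL.RPN.HEAD_NAME by
        # the automatic upgrade, as asserted below.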
self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST")
self.assertEqual(cfg.VERSION, latest_ver)
def test_guess_v1(self):
cfg = get_cfg()
latest_ver = cfg.VERSION
self._merge_cfg_str(cfg, _V1_CFG)
self.assertEqual(cfg.VERSION, latest_ver)
class _TestClassA(torch.nn.Module):
@configurable
def __init__(self, arg1, arg2, arg3=3):
super().__init__()
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
assert arg1 == 1
assert arg2 == 2
assert arg3 == 3
@classmethod
def from_config(cls, cfg):
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
return args
class _TestClassB(_TestClassA):
@configurable
def __init__(self, input_shape, arg1, arg2, arg3=3):
"""
Doc of _TestClassB
"""
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
@classmethod
def from_config(cls, cfg, input_shape): # test extra positional arg in from_config
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
return args
class _LegacySubClass(_TestClassB):
# an old subclass written in cfg style
def __init__(self, cfg, input_shape, arg4=4):
super().__init__(cfg, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _NewSubClassNewInit(_TestClassB):
# test new subclass with a new __init__
@configurable
def __init__(self, input_shape, arg4=4, **kwargs):
super().__init__(input_shape, **kwargs)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _LegacySubClassNotCfg(_TestClassB):
# an old subclass written in cfg style, but argument is not called "cfg"
def __init__(self, config, input_shape):
super().__init__(config, input_shape)
assert self.arg1 == 1
assert self.arg2 == 2
assert self.arg3 == 3
class _TestClassC(_TestClassB):
@classmethod
def from_config(cls, cfg, input_shape, **kwargs): # test extra kwarg overwrite
args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2}
args["input_shape"] = input_shape
args.update(kwargs)
return args
class _TestClassD(_TestClassA):
@configurable
def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
assert input_shape == "shape"
super().__init__(arg1, arg2, arg3)
# _TestClassA.from_config does not have input_shape args.
# Test whether input_shape will be forwarded to __init__
@configurable(from_config=lambda cfg, arg2: {"arg1": cfg.ARG1, "arg2": arg2, "arg3": cfg.ARG3})
def _test_func(arg1, arg2=2, arg3=3, arg4=4):
return arg1, arg2, arg3, arg4
class TestConfigurable(unittest.TestCase):
def testInitWithArgs(self):
_ = _TestClassA(arg1=1, arg2=2, arg3=3)
_ = _TestClassB("shape", arg1=1, arg2=2)
_ = _TestClassC("shape", arg1=1, arg2=2)
_ = _TestClassD("shape", arg1=1, arg2=2, arg3=3)
def testPatchedAttr(self):
self.assertTrue("Doc" in _TestClassB.__init__.__doc__)
self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int)
def testInitWithCfg(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
cfg.ARG3 = 3
_ = _TestClassA(cfg)
_ = _TestClassB(cfg, input_shape="shape")
_ = _TestClassC(cfg, input_shape="shape")
_ = _TestClassD(cfg, input_shape="shape")
_ = _LegacySubClass(cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(cfg, input_shape="shape")
with self.assertRaises(TypeError):
# disallow forwarding positional args to __init__ since it's prone to errors
_ = _TestClassD(cfg, "shape")
# call with kwargs instead
_ = _TestClassA(cfg=cfg)
_ = _TestClassB(cfg=cfg, input_shape="shape")
_ = _TestClassC(cfg=cfg, input_shape="shape")
_ = _TestClassD(cfg=cfg, input_shape="shape")
_ = _LegacySubClass(cfg=cfg, input_shape="shape")
_ = _NewSubClassNewInit(cfg=cfg, input_shape="shape")
_ = _LegacySubClassNotCfg(config=cfg, input_shape="shape")
def testInitWithCfgOverwrite(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 999 # wrong config
with self.assertRaises(AssertionError):
_ = _TestClassA(cfg, arg3=3)
# overwrite arg2 with correct config later:
_ = _TestClassA(cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3)
# call with kwargs cfg=cfg instead
_ = _TestClassA(cfg=cfg, arg2=2, arg3=3)
_ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
_ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3)
def testInitWithCfgWrongArgs(self):
cfg = get_cfg()
cfg.ARG1 = 1
cfg.ARG2 = 2
with self.assertRaises(TypeError):
_ = _TestClassB(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassC(cfg, "shape", not_exist=1)
with self.assertRaises(TypeError):
_ = _TestClassD(cfg, "shape", not_exist=1)
def testBadClass(self):
class _BadClass1:
@configurable
def __init__(self, a=1, b=2):
pass
class _BadClass2:
@configurable
def __init__(self, a=1, b=2):
pass
def from_config(self, cfg): # noqa
pass
class _BadClass3:
@configurable
def __init__(self, a=1, b=2):
pass
# bad name: must be cfg
@classmethod
def from_config(cls, config): # noqa
pass
with self.assertRaises(AttributeError):
_ = _BadClass1(a=1)
with self.assertRaises(TypeError):
_ = _BadClass2(a=1)
with self.assertRaises(TypeError):
_ = _BadClass3(get_cfg())
def testFuncWithCfg(self):
cfg = get_cfg()
cfg.ARG1 = 10
cfg.ARG3 = 30
self.assertEqual(_test_func(1), (1, 2, 3, 4))
with self.assertRaises(TypeError):
_test_func(cfg)
self.assertEqual(_test_func(cfg, arg2=2), (10, 2, 30, 4))
self.assertEqual(_test_func(cfg, arg1=100, arg2=20), (100, 20, 30, 4))
self.assertEqual(_test_func(cfg, arg1=100, arg2=20, arg4=40), (100, 20, 30, 40))
def testOmegaConf(self):
cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
cfg = OmegaConf.create(cfg.dump())
if not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
# test that a model can be built with omegaconf config as well
build_model(cfg)
|
banmo-main
|
third_party/detectron2_old/tests/config/test_yacs_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import tempfile
from itertools import count
from detectron2.config import LazyConfig, LazyCall as L
class TestLazyPythonConfig(unittest.TestCase):
def setUp(self):
self.root_filename = os.path.join(os.path.dirname(__file__), "root_cfg.py")
def test_load(self):
cfg = LazyConfig.load(self.root_filename)
self.assertEqual(cfg.dir1a_dict.a, "modified")
self.assertEqual(cfg.dir1b_dict.a, 1)
self.assertEqual(cfg.lazyobj.x, "base_a_1")
cfg.lazyobj.x = "new_x"
# reload
cfg = LazyConfig.load(self.root_filename)
self.assertEqual(cfg.lazyobj.x, "base_a_1")
def test_save_load(self):
cfg = LazyConfig.load(self.root_filename)
with tempfile.TemporaryDirectory(prefix="detectron2") as d:
fname = os.path.join(d, "test_config.yaml")
LazyConfig.save(cfg, fname)
cfg2 = LazyConfig.load(fname)
self.assertEqual(cfg2.lazyobj._target_, "itertools.count")
self.assertEqual(cfg.lazyobj._target_, count)
cfg2.lazyobj.pop("_target_")
cfg.lazyobj.pop("_target_")
# the rest are equal
self.assertEqual(cfg, cfg2)
def test_overrides(self):
cfg = LazyConfig.load(self.root_filename)
LazyConfig.apply_overrides(cfg, ["lazyobj.x=123", 'dir1b_dict.a="123"'])
self.assertEqual(cfg.dir1b_dict.a, "123")
self.assertEqual(cfg.lazyobj.x, 123)
def test_invalid_overrides(self):
cfg = LazyConfig.load(self.root_filename)
with self.assertRaises(KeyError):
LazyConfig.apply_overrides(cfg, ["lazyobj.x.xxx=123"])
def test_to_py(self):
cfg = LazyConfig.load(self.root_filename)
cfg.lazyobj.x = {"a": 1, "b": 2, "c": L(count)(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]})}
cfg.list = ["a", 1, "b", 3.2]
py_str = LazyConfig.to_py(cfg)
expected = """cfg.dir1a_dict.a = "modified"
cfg.dir1a_dict.b = 2
cfg.dir1b_dict.a = 1
cfg.dir1b_dict.b = 2
cfg.lazyobj = itertools.count(
x={
"a": 1,
"b": 2,
"c": itertools.count(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
},
y="base_a_1_from_b",
)
cfg.list = ["a", 1, "b", 3.2]
"""
self.assertEqual(py_str, expected)
|
banmo-main
|
third_party/detectron2_old/tests/config/test_lazy_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
dir1a_str = "base_a_1"
dir1a_dict = {"a": 1, "b": 2}
|
banmo-main
|
third_party/detectron2_old/tests/config/dir1/dir1_a.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import LazyConfig
# equivalent to relative import
dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))
dir1b_str = dir1a_str + "_from_b"
dir1b_dict = dir1a_dict
# Every import is a reload: not modified by other config files
assert dir1a_dict.a == 1
|
banmo-main
|
third_party/detectron2_old/tests/config/dir1/dir1_b.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import detectron2.export.torchscript # apply patch # noqa
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_resnet_backbone
from detectron2.modeling.backbone.fpn import build_resnet_fpn_backbone
from detectron2.utils.env import TORCH_VERSION
class TestBackBone(unittest.TestCase):
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_resnet_scriptability(self):
cfg = get_cfg()
resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3))
scripted_resnet = torch.jit.script(resnet)
inp = torch.rand(2, 3, 100, 100)
out1 = resnet(inp)["res4"]
out2 = scripted_resnet(inp)["res4"]
self.assertTrue(torch.allclose(out1, out2))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_fpn_scriptability(self):
cfg = model_zoo.get_config("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
bb = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
bb_s = torch.jit.script(bb)
inp = torch.rand(2, 3, 128, 128)
out1 = bb(inp)["p5"]
out2 = bb_s(inp)["p5"]
self.assertTrue(torch.allclose(out1, out2))
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_backbone.py
|
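A small sketch of the backbone contract the scriptability tests above rely on: every backbone reports its output feature shapes via output_shape(), which later tests feed to RPN and ROI heads. The channel/stride values asserted below are the R50 defaults and serve only as an illustration.
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_resnet_backbone
cfg = get_cfg()
resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3))
shapes = resnet.output_shape()      # dict[str, ShapeSpec], keyed by stage name
assert shapes["res4"].stride == 16  # res4 is downsampled 16x relative to the input
assert shapes["res4"].channels == 1024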
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List
import torch
from detectron2.config import get_cfg
from detectron2.modeling.matcher import Matcher
class TestMatcher(unittest.TestCase):
def test_scriptability(self):
cfg = get_cfg()
anchor_matcher = Matcher(
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
)
match_quality_matrix = torch.tensor(
[[0.15, 0.45, 0.2, 0.6], [0.3, 0.65, 0.05, 0.1], [0.05, 0.4, 0.25, 0.4]]
)
expected_matches = torch.tensor([1, 1, 2, 0])
expected_match_labels = torch.tensor([-1, 1, 0, 1], dtype=torch.int8)
matches, match_labels = anchor_matcher(match_quality_matrix)
self.assertTrue(torch.allclose(matches, expected_matches))
self.assertTrue(torch.allclose(match_labels, expected_match_labels))
        # nonzero_tuple must be imported explicitly to let jit know what it is.
# https://github.com/pytorch/pytorch/issues/38964
from detectron2.layers import nonzero_tuple # noqa F401
def f(thresholds: List[float], labels: List[int]):
return Matcher(thresholds, labels, allow_low_quality_matches=True)
scripted_anchor_matcher = torch.jit.script(f)(
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS
)
matches, match_labels = scripted_anchor_matcher(match_quality_matrix)
self.assertTrue(torch.allclose(matches, expected_matches))
self.assertTrue(torch.allclose(match_labels, expected_match_labels))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_matcher.py
|
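A worked miniature of the Matcher semantics tested above, assuming the RPN defaults of thresholds [0.3, 0.7] and labels [0, -1, 1] (negative below 0.3, ignored between, positive at or above 0.7):
import torch
from detectron2.modeling.matcher import Matcher
matcher = Matcher([0.3, 0.7], [0, -1, 1], allow_low_quality_matches=False)
# rows = ground-truth boxes, columns = predictions (e.g. anchors)
quality = torch.tensor([[0.9, 0.5, 0.1],
                        [0.2, 0.8, 0.25]])
matches, labels = matcher(quality)
assert matches.tolist() == [0, 1, 1]  # best-matching gt index per prediction
assert labels.tolist() == [1, 1, 0]   # 0.9 -> positive, 0.8 -> positive, 0.25 -> negative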
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.modeling.poolers import ROIPooler, _fmt_box_list
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
logger = logging.getLogger(__name__)
class TestROIPooler(unittest.TestCase):
def _test_roialignv2_roialignrotated_match(self, device):
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor,)
sampling_ratio = 0
N, C, H, W = 2, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
features = [feature.to(device)]
rois = []
rois_rotated = []
for _ in range(N):
boxes = random_boxes(N_rois, W * canonical_scale_factor)
rotated_boxes = torch.zeros(N_rois, 5)
rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
rois.append(Boxes(boxes).to(device))
rois_rotated.append(RotatedBoxes(rotated_boxes).to(device))
roialignv2_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignV2",
)
roialignv2_out = roialignv2_pooler(features, rois)
roialignrotated_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignRotated",
)
roialignrotated_out = roialignrotated_pooler(features, rois_rotated)
self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4))
def test_roialignv2_roialignrotated_match_cpu(self):
self._test_roialignv2_roialignrotated_match(device="cpu")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_roialignv2_roialignrotated_match_cuda(self):
self._test_roialignv2_roialignrotated_match(device="cuda")
def _test_scriptability(self, device):
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor,)
sampling_ratio = 0
N, C, H, W = 2, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
features = [feature.to(device)]
rois = []
for _ in range(N):
boxes = random_boxes(N_rois, W * canonical_scale_factor)
rois.append(Boxes(boxes).to(device))
roialignv2_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlignV2",
)
roialignv2_out = roialignv2_pooler(features, rois)
scripted_roialignv2_out = torch.jit.script(roialignv2_pooler)(features, rois)
self.assertTrue(torch.equal(roialignv2_out, scripted_roialignv2_out))
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_scriptability_cpu(self):
self._test_scriptability(device="cpu")
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_scriptability_gpu(self):
self._test_scriptability(device="cuda")
def test_no_images(self):
N, C, H, W = 0, 32, 32, 32
feature = torch.rand(N, C, H, W) - 0.5
features = [feature]
pooler = ROIPooler(
output_size=14, scales=(1.0,), sampling_ratio=0.0, pooler_type="ROIAlignV2"
)
output = pooler.forward(features, [])
self.assertEqual(output.shape, (0, C, 14, 14))
def test_fmt_box_list_tracing(self):
class Model(torch.nn.Module):
def forward(self, box_tensor):
return _fmt_box_list(box_tensor, 0)
with torch.no_grad():
func = torch.jit.trace(Model(), torch.ones(10, 4))
self.assertEqual(func(torch.ones(10, 4)).shape, (10, 5))
self.assertEqual(func(torch.ones(5, 4)).shape, (5, 5))
self.assertEqual(func(torch.ones(20, 4)).shape, (20, 5))
def test_roi_pooler_tracing(self):
class Model(torch.nn.Module):
def __init__(self, roi):
super(Model, self).__init__()
self.roi = roi
def forward(self, x, boxes):
return self.roi(x, [Boxes(boxes)])
pooler_resolution = 14
canonical_level = 4
canonical_scale_factor = 2 ** canonical_level
pooler_scales = (1.0 / canonical_scale_factor, 0.5 / canonical_scale_factor)
sampling_ratio = 0
N, C, H, W = 1, 4, 10, 8
N_rois = 10
std = 11
mean = 0
feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
feature = [feature, feature]
rois = random_boxes(N_rois, W * canonical_scale_factor)
# Add one larger box so that this level has only one box.
# This may trigger the bug https://github.com/pytorch/pytorch/issues/49852
        # which we need to work around.
rois = torch.cat([rois, torch.tensor([[0, 0, 448, 448]])])
model = Model(
ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type="ROIAlign",
)
)
with torch.no_grad():
func = torch.jit.trace(model, (feature, rois))
o = func(feature, rois)
self.assertEqual(o.shape, (11, 4, 14, 14))
o = func(feature, rois[:5])
self.assertEqual(o.shape, (5, 4, 14, 14))
o = func(feature, random_boxes(20, W * canonical_scale_factor))
self.assertEqual(o.shape, (20, 4, 14, 14))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_roi_pooler.py
|
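For orientation, a sketch of the level-assignment heuristic behind ROIPooler's multi-scale behavior (the FPN-paper rule detectron2 implements in assign_boxes_to_levels); the numbers below follow the defaults assumed here:
import math
def assign_level(box, canonical_box_size=224, canonical_level=4, min_level=2, max_level=5):
    # level = floor(k0 + log2(sqrt(area) / canonical_size)), clamped to the FPN range
    x0, y0, x1, y1 = box
    size = math.sqrt((x1 - x0) * (y1 - y0))
    level = math.floor(canonical_level + math.log2(size / canonical_box_size))
    return max(min_level, min(max_level, level))
assert assign_level([0, 0, 224, 224]) == 4  # canonical-size box -> canonical level
assert assign_level([0, 0, 112, 112]) == 3  # half the side length -> one level down
assert assign_level([0, 0, 448, 448]) == 5  # the large box added in the tracing test above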
import unittest
from detectron2.layers import ShapeSpec
from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector
try:
import mmdet.models # noqa
HAS_MMDET = True
except ImportError:
HAS_MMDET = False
@unittest.skipIf(not HAS_MMDET, "mmdet not available")
class TestMMDetWrapper(unittest.TestCase):
def test_backbone(self):
MMDetBackbone(
backbone=dict(
type="DetectoRS_ResNet",
conv_cfg=dict(type="ConvAWS"),
sac=dict(type="SAC", use_deform=True),
stage_with_sac=(False, True, True, True),
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(
type="FPN",
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
),
# skip pretrained model for tests
# pretrained_backbone="torchvision://resnet50",
output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]],
output_names=["p2", "p3", "p4", "p5", "p6"],
)
def test_detector(self):
# a basic R50 Mask R-CNN
MMDetDetector(
detector=dict(
type="MaskRCNN",
# skip pretrained model for tests
# pretrained="torchvision://resnet50",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(
type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
),
rpn_head=dict(
type="RPNHead",
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type="AnchorGenerator",
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
),
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
roi_head=dict(
type="StandardRoIHead",
bbox_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
bbox_head=dict(
type="Shared2FCBBoxHead",
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=False,
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
mask_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
mask_head=dict(
type="FCNMaskHead",
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False,
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
),
mask_size=28,
pos_weight=-1,
debug=False,
),
),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
score_thr=0.05,
nms=dict(type="nms", iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5,
),
),
),
pixel_mean=[1, 2, 3],
pixel_std=[1, 2, 3],
)
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_mmdet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import numpy as np
import unittest
from contextlib import contextmanager
from copy import deepcopy
import torch
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import EventStorage
from detectron2.utils.testing import get_model_no_weights
@contextmanager
def typecheck_hook(model, *, in_dtype=None, out_dtype=None):
"""
    Check that the model is called with the given input/output dtypes.
"""
if not isinstance(in_dtype, set):
in_dtype = {in_dtype}
if not isinstance(out_dtype, set):
out_dtype = {out_dtype}
def flatten(x):
if isinstance(x, torch.Tensor):
return [x]
if isinstance(x, (list, tuple)):
return list(itertools.chain(*[flatten(t) for t in x]))
if isinstance(x, dict):
return flatten(list(x.values()))
return []
def hook(module, input, output):
if in_dtype is not None:
dtypes = {x.dtype for x in flatten(input)}
assert (
dtypes == in_dtype
), f"Expected input dtype of {type(module)} is {in_dtype}. Got {dtypes} instead!"
if out_dtype is not None:
dtypes = {x.dtype for x in flatten(output)}
assert (
dtypes == out_dtype
), f"Expected output dtype of {type(module)} is {out_dtype}. Got {dtypes} instead!"
with model.register_forward_hook(hook):
yield
def create_model_input(img, inst=None):
if inst is not None:
return {"image": img, "instances": inst}
else:
return {"image": img}
def get_empty_instance(h, w):
inst = Instances((h, w))
inst.gt_boxes = Boxes(torch.rand(0, 4))
inst.gt_classes = torch.tensor([]).to(dtype=torch.int64)
inst.gt_masks = BitMasks(torch.rand(0, h, w))
return inst
def get_regular_bitmask_instances(h, w):
inst = Instances((h, w))
inst.gt_boxes = Boxes(torch.rand(3, 4))
inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2]
inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64)
inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5))
return inst
class ModelE2ETest:
def setUp(self):
torch.manual_seed(43)
self.model = get_model_no_weights(self.CONFIG_PATH)
def _test_eval(self, input_sizes):
inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
self.model.eval()
self.model(inputs)
def _test_train(self, input_sizes, instances):
assert len(input_sizes) == len(instances)
inputs = [
create_model_input(torch.rand(3, s[0], s[1]), inst)
for s, inst in zip(input_sizes, instances)
]
self.model.train()
with EventStorage():
losses = self.model(inputs)
sum(losses.values()).backward()
del losses
def _inf_tensor(self, *shape):
return 1.0 / torch.zeros(*shape, device=self.model.device)
def _nan_tensor(self, *shape):
return torch.zeros(*shape, device=self.model.device).fill_(float("nan"))
def test_empty_data(self):
instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
self._test_eval([(200, 250), (200, 249)])
self._test_train([(200, 250), (200, 249)], instances)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
def test_eval_tocpu(self):
model = deepcopy(self.model).cpu()
model.eval()
input_sizes = [(200, 250), (200, 249)]
inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
model(inputs)
class MaskRCNNE2ETest(ModelE2ETest, unittest.TestCase):
CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
def test_half_empty_data(self):
instances = [get_empty_instance(200, 250), get_regular_bitmask_instances(200, 249)]
self._test_train([(200, 250), (200, 249)], instances)
    # This test is flaky because in some environments the output features are zero due to ReLU
# def test_rpn_inf_nan_data(self):
# self.model.eval()
# for tensor in [self._inf_tensor, self._nan_tensor]:
# images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
# features = {
# "p2": tensor(1, 256, 256, 256),
# "p3": tensor(1, 256, 128, 128),
# "p4": tensor(1, 256, 64, 64),
# "p5": tensor(1, 256, 32, 32),
# "p6": tensor(1, 256, 16, 16),
# }
# props, _ = self.model.proposal_generator(images, features)
# self.assertEqual(len(props[0]), 0)
def test_roiheads_inf_nan_data(self):
self.model.eval()
for tensor in [self._inf_tensor, self._nan_tensor]:
images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
features = {
"p2": tensor(1, 256, 256, 256),
"p3": tensor(1, 256, 128, 128),
"p4": tensor(1, 256, 64, 64),
"p5": tensor(1, 256, 32, 32),
"p6": tensor(1, 256, 16, 16),
}
props = [Instances((510, 510))]
props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device)
props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1)
det, _ = self.model.roi_heads(images, features, props)
self.assertEqual(len(det[0]), 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_autocast(self):
from torch.cuda.amp import autocast
inputs = [{"image": torch.rand(3, 100, 100)}]
self.model.eval()
with autocast(), typecheck_hook(
self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
), typecheck_hook(
self.model.roi_heads.box_predictor, in_dtype=torch.float16, out_dtype=torch.float16
):
out = self.model.inference(inputs, do_postprocess=False)[0]
self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
self.assertEqual(out.pred_masks.dtype, torch.float16)
self.assertEqual(out.scores.dtype, torch.float32) # scores comes from softmax
class RetinaNetE2ETest(ModelE2ETest, unittest.TestCase):
CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml"
def test_inf_nan_data(self):
self.model.eval()
self.model.score_threshold = -999999999
for tensor in [self._inf_tensor, self._nan_tensor]:
images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
features = [
tensor(1, 256, 128, 128),
tensor(1, 256, 64, 64),
tensor(1, 256, 32, 32),
tensor(1, 256, 16, 16),
tensor(1, 256, 8, 8),
]
anchors = self.model.anchor_generator(features)
_, pred_anchor_deltas = self.model.head(features)
HWAs = [np.prod(x.shape[-3:]) // 4 for x in pred_anchor_deltas]
pred_logits = [tensor(1, HWA, self.model.num_classes) for HWA in HWAs]
pred_anchor_deltas = [tensor(1, HWA, 4) for HWA in HWAs]
det = self.model.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
# all predictions (if any) are infinite or nan
if len(det[0]):
self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_autocast(self):
from torch.cuda.amp import autocast
inputs = [{"image": torch.rand(3, 100, 100)}]
self.model.eval()
with autocast(), typecheck_hook(
self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
), typecheck_hook(self.model.head, in_dtype=torch.float16, out_dtype=torch.float16):
out = self.model(inputs)[0]["instances"]
self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
self.assertEqual(out.scores.dtype, torch.float16)
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_model_e2e.py
|
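The typecheck_hook helper above boils down to torch's forward hooks; here is a self-contained sketch of the same idea on a toy module:
import torch
from torch import nn
mod = nn.Linear(4, 4)
def dtype_hook(module, inputs, output):
    # forward hooks receive the positional inputs as a tuple plus the output
    assert all(t.dtype == torch.float32 for t in inputs)
    assert output.dtype == torch.float32
handle = mod.register_forward_hook(dtype_hook)
mod(torch.rand(2, 4))  # the hook runs here and its assertions pass
handle.remove()        # always detach hooks when done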
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator
logger = logging.getLogger(__name__)
class TestAnchorGenerator(unittest.TestCase):
def test_default_anchor_generator(self):
cfg = get_cfg()
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)])
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
anchors = anchor_generator([features["stage3"]])
expected_anchor_tensor = torch.tensor(
[
[-32.0, -8.0, 32.0, 8.0],
[-16.0, -16.0, 16.0, 16.0],
[-8.0, -32.0, 8.0, 32.0],
[-64.0, -16.0, 64.0, 16.0],
[-32.0, -32.0, 32.0, 32.0],
[-16.0, -64.0, 16.0, 64.0],
[-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4)
[-12.0, -16.0, 20.0, 16.0],
[-4.0, -32.0, 12.0, 32.0],
[-60.0, -16.0, 68.0, 16.0],
[-28.0, -32.0, 36.0, 32.0],
[-12.0, -64.0, 20.0, 64.0],
]
)
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
def test_default_anchor_generator_centered(self):
# test explicit args
anchor_generator = DefaultAnchorGenerator(
sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4]
)
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
expected_anchor_tensor = torch.tensor(
[
[-30.0, -6.0, 34.0, 10.0],
[-14.0, -14.0, 18.0, 18.0],
[-6.0, -30.0, 10.0, 34.0],
[-62.0, -14.0, 66.0, 18.0],
[-30.0, -30.0, 34.0, 34.0],
[-14.0, -62.0, 18.0, 66.0],
[-26.0, -6.0, 38.0, 10.0],
[-10.0, -14.0, 22.0, 18.0],
[-2.0, -30.0, 14.0, 34.0],
[-58.0, -14.0, 70.0, 18.0],
[-26.0, -30.0, 38.0, 34.0],
[-10.0, -62.0, 22.0, 66.0],
]
)
anchors = anchor_generator([features["stage3"]])
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
anchors = torch.jit.script(anchor_generator)([features["stage3"]])
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
def test_rrpn_anchor_generator(self):
cfg = get_cfg()
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float]
anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)])
# only the last two dimensions of features matter here
num_images = 2
features = {"stage3": torch.rand(num_images, 96, 1, 2)}
anchors = anchor_generator([features["stage3"]])
expected_anchor_tensor = torch.tensor(
[
[0.0, 0.0, 64.0, 16.0, 0.0],
[0.0, 0.0, 64.0, 16.0, 45.0],
[0.0, 0.0, 32.0, 32.0, 0.0],
[0.0, 0.0, 32.0, 32.0, 45.0],
[0.0, 0.0, 16.0, 64.0, 0.0],
[0.0, 0.0, 16.0, 64.0, 45.0],
[0.0, 0.0, 128.0, 32.0, 0.0],
[0.0, 0.0, 128.0, 32.0, 45.0],
[0.0, 0.0, 64.0, 64.0, 0.0],
[0.0, 0.0, 64.0, 64.0, 45.0],
[0.0, 0.0, 32.0, 128.0, 0.0],
[0.0, 0.0, 32.0, 128.0, 45.0],
[4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4)
[4.0, 0.0, 64.0, 16.0, 45.0],
[4.0, 0.0, 32.0, 32.0, 0.0],
[4.0, 0.0, 32.0, 32.0, 45.0],
[4.0, 0.0, 16.0, 64.0, 0.0],
[4.0, 0.0, 16.0, 64.0, 45.0],
[4.0, 0.0, 128.0, 32.0, 0.0],
[4.0, 0.0, 128.0, 32.0, 45.0],
[4.0, 0.0, 64.0, 64.0, 0.0],
[4.0, 0.0, 64.0, 64.0, 45.0],
[4.0, 0.0, 32.0, 128.0, 0.0],
[4.0, 0.0, 32.0, 128.0, 45.0],
]
)
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_anchor_generator.py
|
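The expected anchor tensors above follow directly from the size/aspect-ratio parameterization; a short worked example of the first expected row:
import math
def base_anchor(size, aspect_ratio):
    # detectron2 keeps the area at size**2: h = size * sqrt(ar), w = size / sqrt(ar)
    h = size * math.sqrt(aspect_ratio)
    w = size / math.sqrt(aspect_ratio)
    return [-w / 2.0, -h / 2.0, w / 2.0, h / 2.0]
# size 32 with aspect ratio 0.25 gives the first expected row above:
assert base_anchor(32, 0.25) == [-32.0, -8.0, 32.0, 8.0]
# the second grid cell repeats each box shifted by the stride (4) in x,
# e.g. [-32 + 4, -8, 32 + 4, 8] == [-28, -8, 36, 8]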
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
from copy import deepcopy
import torch
from torch import nn
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.export.torchscript_patch import (
freeze_training_mode,
patch_builtin_len,
patch_instances,
)
from detectron2.layers import ShapeSpec
from detectron2.modeling.proposal_generator.build import build_proposal_generator
from detectron2.modeling.roi_heads import (
FastRCNNConvFCHead,
KRCNNConvDeconvUpsampleHead,
MaskRCNNConvUpsampleHead,
StandardROIHeads,
build_roi_heads,
)
from detectron2.projects import point_rend
from detectron2.structures import BitMasks, Boxes, ImageList, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
from detectron2.utils.testing import assert_instances_allclose, random_boxes
logger = logging.getLogger(__name__)
"""
Make sure the losses of ROIHeads/RPN do not change, to avoid
breaking the forward logic by mistake.
This relies on the assumption that PyTorch's RNG is stable.
"""
class ROIHeadsTest(unittest.TestCase):
def test_roi_heads(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
cfg.MODEL.MASK_ON = True
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
image_shape = (15, 15)
gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
gt_instance0 = Instances(image_shape)
gt_instance0.gt_boxes = Boxes(gt_boxes0)
gt_instance0.gt_classes = torch.tensor([2, 1])
gt_instance0.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
gt_instance1 = Instances(image_shape)
gt_instance1.gt_boxes = Boxes(gt_boxes1)
gt_instance1.gt_classes = torch.tensor([1, 2])
gt_instance1.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5)
gt_instances = [gt_instance0, gt_instance1]
proposal_generator = build_proposal_generator(cfg, feature_shape)
roi_heads = StandardROIHeads(cfg, feature_shape)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(images, features, gt_instances)
_, detector_losses = roi_heads(images, features, proposals, gt_instances)
detector_losses.update(proposal_losses)
expected_losses = {
"loss_cls": 4.5253729820251465,
"loss_box_reg": 0.009785720147192478,
"loss_mask": 0.693184494972229,
"loss_rpn_cls": 0.08186662942171097,
"loss_rpn_loc": 0.1104838103055954,
}
succ = all(
torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
for name in detector_losses.keys()
)
self.assertTrue(
            succ,
            "Losses have changed! New losses: {}".format(
{k: v.item() for k, v in detector_losses.items()}
),
)
def test_rroi_heads(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
image_shape = (15, 15)
gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32)
gt_instance0 = Instances(image_shape)
gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0)
gt_instance0.gt_classes = torch.tensor([2, 1])
gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32)
gt_instance1 = Instances(image_shape)
gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1)
gt_instance1.gt_classes = torch.tensor([1, 2])
gt_instances = [gt_instance0, gt_instance1]
proposal_generator = build_proposal_generator(cfg, feature_shape)
roi_heads = build_roi_heads(cfg, feature_shape)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(images, features, gt_instances)
_, detector_losses = roi_heads(images, features, proposals, gt_instances)
detector_losses.update(proposal_losses)
expected_losses = {
"loss_cls": 4.365657806396484,
"loss_box_reg": 0.0015851043863222003,
"loss_rpn_cls": 0.2427729219198227,
"loss_rpn_loc": 0.3646621108055115,
}
succ = all(
torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0)))
for name in detector_losses.keys()
)
self.assertTrue(
            succ,
            "Losses have changed! New losses: {}".format(
{k: v.item() for k, v in detector_losses.items()}
),
)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_box_head_scriptability(self):
input_shape = ShapeSpec(channels=1024, height=14, width=14)
box_features = torch.randn(4, 1024, 14, 14)
box_head = FastRCNNConvFCHead(
input_shape, conv_dims=[512, 512], fc_dims=[1024, 1024]
).eval()
script_box_head = torch.jit.script(box_head)
origin_output = box_head(box_features)
script_output = script_box_head(box_features)
self.assertTrue(torch.equal(origin_output, script_output))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_mask_head_scriptability(self):
input_shape = ShapeSpec(channels=1024)
mask_features = torch.randn(4, 1024, 14, 14)
image_shapes = [(10, 10), (15, 15)]
pred_instance0 = Instances(image_shapes[0])
pred_classes0 = torch.tensor([1, 2, 3], dtype=torch.int64)
pred_instance0.pred_classes = pred_classes0
pred_instance1 = Instances(image_shapes[1])
pred_classes1 = torch.tensor([4], dtype=torch.int64)
pred_instance1.pred_classes = pred_classes1
mask_head = MaskRCNNConvUpsampleHead(
input_shape, num_classes=80, conv_dims=[256, 256]
).eval()
        # pred_instance is modified in place during the inference
        # process of `MaskRCNNConvUpsampleHead`
origin_outputs = mask_head(mask_features, deepcopy([pred_instance0, pred_instance1]))
fields = {"pred_masks": torch.Tensor, "pred_classes": torch.Tensor}
with freeze_training_mode(mask_head), patch_instances(fields) as NewInstances:
            script_mask_head = torch.jit.script(mask_head)
pred_instance0 = NewInstances.from_instances(pred_instance0)
pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = script_mask_head(mask_features, [pred_instance0, pred_instance1])
for origin_ins, script_ins in zip(origin_outputs, script_outputs):
assert_instances_allclose(origin_ins, script_ins, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_keypoint_head_scriptability(self):
input_shape = ShapeSpec(channels=1024, height=14, width=14)
keypoint_features = torch.randn(4, 1024, 14, 14)
image_shapes = [(10, 10), (15, 15)]
pred_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6], [1, 5, 2, 8]], dtype=torch.float32)
pred_instance0 = Instances(image_shapes[0])
pred_instance0.pred_boxes = Boxes(pred_boxes0)
pred_boxes1 = torch.tensor([[7, 3, 10, 5]], dtype=torch.float32)
pred_instance1 = Instances(image_shapes[1])
pred_instance1.pred_boxes = Boxes(pred_boxes1)
keypoint_head = KRCNNConvDeconvUpsampleHead(
input_shape, num_keypoints=17, conv_dims=[512, 512]
).eval()
origin_outputs = keypoint_head(
keypoint_features, deepcopy([pred_instance0, pred_instance1])
)
fields = {
"pred_boxes": Boxes,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
with freeze_training_mode(keypoint_head), patch_instances(fields) as NewInstances:
            script_keypoint_head = torch.jit.script(keypoint_head)
pred_instance0 = NewInstances.from_instances(pred_instance0)
pred_instance1 = NewInstances.from_instances(pred_instance1)
            script_outputs = script_keypoint_head(
keypoint_features, [pred_instance0, pred_instance1]
)
for origin_ins, script_ins in zip(origin_outputs, script_outputs):
assert_instances_allclose(origin_ins, script_ins, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_StandardROIHeads_scriptability(self):
cfg = get_cfg()
cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
cfg.MODEL.MASK_ON = True
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.01
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)}
roi_heads = StandardROIHeads(cfg, feature_shape).eval()
proposal0 = Instances(image_sizes[0])
proposal_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
proposal0.proposal_boxes = Boxes(proposal_boxes0)
proposal0.objectness_logits = torch.tensor([0.5, 0.7], dtype=torch.float32)
proposal1 = Instances(image_sizes[1])
proposal_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
proposal1.proposal_boxes = Boxes(proposal_boxes1)
proposal1.objectness_logits = torch.tensor([0.1, 0.9], dtype=torch.float32)
proposals = [proposal0, proposal1]
pred_instances, _ = roi_heads(images, features, proposals)
fields = {
"objectness_logits": torch.Tensor,
"proposal_boxes": Boxes,
"pred_classes": torch.Tensor,
"scores": torch.Tensor,
"pred_masks": torch.Tensor,
"pred_boxes": Boxes,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
with freeze_training_mode(roi_heads), patch_instances(fields) as new_instances:
proposal0 = new_instances.from_instances(proposal0)
proposal1 = new_instances.from_instances(proposal1)
proposals = [proposal0, proposal1]
            scripted_roi_heads = torch.jit.script(roi_heads)
            scripted_pred_instances, _ = scripted_roi_heads(images, features, proposals)
for instance, scripted_instance in zip(pred_instances, scripted_pred_instances):
assert_instances_allclose(instance, scripted_instance, rtol=0)
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_PointRend_mask_head_tracing(self):
cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
point_rend.add_pointrend_config(cfg)
cfg.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3"]
cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ""
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
chan = 256
head = point_rend.PointRendMaskHead(
cfg,
{
"p2": ShapeSpec(channels=chan, stride=4),
"p3": ShapeSpec(channels=chan, stride=8),
},
)
def gen_inputs(h, w, N):
p2 = torch.rand(1, chan, h, w)
p3 = torch.rand(1, chan, h // 2, w // 2)
boxes = random_boxes(N, max_coord=h)
return p2, p3, boxes
class Wrap(nn.ModuleDict):
def forward(self, p2, p3, boxes):
features = {
"p2": p2,
"p3": p3,
}
inst = Instances((p2.shape[2] * 4, p2.shape[3] * 4))
inst.pred_boxes = Boxes(boxes)
inst.pred_classes = torch.zeros(inst.__len__(), dtype=torch.long)
out = self.head(features, [inst])[0]
return out.pred_masks
model = Wrap({"head": head})
model.eval()
with torch.no_grad(), patch_builtin_len():
traced = torch.jit.trace(model, gen_inputs(302, 208, 20))
inputs = gen_inputs(100, 120, 30)
out_eager = model(*inputs)
out_trace = traced(*inputs)
self.assertTrue(torch.allclose(out_eager, out_trace))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_roi_heads.py
|
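A compact sketch of the patch_instances pattern these scriptability tests lean on: TorchScript cannot handle the dynamic Instances class, so a static class with declared fields is generated and populated from the eager one. The field set below is illustrative.
import torch
from detectron2.export.torchscript_patch import patch_instances
from detectron2.structures import Boxes, Instances
fields = {"proposal_boxes": Boxes, "scores": torch.Tensor}
with patch_instances(fields) as NewInstances:
    inst = Instances((10, 10))
    inst.proposal_boxes = Boxes(torch.rand(3, 4))
    inst.scores = torch.rand(3)
    static_inst = NewInstances.from_instances(inst)  # scriptable counterpart
    assert static_inst.scores.shape[0] == 3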
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import sys
import unittest
import torch
from detectron2.config import get_cfg
from detectron2.export import scripting_with_instances
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.proposal_generator import RPN, build_proposal_generator
from detectron2.modeling.proposal_generator.proposal_utils import (
add_ground_truth_to_proposals,
find_top_rpn_proposals,
)
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
logger = logging.getLogger(__name__)
class RPNTest(unittest.TestCase):
def get_gt_and_features(self):
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = Boxes(gt_boxes)
return (gt_instances, features, images, image_sizes)
def test_rpn(self):
torch.manual_seed(121)
cfg = get_cfg()
backbone = build_backbone(cfg)
proposal_generator = RPN(cfg, backbone.output_shape())
(gt_instances, features, images, image_sizes) = self.get_gt_and_features()
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.08011703193),
"loss_rpn_loc": torch.tensor(0.101470276),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
self.assertEqual(len(proposals), len(image_sizes))
for proposal, im_size in zip(proposals, image_sizes):
self.assertEqual(proposal.image_size, im_size)
expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]])
expected_objectness_logit = torch.tensor([0.1596, -0.0007])
self.assertTrue(
torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4)
)
self.assertTrue(
torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4)
)
def verify_rpn(self, conv_dims, expected_conv_dims):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.RPN.CONV_DIMS = conv_dims
backbone = build_backbone(cfg)
proposal_generator = RPN(cfg, backbone.output_shape())
for k, conv in enumerate(proposal_generator.rpn_head.conv):
self.assertEqual(expected_conv_dims[k], conv.out_channels)
return proposal_generator
def test_rpn_larger_num_convs(self):
conv_dims = [64, 64, 64, 64, 64]
proposal_generator = self.verify_rpn(conv_dims, conv_dims)
(gt_instances, features, images, image_sizes) = self.get_gt_and_features()
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.08122821152),
"loss_rpn_loc": torch.tensor(0.10064548254),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
def test_rpn_conv_dims_not_set(self):
conv_dims = [-1, -1, -1]
expected_conv_dims = [1024, 1024, 1024]
self.verify_rpn(conv_dims, expected_conv_dims)
# https://github.com/pytorch/pytorch/issues/46964
@unittest.skipIf(
TORCH_VERSION < (1, 7) or sys.version_info.minor <= 6, "Insufficient pytorch version"
)
def test_rpn_scriptability(self):
cfg = get_cfg()
proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval()
num_images = 2
images_tensor = torch.rand(num_images, 30, 40)
image_sizes = [(32, 32), (30, 40)]
images = ImageList(images_tensor, image_sizes)
features = {"res4": torch.rand(num_images, 1024, 1, 2)}
fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
proposal_generator_ts = scripting_with_instances(proposal_generator, fields)
proposals, _ = proposal_generator(images, features)
proposals_ts, _ = proposal_generator_ts(images, features)
for proposal, proposal_ts in zip(proposals, proposals_ts):
self.assertEqual(proposal.image_size, proposal_ts.image_size)
self.assertTrue(
torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor)
)
self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits))
def test_rrpn(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
backbone = build_backbone(cfg)
proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
with EventStorage(): # capture events in a new storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.04291602224),
"loss_rpn_loc": torch.tensor(0.145077362),
}
for name in expected_losses.keys():
err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
name, proposal_losses[name], expected_losses[name]
)
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
expected_proposal_box = torch.tensor(
[
[-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801],
[13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749],
[8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432],
[5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777],
]
)
expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029])
torch.set_printoptions(precision=8, sci_mode=False)
self.assertEqual(len(proposals), len(image_sizes))
proposal = proposals[0]
        # There seems to be some randomness in the result across different machines:
        # this test can run on one machine 100 times with exactly the same result,
        # yet a different machine may produce slightly different results,
        # hence the atol here.
err_msg = "computed proposal boxes = {}, expected {}".format(
proposal.proposal_boxes.tensor, expected_proposal_box
)
self.assertTrue(
torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5),
err_msg,
)
err_msg = "computed objectness logits = {}, expected {}".format(
proposal.objectness_logits, expected_objectness_logit
)
self.assertTrue(
torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5),
err_msg,
)
def test_find_rpn_proposals_inf(self):
N, Hi, Wi, A = 3, 3, 3, 3
proposals = [torch.rand(N, Hi * Wi * A, 4)]
pred_logits = [torch.rand(N, Hi * Wi * A)]
pred_logits[0][1][3:5].fill_(float("inf"))
find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False)
@unittest.skipIf(TORCH_VERSION < (1, 7), "Insufficient pytorch version")
def test_find_rpn_proposals_tracing(self):
N, Hi, Wi, A = 3, 50, 50, 9
proposal = torch.rand(N, Hi * Wi * A, 4)
pred_logit = torch.rand(N, Hi * Wi * A)
def func(proposal, logit, image_size):
r = find_top_rpn_proposals(
[proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False
)[0]
size = r.image_size
if not isinstance(size, torch.Tensor):
size = torch.tensor(size)
return (size, r.proposal_boxes.tensor, r.objectness_logits)
other_inputs = []
# test that it generalizes to other shapes
for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]:
other_inputs.append(
(
torch.rand(N, Hi * Wi * A, 4),
torch.rand(N, Hi * Wi * A),
torch.tensor([shp, shp]),
)
)
torch.jit.trace(
func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs
)
def test_append_gt_to_proposal(self):
proposals = Instances(
(10, 10),
**{
"proposal_boxes": Boxes(torch.empty((0, 4))),
"objectness_logits": torch.tensor([]),
"custom_attribute": torch.tensor([]),
}
)
gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]]))
self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals])
gt_instances = Instances((10, 10))
gt_instances.gt_boxes = gt_boxes
self.assertRaises(
AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals]
)
gt_instances.custom_attribute = torch.tensor([1])
gt_instances.custom_attribute2 = torch.tensor([1])
new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0]
self.assertEqual(new_proposals.custom_attribute[0], 1)
# new proposals should only include the attributes in proposals
self.assertRaises(AttributeError, lambda: new_proposals.custom_attribute2)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_rpn.py
|
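To make test_append_gt_to_proposal above concrete, here is a minimal passing case, assuming only the two standard proposal fields so that no custom attributes need to match:
import torch
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.structures import Boxes, Instances
proposals = Instances((10, 10))
proposals.proposal_boxes = Boxes(torch.tensor([[0.0, 0.0, 4.0, 4.0], [1.0, 1.0, 6.0, 6.0]]))
proposals.objectness_logits = torch.tensor([0.5, 0.7])
gt = Instances((10, 10))
gt.gt_boxes = Boxes(torch.tensor([[0.0, 0.0, 1.0, 1.0]]))
(merged,) = add_ground_truth_to_proposals([gt], [proposals])
assert len(merged) == 3  # the GT box is appended as a proposal with a very high logit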
banmo-main
|
third_party/detectron2_old/tests/modeling/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.rotated_fast_rcnn import RotatedFastRCNNOutputLayers
from detectron2.structures import Boxes, Instances, RotatedBoxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.events import EventStorage
logger = logging.getLogger(__name__)
class FastRCNNTest(unittest.TestCase):
def test_fast_rcnn(self):
torch.manual_seed(132)
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
feature_pooled = torch.rand(2, box_head_output_size)
predictions = box_predictor(feature_pooled)
proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32)
gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
proposal = Instances((10, 10))
proposal.proposal_boxes = Boxes(proposal_boxes)
proposal.gt_boxes = Boxes(gt_boxes)
proposal.gt_classes = torch.tensor([1, 2])
with EventStorage(): # capture events in a new storage to discard them
losses = box_predictor.losses(predictions, [proposal])
expected_losses = {
"loss_cls": torch.tensor(1.7951188087),
"loss_box_reg": torch.tensor(4.0357131958),
}
for name in expected_losses.keys():
assert torch.allclose(losses[name], expected_losses[name])
def test_fast_rcnn_empty_batch(self, device="cpu"):
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=10),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=8,
).to(device=device)
logits = torch.randn(0, 100, requires_grad=True, device=device)
deltas = torch.randn(0, 4, requires_grad=True, device=device)
losses = box_predictor.losses([logits, deltas], [])
for value in losses.values():
self.assertTrue(torch.allclose(value, torch.zeros_like(value)))
sum(losses.values()).backward()
self.assertTrue(logits.grad is not None)
self.assertTrue(deltas.grad is not None)
predictions, _ = box_predictor.inference([logits, deltas], [])
self.assertEqual(len(predictions), 0)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_fast_rcnn_empty_batch_cuda(self):
self.test_fast_rcnn_empty_batch(device=torch.device("cuda"))
def test_fast_rcnn_rotated(self):
torch.manual_seed(132)
box_head_output_size = 8
box_predictor = RotatedFastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransformRotated(weights=(10, 10, 5, 5, 1)),
num_classes=5,
)
feature_pooled = torch.rand(2, box_head_output_size)
predictions = box_predictor(feature_pooled)
proposal_boxes = torch.tensor(
[[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32
)
gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
proposal = Instances((10, 10))
proposal.proposal_boxes = RotatedBoxes(proposal_boxes)
proposal.gt_boxes = RotatedBoxes(gt_boxes)
proposal.gt_classes = torch.tensor([1, 2])
with EventStorage(): # capture events in a new storage to discard them
losses = box_predictor.losses(predictions, [proposal])
# Note: the expected losses are slightly different even if
# the boxes are essentially the same as in the FastRCNNOutput test, because
        # bbox_pred in FastRCNNOutputLayers has different Linear layers/initialization
# between the two cases.
expected_losses = {
"loss_cls": torch.tensor(1.7920907736),
"loss_box_reg": torch.tensor(4.0410838127),
}
for name in expected_losses.keys():
assert torch.allclose(losses[name], expected_losses[name])
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_predict_boxes_tracing(self):
class Model(torch.nn.Module):
def __init__(self, output_layer):
super(Model, self).__init__()
self._output_layer = output_layer
def forward(self, proposal_deltas, proposal_boxes):
instances = Instances((10, 10))
instances.proposal_boxes = Boxes(proposal_boxes)
return self._output_layer.predict_boxes((None, proposal_deltas), [instances])
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
model = Model(box_predictor)
from detectron2.export.torchscript_patch import patch_builtin_len
with torch.no_grad(), patch_builtin_len():
func = torch.jit.trace(model, (torch.randn(10, 20), torch.randn(10, 4)))
o = func(torch.randn(10, 20), torch.randn(10, 4))
self.assertEqual(o[0].shape, (10, 20))
o = func(torch.randn(5, 20), torch.randn(5, 4))
self.assertEqual(o[0].shape, (5, 20))
o = func(torch.randn(20, 20), torch.randn(20, 4))
self.assertEqual(o[0].shape, (20, 20))
def test_predict_probs_tracing(self):
class Model(torch.nn.Module):
def __init__(self, output_layer):
super(Model, self).__init__()
self._output_layer = output_layer
def forward(self, scores, proposal_boxes):
instances = Instances((10, 10))
instances.proposal_boxes = Boxes(proposal_boxes)
return self._output_layer.predict_probs((scores, None), [instances])
box_head_output_size = 8
box_predictor = FastRCNNOutputLayers(
ShapeSpec(channels=box_head_output_size),
box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
num_classes=5,
)
model = Model(box_predictor)
from detectron2.export.torchscript_patch import patch_builtin_len
with torch.no_grad(), patch_builtin_len():
func = torch.jit.trace(model, (torch.randn(10, 6), torch.rand(10, 4)))
o = func(torch.randn(10, 6), torch.randn(10, 4))
self.assertEqual(o[0].shape, (10, 6))
o = func(torch.randn(5, 6), torch.randn(5, 4))
self.assertEqual(o[0].shape, (5, 6))
o = func(torch.randn(20, 6), torch.randn(20, 4))
self.assertEqual(o[0].shape, (20, 6))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_fast_rcnn.py
|
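For reference, a sketch of the prediction shapes these tests depend on: FastRCNNOutputLayers returns a (scores, deltas) pair with one extra background column and class-specific box deltas.
import torch
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
predictor = FastRCNNOutputLayers(
    ShapeSpec(channels=8),
    box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)),
    num_classes=5,
)
scores, deltas = predictor(torch.rand(2, 8))
assert scores.shape == (2, 6)   # num_classes + 1 (the extra column is background)
assert deltas.shape == (2, 20)  # 4 deltas per class, matching the (10, 20) tracing shapes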
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
import torch
from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import random_boxes
logger = logging.getLogger(__name__)
class TestBox2BoxTransform(unittest.TestCase):
def test_reconstruction(self):
weights = (5, 5, 10, 10)
b2b_tfm = Box2BoxTransform(weights=weights)
src_boxes = random_boxes(10)
dst_boxes = random_boxes(10)
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda"))
for device in devices:
src_boxes = src_boxes.to(device=device)
dst_boxes = dst_boxes.to(device=device)
deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes)
dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes)
self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed))
@unittest.skipIf(TORCH_VERSION < (1, 8), "Insufficient pytorch version")
def test_apply_deltas_tracing(self):
weights = (5, 5, 10, 10)
b2b_tfm = Box2BoxTransform(weights=weights)
with torch.no_grad():
func = torch.jit.trace(b2b_tfm.apply_deltas, (torch.randn(10, 20), torch.randn(10, 4)))
o = func(torch.randn(10, 20), torch.randn(10, 4))
self.assertEqual(o.shape, (10, 20))
o = func(torch.randn(5, 20), torch.randn(5, 4))
self.assertEqual(o.shape, (5, 20))
def random_rotated_boxes(mean_box, std_length, std_angle, N):
return torch.cat(
[torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1
) + torch.tensor(mean_box, dtype=torch.float)
class TestBox2BoxTransformRotated(unittest.TestCase):
def test_reconstruction(self):
weights = (5, 5, 10, 10, 1)
b2b_transform = Box2BoxTransformRotated(weights=weights)
src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda"))
for device in devices:
src_boxes = src_boxes.to(device=device)
dst_boxes = dst_boxes.to(device=device)
deltas = b2b_transform.get_deltas(src_boxes, dst_boxes)
dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes)
assert torch.allclose(dst_boxes[:, :4], dst_boxes_reconstructed[:, :4], atol=1e-5)
# angle difference has to be normalized
assert torch.allclose(
(dst_boxes[:, 4] - dst_boxes_reconstructed[:, 4] + 180.0) % 360.0 - 180.0,
torch.zeros_like(dst_boxes[:, 4]),
atol=1e-4,
)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/modeling/test_box2box_transform.py
|
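The round-trip property tested above follows from the standard Faster R-CNN delta parameterization; a single box pair worked by hand under the same (5, 5, 10, 10)-style weights:
import math
wx, wy, ww, wh = 5.0, 5.0, 10.0, 10.0
src = (0.0, 0.0, 10.0, 10.0)  # center (5, 5), width 10, height 10
dst = (5.0, 5.0, 25.0, 15.0)  # center (15, 10), width 20, height 10
dx = wx * (15.0 - 5.0) / 10.0    # 5.0: center shift in x, normalized by src width
dy = wy * (10.0 - 5.0) / 10.0    # 2.5: center shift in y, normalized by src height
dw = ww * math.log(20.0 / 10.0)  # ~6.93: log width ratio
dh = wh * math.log(10.0 / 10.0)  # 0.0: heights are equal
# apply_deltas inverts each step exactly, hence get_deltas/apply_deltas reconstructs dst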
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os
import tempfile
import unittest
import pycocotools.mask as mask_util
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict, load_coco_json
from detectron2.structures import BoxMode
def make_mask():
"""
    Makes a donut-shaped binary mask.
"""
H = 100
W = 100
mask = np.zeros([H, W], dtype=np.uint8)
for x in range(W):
for y in range(H):
d = np.linalg.norm(np.array([W, H]) / 2 - np.array([x, y]))
if d > 10 and d < 20:
mask[y, x] = 1
return mask
def uncompressed_rle(mask):
    # COCO uncompressed RLE: flatten in column-major (Fortran) order, then store
    # run lengths of alternating 0s and 1s, starting with the count of leading zeros.
    flat = mask.flatten(order="F").tolist()
counts = []
p = False
cnt = 0
    for i in flat:
if i == p:
cnt += 1
else:
counts.append(cnt)
p = i
cnt = 1
counts.append(cnt)
return {"counts": counts, "size": [mask.shape[0], mask.shape[1]]}
def make_dataset_dicts(mask, compressed: bool = True):
"""
Returns a list of dicts that represents a single COCO data point for
object detection. The single instance given by `mask` is represented by
RLE, either compressed or uncompressed.
"""
record = {}
record["file_name"] = "test"
record["image_id"] = 0
record["height"] = mask.shape[0]
record["width"] = mask.shape[1]
y, x = np.nonzero(mask)
if compressed:
segmentation = mask_util.encode(np.asarray(mask, order="F"))
else:
segmentation = uncompressed_rle(mask)
min_x = np.min(x)
max_x = np.max(x)
min_y = np.min(y)
max_y = np.max(y)
obj = {
"bbox": [min_x, min_y, max_x, max_y],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 0,
"iscrowd": 0,
"segmentation": segmentation,
}
record["annotations"] = [obj]
return [record]
class TestRLEToJson(unittest.TestCase):
def test(self):
# Make a dummy dataset.
mask = make_mask()
DatasetCatalog.register("test_dataset", lambda: make_dataset_dicts(mask))
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
# Dump to json.
json_dict = convert_to_coco_dict("test_dataset")
with tempfile.TemporaryDirectory() as tmpdir:
json_file_name = os.path.join(tmpdir, "test.json")
with open(json_file_name, "w") as f:
json.dump(json_dict, f)
# Load from json.
dicts = load_coco_json(json_file_name, "")
# Check the loaded mask matches the original.
anno = dicts[0]["annotations"][0]
loaded_mask = mask_util.decode(anno["segmentation"])
self.assertTrue(np.array_equal(loaded_mask, mask))
DatasetCatalog.pop("test_dataset")
MetadataCatalog.pop("test_dataset")
def test_uncompressed_RLE(self):
mask = make_mask()
rle = mask_util.encode(np.asarray(mask, order="F"))
uncompressed = uncompressed_rle(mask)
compressed = mask_util.frPyObjects(uncompressed, *rle["size"])
self.assertEqual(rle, compressed)
class TestConvertCOCO(unittest.TestCase):
@staticmethod
def generate_data():
record = {
"file_name": "test",
"image_id": 0,
"height": 100,
"width": 100,
"annotations": [
{
"bbox": [10, 10, 10, 10, 5],
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 0,
"iscrowd": 0,
},
{
"bbox": [15, 15, 3, 3],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 0,
"iscrowd": 0,
},
],
}
return [record]
def test_convert_to_coco(self):
DatasetCatalog.register("test_dataset", lambda: TestConvertCOCO.generate_data())
MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])
convert_to_coco_dict("test_dataset")
DatasetCatalog.pop("test_dataset")
MetadataCatalog.pop("test_dataset")
|
banmo-main
|
third_party/detectron2_old/tests/data/test_coco.py
|
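A tiny worked example of the encoding implemented by uncompressed_rle above (column-major flatten, run lengths of alternating zeros and ones, always starting with the zero run):
import numpy as np
mask = np.array([[0, 1],
                 [1, 1]], dtype=np.uint8)
# Fortran-order flatten walks columns first: [0, 1, 1, 1]
assert mask.flatten(order="F").tolist() == [0, 1, 1, 1]
# one leading zero, then a run of three ones:
# uncompressed_rle(mask) == {"counts": [1, 3], "size": [2, 2]}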
banmo-main
|
third_party/detectron2_old/tests/data/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import unittest
from detectron2.data.transforms.transform import RotationTransform
class TestRotationTransform(unittest.TestCase):
def assertEqualsArrays(self, a1, a2):
self.assertTrue(np.allclose(a1, a2))
def randomData(self, h=5, w=5):
image = np.random.rand(h, w)
coords = np.array([[i, j] for j in range(h + 1) for i in range(w + 1)], dtype=float)
return image, coords, h, w
def test180(self):
image, coords, h, w = self.randomData(6, 6)
rot = RotationTransform(h, w, 180, expand=False, center=None)
self.assertEqualsArrays(rot.apply_image(image), image[::-1, ::-1])
rotated_coords = [[w - c[0], h - c[1]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test45_coords(self):
_, coords, h, w = self.randomData(4, 6)
rot = RotationTransform(h, w, 45, expand=False, center=None)
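        # Expected values follow rotation about the image center (cx, cy) = (w/2, h/2):
        # x' = cx + (x - cx)cos45 + (y - cy)sin45 and
        # y' = cy - (x - cx)sin45 + (y - cy)cos45, with cos45 = sin45 = 1/sqrt(2).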
rotated_coords = [
[(x + y - (h + w) / 2) / np.sqrt(2) + w / 2, h / 2 + (y + (w - h) / 2 - x) / np.sqrt(2)]
for (x, y) in coords
]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test90(self):
image, coords, h, w = self.randomData()
rot = RotationTransform(h, w, 90, expand=False, center=None)
self.assertEqualsArrays(rot.apply_image(image), image.T[::-1])
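        # With expand=False on a square image, a 90-degree rotation maps a
        # point (x, y) to (y, w - x), consistent with image.T[::-1] above.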
rotated_coords = [[c[1], w - c[0]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test90_expand(self): # non-square image
image, coords, h, w = self.randomData(h=5, w=8)
rot = RotationTransform(h, w, 90, expand=True, center=None)
self.assertEqualsArrays(rot.apply_image(image), image.T[::-1])
rotated_coords = [[c[1], w - c[0]] for c in coords]
self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords)
def test_center_expand(self):
# center has no effect if expand=True because it only affects shifting
image, coords, h, w = self.randomData(h=5, w=8)
angle = np.random.randint(360)
rot1 = RotationTransform(h, w, angle, expand=True, center=None)
rot2 = RotationTransform(h, w, angle, expand=True, center=(0, 0))
rot3 = RotationTransform(h, w, angle, expand=True, center=(h, w))
rot4 = RotationTransform(h, w, angle, expand=True, center=(2, 5))
for r1 in [rot1, rot2, rot3, rot4]:
for r2 in [rot1, rot2, rot3, rot4]:
self.assertEqualsArrays(r1.apply_image(image), r2.apply_image(image))
self.assertEqualsArrays(r1.apply_coords(coords), r2.apply_coords(coords))
def test_inverse_transform(self):
image, coords, h, w = self.randomData(h=5, w=8)
rot = RotationTransform(h, w, 90, expand=True, center=None)
rot_image = rot.apply_image(image)
self.assertEqualsArrays(rot.inverse().apply_image(rot_image), image)
rot = RotationTransform(h, w, 65, expand=True, center=None)
rotated_coords = rot.apply_coords(coords)
self.assertEqualsArrays(rot.inverse().apply_coords(rotated_coords), coords)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_rotation_transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
from functools import partial
from iopath.common.file_io import LazyPath
from detectron2.data.build import DatasetFromList
def _a_slow_func(x):
return "path/{}".format(x)
class TestDatasetFromList(unittest.TestCase):
def test_using_lazy_path(self):
dataset = []
for i in range(10):
dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))})
dataset = DatasetFromList(dataset)
for i in range(10):
path = dataset[i]["file_name"]
self.assertTrue(isinstance(path, LazyPath))
self.assertEqual(os.fspath(path), _a_slow_func(i))
|
banmo-main
|
third_party/detectron2_old/tests/data/test_dataset.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import unittest
from unittest import mock
from PIL import Image, ImageOps
from detectron2.config import get_cfg
from detectron2.data import detection_utils
from detectron2.data import transforms as T
from detectron2.utils.logger import setup_logger
logger = logging.getLogger(__name__)
class TestTransforms(unittest.TestCase):
def setUp(self):
setup_logger()
def test_apply_rotated_boxes(self):
np.random.seed(125)
cfg = get_cfg()
is_train = True
augs = detection_utils.build_augmentation(cfg, is_train)
image = np.random.rand(200, 300)
image, transforms = T.apply_augmentations(augs, image)
image_shape = image.shape[:2] # h, w
assert image_shape == (800, 1200)
annotation = {"bbox": [179, 97, 62, 40, -56]}
boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5)
transformed_bbox = transforms.apply_rotated_box(boxes)[0]
expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)
err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox)
assert np.allclose(transformed_bbox, expected_bbox), err_msg
def test_resize_and_crop(self):
np.random.seed(125)
min_scale = 0.2
max_scale = 2.0
target_height = 1100
target_width = 1000
resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width)
fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width))
hflip_aug = T.RandomFlip()
augs = [resize_aug, fixed_size_crop_aug, hflip_aug]
original_image = np.random.rand(900, 800)
image, transforms = T.apply_augmentations(augs, original_image)
image_shape = image.shape[:2] # h, w
self.assertEqual((1100, 1000), image_shape)
boxes = np.array(
[[91, 46, 144, 111], [523, 251, 614, 295]],
dtype=np.float64,
)
        transformed_bboxes = transforms.apply_box(boxes)
        expected_bboxes = np.array(
            [
                [895.42, 33.42666667, 933.91125, 80.66],
                [554.0825, 182.39333333, 620.17125, 214.36666667],
            ],
            dtype=np.float64,
        )
        err_msg = "transformed_bboxes = {}, expected {}".format(transformed_bboxes, expected_bboxes)
        self.assertTrue(np.allclose(transformed_bboxes, expected_bboxes), err_msg)
polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]])
transformed_polygons = transforms.apply_polygons([polygon])
expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]])
self.assertEqual(1, len(transformed_polygons))
err_msg = "transformed_polygon = {}, expected {}".format(
transformed_polygons[0], expected_polygon
)
self.assertTrue(np.allclose(transformed_polygons[0], expected_polygon), err_msg)
def test_apply_rotated_boxes_unequal_scaling_factor(self):
np.random.seed(125)
h, w = 400, 200
newh, neww = 800, 800
image = np.random.rand(h, w)
augs = []
augs.append(T.Resize(shape=(newh, neww)))
image, transforms = T.apply_augmentations(augs, image)
image_shape = image.shape[:2] # h, w
assert image_shape == (newh, neww)
boxes = np.array(
[
[150, 100, 40, 20, 0],
[150, 100, 40, 20, 30],
[150, 100, 40, 20, 90],
[150, 100, 40, 20, -90],
],
dtype=np.float64,
)
transformed_boxes = transforms.apply_rotated_box(boxes)
expected_bboxes = np.array(
[
[600, 200, 160, 40, 0],
[600, 200, 144.22205102, 52.91502622, 49.10660535],
[600, 200, 80, 80, 90],
[600, 200, 80, 80, -90],
],
dtype=np.float64,
)
err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes)
assert np.allclose(transformed_boxes, expected_bboxes), err_msg
def test_print_augmentation(self):
t = T.RandomCrop("relative", (100, 100))
self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))")
t0 = T.RandomFlip(prob=0.5)
self.assertEqual(str(t0), "RandomFlip(prob=0.5)")
t1 = T.RandomFlip()
self.assertEqual(str(t1), "RandomFlip()")
t = T.AugmentationList([t0, t1])
self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]")
def test_random_apply_prob_out_of_range_check(self):
test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False}
for given_probability, is_valid in test_probabilities.items():
if not is_valid:
self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability)
else:
T.RandomApply(T.NoOpTransform(), prob=given_probability)
    def test_random_apply_wrapping_aug_probability_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
transform = random_apply.get_transform(image_mock)
transform_mock.get_transform.assert_called_once_with(image_mock)
self.assertIsNot(transform, transform_mock)
    def test_random_apply_wrapping_std_transform_probability_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.0001):
transform = random_apply.get_transform(image_mock)
self.assertIs(transform, transform_mock)
    def test_random_apply_probability_not_occurred_evaluation(self):
transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation)
image_mock = mock.MagicMock(name="MockImage")
random_apply = T.RandomApply(transform_mock, prob=0.001)
with mock.patch.object(random_apply, "_rand_range", return_value=0.9):
transform = random_apply.get_transform(image_mock)
transform_mock.get_transform.assert_not_called()
self.assertIsInstance(transform, T.NoOpTransform)
def test_augmentation_input_args(self):
input_shape = (100, 100)
output_shape = (50, 50)
# define two augmentations with different args
class TG1(T.Augmentation):
def get_transform(self, image, sem_seg):
return T.ResizeTransform(
input_shape[0], input_shape[1], output_shape[0], output_shape[1]
)
class TG2(T.Augmentation):
def get_transform(self, image):
assert image.shape[:2] == output_shape # check that TG1 is applied
return T.HFlipTransform(output_shape[1])
image = np.random.rand(*input_shape).astype("float32")
sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
tfms = inputs.apply_augmentations([TG1(), TG2()])
self.assertIsInstance(tfms[0], T.ResizeTransform)
self.assertIsInstance(tfms[1], T.HFlipTransform)
self.assertTrue(inputs.image.shape[:2] == output_shape)
self.assertTrue(inputs.sem_seg.shape[:2] == output_shape)
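        # TG3 asks for an attribute ("nonexist") that AugInput does not carry,
        # so applying it must raise AttributeError.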
class TG3(T.Augmentation):
def get_transform(self, image, nonexist):
pass
with self.assertRaises(AttributeError):
inputs.apply_augmentations([TG3()])
def test_augmentation_list(self):
input_shape = (100, 100)
image = np.random.rand(*input_shape).astype("float32")
sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8")
inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args
augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])
        tfms = T.AugmentationList([augs, T.Resize(30)])(inputs)
        # len(tfms) is 3 in latest fvcore (flattened TransformList), 2 in older versions
        # self.assertEqual(len(tfms), 3)
def test_color_transforms(self):
rand_img = np.random.random((100, 100, 3)) * 255
rand_img = rand_img.astype("uint8")
# Test no-op
noop_transform = T.ColorTransform(lambda img: img)
self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img)))
# Test a ImageOps operation
magnitude = np.random.randint(0, 256)
solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude))
expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude)
self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img)))
def test_resize_transform(self):
input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
for in_shape, out_shape in zip(input_shapes, output_shapes):
in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1])
out_img = tfm.apply_image(in_img)
self.assertTrue(out_img.shape == out_shape)
def test_extent_transform(self):
input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
src_rect = (20, 20, 80, 80)
output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
for in_shape, out_shape in zip(input_shapes, output_shapes):
in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
tfm = T.ExtentTransform(src_rect, out_shape[:2])
out_img = tfm.apply_image(in_img)
self.assertTrue(out_img.shape == out_shape)
|
banmo-main
|
third_party/detectron2_old/tests/data/test_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import numpy as np
import os
import unittest
import pycocotools.mask as mask_util
from detectron2.data import MetadataCatalog, detection_utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, BoxMode
from detectron2.utils.file_io import PathManager
class TestTransformAnnotations(unittest.TestCase):
def test_transform_simple_annotation(self):
transforms = T.TransformList([T.HFlipTransform(400)])
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": 3,
"segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],
}
output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))
self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
self.assertEqual(len(output["segmentation"]), len(anno["segmentation"]))
self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10]))
detection_utils.annotations_to_instances([output, output], (400, 400))
def test_flip_keypoints(self):
transforms = T.TransformList([T.HFlipTransform(400)])
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": np.random.rand(17, 3) * 50 + 15,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno),
transforms,
(400, 400),
keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
["keypoints_coco_2017_train"]
),
)
# The first keypoint is nose
self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
# The last 16 keypoints are 8 left-right pairs
self.assertTrue(
np.allclose(
output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
400 - anno["keypoints"][1:, 0].reshape(-1, 2),
)
)
self.assertTrue(
np.allclose(
output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
)
)
def test_crop(self):
transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])
keypoints = np.random.rand(17, 3) * 50 + 15
keypoints[:, 2] = 2
anno = {
"bbox": np.asarray([10, 10, 200, 400]),
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": keypoints,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (10, 10)
)
# box is shifted and cropped
self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all())
# keypoints are no longer visible
self.assertTrue((output["keypoints"][:, 2] == 0).all())
def test_transform_RLE(self):
transforms = T.TransformList([T.HFlipTransform(400)])
mask = np.zeros((300, 400), order="F").astype("uint8")
mask[:, :200] = 1
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": mask_util.encode(mask[:, :, None])[0],
"category_id": 3,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (300, 400)
)
mask = output["segmentation"]
self.assertTrue((mask[:, 200:] == 1).all())
self.assertTrue((mask[:, :200] == 0).all())
inst = detection_utils.annotations_to_instances(
[output, output], (400, 400), mask_format="bitmask"
)
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
def test_transform_RLE_resize(self):
transforms = T.TransformList(
[T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
)
mask = np.zeros((300, 400), order="F").astype("uint8")
mask[:, :200] = 1
anno = {
"bbox": np.asarray([10, 10, 200, 300]),
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": mask_util.encode(mask[:, :, None])[0],
"category_id": 3,
}
output = detection_utils.transform_instance_annotations(
copy.deepcopy(anno), transforms, (400, 400)
)
inst = detection_utils.annotations_to_instances(
[output, output], (400, 400), mask_format="bitmask"
)
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
def test_gen_crop(self):
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
# the box center must fall into the cropped region
self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
def test_gen_crop_outside_boxes(self):
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
with self.assertRaises(AssertionError):
detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)
def test_read_sem_seg(self):
cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir
sem_seg_gt_path = os.path.join(
cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png"
)
if not PathManager.exists(sem_seg_gt_path):
raise unittest.SkipTest(
"Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path)
)
sem_seg = detection_utils.read_image(sem_seg_gt_path, "L")
self.assertEqual(sem_seg.ndim, 3)
self.assertEqual(sem_seg.shape[2], 1)
self.assertEqual(sem_seg.dtype, np.uint8)
self.assertEqual(sem_seg.max(), 32)
self.assertEqual(sem_seg.min(), 1)
def test_read_exif_orientation(self):
# https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg
URL = "detectron2://assets/Landscape_5.jpg"
img = detection_utils.read_image(URL, "RGB")
self.assertEqual(img.ndim, 3)
self.assertEqual(img.dtype, np.uint8)
self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_detection_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import json
import numpy as np
import os
import tempfile
import unittest
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from detectron2.data import DatasetCatalog
from detectron2.evaluation import COCOEvaluator
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, Instances
class TestCOCOeval(unittest.TestCase):
def test_fast_eval(self):
# A small set of images/categories from COCO val
# fmt: off
detections = [{"image_id": 139, "category_id": 1, "bbox": [417.3332824707031, 159.27003479003906, 47.66064453125, 143.00193786621094], "score": 0.9949821829795837, "segmentation": {"size": [426, 640], "counts": "Tc`52W=3N0N4aNN^E7]:4XE1g:8kDMT;U100000001O1gE[Nk8h1dFiNY9Z1aFkN]9g2J3NdN`FlN`9S1cFRN07]9g1bFoM6;X9c1cFoM=8R9g1bFQN>3U9Y30O01OO1O001N2O1N1O4L4L5UNoE3V:CVF6Q:@YF9l9@ZF<k9[O`F=];HYnX2"}}, {"image_id": 139, "category_id": 1, "bbox": [383.5909118652344, 172.0777587890625, 17.959075927734375, 36.94813537597656], "score": 0.7685421705245972, "segmentation": {"size": [426, 640], "counts": "lZP5m0Z<300O100O100000001O00]OlC0T<OnCOT<OnCNX<JnC2bQT3"}}, {"image_id": 139, "category_id": 1, "bbox": [457.8359069824219, 158.88027954101562, 9.89764404296875, 8.771820068359375], "score": 0.07092753797769547, "segmentation": {"size": [426, 640], "counts": "bSo54T=2N2O1001O006ImiW2"}}] # noqa
gt_annotations = {"categories": [{"supercategory": "person", "id": 1, "name": "person"}, {"supercategory": "furniture", "id": 65, "name": "bed"}], "images": [{"license": 4, "file_name": "000000000285.jpg", "coco_url": "http://images.cocodataset.org/val2017/000000000285.jpg", "height": 640, "width": 586, "date_captured": "2013-11-18 13:09:47", "flickr_url": "http://farm8.staticflickr.com/7434/9138147604_c6225224b8_z.jpg", "id": 285}, {"license": 2, "file_name": "000000000139.jpg", "coco_url": "http://images.cocodataset.org/val2017/000000000139.jpg", "height": 426, "width": 640, "date_captured": "2013-11-21 01:34:01", "flickr_url": "http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg", "id": 139}], "annotations": [{"segmentation": [[428.19, 219.47, 430.94, 209.57, 430.39, 210.12, 421.32, 216.17, 412.8, 217.27, 413.9, 214.24, 422.42, 211.22, 429.29, 201.6, 430.67, 181.8, 430.12, 175.2, 427.09, 168.06, 426.27, 164.21, 430.94, 159.26, 440.29, 157.61, 446.06, 163.93, 448.53, 168.06, 448.53, 173.01, 449.08, 174.93, 454.03, 185.1, 455.41, 188.4, 458.43, 195.0, 460.08, 210.94, 462.28, 226.61, 460.91, 233.76, 454.31, 234.04, 460.08, 256.85, 462.56, 268.13, 465.58, 290.67, 465.85, 293.14, 463.38, 295.62, 452.66, 295.34, 448.26, 294.52, 443.59, 282.7, 446.06, 235.14, 446.34, 230.19, 438.09, 232.39, 438.09, 221.67, 434.24, 221.12, 427.09, 219.74]], "area": 2913.1103999999987, "iscrowd": 0, "image_id": 139, "bbox": [412.8, 157.61, 53.05, 138.01], "category_id": 1, "id": 230831}, {"segmentation": [[384.98, 206.58, 384.43, 199.98, 385.25, 193.66, 385.25, 190.08, 387.18, 185.13, 387.18, 182.93, 386.08, 181.01, 385.25, 178.81, 385.25, 175.79, 388.0, 172.76, 394.88, 172.21, 398.72, 173.31, 399.27, 176.06, 399.55, 183.48, 397.9, 185.68, 395.15, 188.98, 396.8, 193.38, 398.45, 194.48, 399.0, 205.75, 395.43, 207.95, 388.83, 206.03]], "area": 435.1449499999997, "iscrowd": 0, "image_id": 139, "bbox": [384.43, 172.21, 15.12, 35.74], "category_id": 1, "id": 233201}]} # noqa
# fmt: on
# Test a small dataset for typical COCO format
experiments = {"full": (detections, gt_annotations, {})}
# Test what happens if the list of detections or ground truth annotations is empty
experiments["empty_dt"] = ([], gt_annotations, {})
gt = copy.deepcopy(gt_annotations)
gt["annotations"] = []
experiments["empty_gt"] = (detections, gt, {})
# Test changing parameter settings
experiments["no_categories"] = (detections, gt_annotations, {"useCats": 0})
experiments["no_ious"] = (detections, gt_annotations, {"iouThrs": []})
experiments["no_rec_thrs"] = (detections, gt_annotations, {"recThrs": []})
experiments["no_max_dets"] = (detections, gt_annotations, {"maxDets": []})
experiments["one_max_det"] = (detections, gt_annotations, {"maxDets": [1]})
experiments["no_area"] = (detections, gt_annotations, {"areaRng": [], "areaRngLbl": []})
# Test what happens if one omits different fields from the annotation structure
annotation_fields = [
"id",
"image_id",
"category_id",
"score",
"area",
"iscrowd",
"ignore",
"bbox",
"segmentation",
]
for a in annotation_fields:
gt = copy.deepcopy(gt_annotations)
for g in gt["annotations"]:
if a in g:
del g[a]
dt = copy.deepcopy(detections)
for d in dt:
if a in d:
del d[a]
experiments["omit_gt_" + a] = (detections, gt, {})
experiments["omit_dt_" + a] = (dt, gt_annotations, {})
# Compare precision/recall for original COCO PythonAPI to custom optimized one
for name, (dt, gt, params) in experiments.items():
# Dump to json.
try:
with tempfile.TemporaryDirectory() as tmpdir:
json_file_name = os.path.join(tmpdir, "gt_" + name + ".json")
with open(json_file_name, "w") as f:
json.dump(gt, f)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file_name)
except Exception:
pass
for iou_type in ["bbox", "segm", "keypoints"]:
# Run original COCOeval PythonAPI
api_exception = None
try:
with contextlib.redirect_stdout(io.StringIO()):
coco_dt = coco_api.loadRes(dt)
coco_eval = COCOeval(coco_api, coco_dt, iou_type)
for p, v in params.items():
setattr(coco_eval.params, p, v)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
except Exception as ex:
api_exception = ex
# Run optimized COCOeval_opt API
opt_exception = None
try:
with contextlib.redirect_stdout(io.StringIO()):
coco_dt = coco_api.loadRes(dt)
coco_eval_opt = COCOeval_opt(coco_api, coco_dt, iou_type)
for p, v in params.items():
setattr(coco_eval_opt.params, p, v)
coco_eval_opt.evaluate()
coco_eval_opt.accumulate()
coco_eval_opt.summarize()
except Exception as ex:
opt_exception = ex
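                # If only one of the two APIs raised, fall through to the
                # precision/recall comparison below; it fails loudly because
                # the raising API never produced eval results.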
if api_exception is not None and opt_exception is not None:
# Original API and optimized API should throw the same exception if annotation
# format is bad
api_error = "" if api_exception is None else type(api_exception).__name__
opt_error = "" if opt_exception is None else type(opt_exception).__name__
msg = "%s: comparing COCO APIs, '%s' != '%s'" % (name, api_error, opt_error)
self.assertTrue(api_error == opt_error, msg=msg)
else:
# Original API and optimized API should produce the same precision/recalls
for k in ["precision", "recall"]:
diff = np.abs(coco_eval.eval[k] - coco_eval_opt.eval[k])
abs_diff = np.max(diff) if diff.size > 0 else 0.0
msg = "%s: comparing COCO APIs, %s differs by %f" % (name, k, abs_diff)
self.assertTrue(abs_diff < 1e-4, msg=msg)
@unittest.skipIf(os.environ.get("CI"), "Require COCO data.")
def test_unknown_category(self):
dataset = "coco_2017_val_100"
evaluator = COCOEvaluator(dataset)
evaluator.reset()
inputs = DatasetCatalog.get(dataset)[:2]
pred = Instances((100, 100))
pred.pred_boxes = Boxes(torch.rand(2, 4))
pred.scores = torch.rand(2)
pred.pred_classes = torch.tensor([10, 80])
output = {"instances": pred}
evaluator.process(inputs, [output, output])
with self.assertRaises(AssertionError):
evaluator.evaluate()
|
banmo-main
|
third_party/detectron2_old/tests/data/test_coco_evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import math
import operator
import unittest
import torch
from torch.utils import data
from torch.utils.data.sampler import SequentialSampler
from detectron2.data.build import worker_init_reset_seed
from detectron2.data.common import DatasetFromList, ToIterableDataset
from detectron2.data.samplers import (
GroupedBatchSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.utils.env import seed_all_rng
class TestGroupedBatchSampler(unittest.TestCase):
def test_missing_group_id(self):
sampler = SequentialSampler(list(range(100)))
group_ids = [1] * 100
samples = GroupedBatchSampler(sampler, group_ids, 2)
for mini_batch in samples:
self.assertEqual(len(mini_batch), 2)
def test_groups(self):
sampler = SequentialSampler(list(range(100)))
group_ids = [1, 0] * 50
samples = GroupedBatchSampler(sampler, group_ids, 2)
for mini_batch in samples:
self.assertEqual((mini_batch[0] + mini_batch[1]) % 2, 0)
class TestSamplerDeterministic(unittest.TestCase):
def test_to_iterable(self):
sampler = TrainingSampler(100, seed=10)
dataset = DatasetFromList(list(range(100)))
dataset = ToIterableDataset(dataset, sampler)
data_loader = data.DataLoader(dataset, num_workers=0, collate_fn=operator.itemgetter(0))
output = list(itertools.islice(data_loader, 100))
self.assertEqual(set(output), set(range(100)))
data_loader = data.DataLoader(
dataset,
num_workers=2,
collate_fn=operator.itemgetter(0),
worker_init_fn=worker_init_reset_seed,
# reset seed should not affect behavior of TrainingSampler
)
output = list(itertools.islice(data_loader, 100))
# multiple workers should not lead to duplicate or different data
self.assertEqual(set(output), set(range(100)))
def test_training_sampler_seed(self):
seed_all_rng(42)
sampler = TrainingSampler(30)
data = list(itertools.islice(sampler, 65))
seed_all_rng(42)
sampler = TrainingSampler(30)
seed_all_rng(999) # should be ineffective
data2 = list(itertools.islice(sampler, 65))
self.assertEqual(data, data2)
class TestRepeatFactorTrainingSampler(unittest.TestCase):
def test_repeat_factors_from_category_frequency(self):
repeat_thresh = 0.5
dataset_dicts = [
{"annotations": [{"category_id": 0}, {"category_id": 1}]},
{"annotations": [{"category_id": 0}]},
{"annotations": []},
]
rep_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset_dicts, repeat_thresh
)
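        # Category 1 appears in 1/3 of images, below repeat_thresh=0.5, so its
        # category-level factor is sqrt(0.5 / (1/3)) = sqrt(3/2); category 0
        # (frequency 2/3) stays at 1. Each image takes the max factor over its
        # annotations, and images without annotations default to 1.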
expected_rep_factors = torch.tensor([math.sqrt(3 / 2), 1.0, 1.0])
self.assertTrue(torch.allclose(rep_factors, expected_rep_factors))
|
banmo-main
|
third_party/detectron2_old/tests/data/test_sampler.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# flake8: noqa
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from unittest import mock
from sphinx.domains import Domain
from typing import Dict, List, Tuple
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
class GithubURLDomain(Domain):
"""
Resolve certain links in markdown files to github source.
"""
name = "githuburl"
ROOT = "https://github.com/facebookresearch/detectron2/blob/master/"
LINKED_DOC = ["tutorials/install", "tutorials/getting_started"]
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if not target.endswith("html") and target.startswith("../../"):
url = target.replace("../", "")
github_url = url
if fromdocname in self.LINKED_DOC:
# unresolved links in these docs are all github links
github_url = target
if github_url is not None:
if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"):
# bug of recommonmark.
# https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
github_url += ".md"
print("Ref {} resolved to github:{}".format(target, github_url))
contnode["refuri"] = self.ROOT + github_url
return [("githuburl:any", contnode)]
else:
return []
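# Example (illustrative): an "any" cross-reference to "../../GETTING_STARTED.md"
# strips the leading "../" segments and resolves to ROOT + "GETTING_STARTED.md";
# unresolved targets inside LINKED_DOC pages are treated as github paths directly.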
# to support markdown
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath("../"))
os.environ["_DOC_BUILDING"] = "True"
DEPLOY = os.environ.get("READTHEDOCS") == "True"
# -- Project information -----------------------------------------------------
# fmt: off
try:
import torch # noqa
except ImportError:
for m in [
"torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd",
"torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx",
"torchvision", "torchvision.ops",
]:
sys.modules[m] = mock.Mock(name=m)
sys.modules['torch'].__version__ = "1.7" # fake version
HAS_TORCH = False
else:
try:
torch.ops.detectron2 = mock.Mock(name="torch.ops.detectron2")
    except Exception:
pass
HAS_TORCH = True
for m in [
"cv2", "scipy", "portalocker", "detectron2._C",
"pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval",
"google", "google.protobuf", "google.protobuf.internal", "onnx",
"caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend",
]:
sys.modules[m] = mock.Mock(name=m)
# fmt: on
sys.modules["cv2"].__version__ = "3.4"
import detectron2 # isort: skip
if HAS_TORCH:
from detectron2.utils.env import fixup_module_metadata
fixup_module_metadata("torch.nn", torch.nn.__dict__)
fixup_module_metadata("torch.utils.data", torch.utils.data.__dict__)
project = "detectron2"
copyright = "2019-2020, detectron2 contributors"
author = "detectron2 contributors"
# The short X.Y version
version = detectron2.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"recommonmark",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
if DEPLOY:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.5
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "detectron2doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"detectron2",
"detectron2 Documentation",
author,
"detectron2",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, "__HIDE_SPHINX_DOC__", False):
return True
# Hide some that are deprecated or not intended to be used
HIDDEN = {
"ResNetBlockBase",
"GroupedBatchSampler",
"build_transform_gen",
"export_caffe2_model",
"export_onnx_model",
"apply_transform_gens",
"TransformGen",
"apply_augmentations",
"StandardAugInput",
"build_batch_data_loader",
"draw_panoptic_seg_predictions",
"WarmupCosineLR",
"WarmupMultiStepLR",
}
try:
if name in HIDDEN or (
hasattr(obj, "__doc__") and obj.__doc__.lower().strip().startswith("deprecated")
):
print("Skipping deprecated object: {}".format(name))
return True
    except Exception:
pass
return skip
_PAPER_DATA = {
"resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"),
"fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"),
"mask r-cnn": ("1703.06870", "Mask R-CNN"),
"faster r-cnn": (
"1506.01497",
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks",
),
"deformconv": ("1703.06211", "Deformable Convolutional Networks"),
"deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"),
"panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"),
"retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"),
"cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"),
"lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"),
"rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"),
"imagenet in 1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"),
"xception": ("1610.02357", "Xception: Deep Learning with Depthwise Separable Convolutions"),
"mobilenet": (
"1704.04861",
"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications",
),
"deeplabv3+": (
"1802.02611",
"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation",
),
"dds": ("2003.13678", "Designing Network Design Spaces"),
"scaling": ("2103.06877", "Fast and Accurate Model Scaling"),
}
def paper_ref_role(
typ: str,
rawtext: str,
text: str,
lineno: int,
inliner,
options: Dict = {},
content: List[str] = [],
):
"""
Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension.
"""
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
text = utils.unescape(text)
has_explicit_title, title, link = split_explicit_title(text)
link = link.lower()
if link not in _PAPER_DATA:
inliner.reporter.warning("Cannot find paper " + link)
paper_url, paper_title = "#", link
else:
paper_url, paper_title = _PAPER_DATA[link]
if "/" not in paper_url:
paper_url = "https://arxiv.org/abs/" + paper_url
if not has_explicit_title:
title = paper_title
pnode = nodes.reference(title, title, internal=False, refuri=paper_url)
return [pnode], []
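# Usage example: writing :paper:`mask r-cnn` in a docstring renders a link
# titled "Mask R-CNN" pointing to https://arxiv.org/abs/1703.06870, per
# _PAPER_DATA above.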
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect("autodoc-skip-member", autodoc_skip_member)
app.add_role("paper", paper_ref_role)
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
|
banmo-main
|
third_party/detectron2_old/docs/conf.py
|
# An example config to train a mmdetection model using detectron2.
from ..common.data.coco import dataloader
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.optim import SGD as optimizer
from ..common.train import train
from detectron2.modeling.mmdet_wrapper import MMDetDetector
from detectron2.config import LazyCall as L
model = L(MMDetDetector)(
detector=dict(
type="MaskRCNN",
pretrained="torchvision://resnet50",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
rpn_head=dict(
type="RPNHead",
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type="AnchorGenerator",
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
),
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
roi_head=dict(
type="StandardRoIHead",
bbox_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
bbox_head=dict(
type="Shared2FCBBoxHead",
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=False,
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
mask_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
mask_head=dict(
type="FCNMaskHead",
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False,
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
),
mask_size=28,
pos_weight=-1,
debug=False,
),
),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
score_thr=0.05,
nms=dict(type="nms", iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5,
),
),
),
pixel_mean=[123.675, 116.280, 103.530],
pixel_std=[58.395, 57.120, 57.375],
)
dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model
train.init_checkpoint = None # pretrained model is loaded inside backbone
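# Illustrative launch command (a sketch following the lazy-config workflow
# documented in torchvision_imagenet_R_50.py; adjust paths to your setup):
#   python tools/lazyconfig_train_net.py \
#       --config-file configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py --num-gpus 8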
|
banmo-main
|
third_party/detectron2_old/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py
|
"""
An example config file to train an ImageNet classifier with detectron2.
Model and dataloader both come from torchvision.
This shows how to use detectron2 as a general engine for any new models and tasks.
To run, use the following command:
python tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \
--num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/
"""
import torch
from torch import nn
from torch.nn import functional as F
from omegaconf import OmegaConf
import torchvision
from torchvision.transforms import transforms as T
from torchvision.models.resnet import ResNet, Bottleneck
from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.solver import WarmupParamScheduler
from detectron2.solver.build import get_default_optimizer_params
from detectron2.config import LazyCall as L
from detectron2.model_zoo import get_config
from detectron2.data.samplers import TrainingSampler, InferenceSampler
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils import comm
"""
Note: Here we put reusable code (models, evaluation, data) together with configs just as a
proof-of-concept, to easily demonstrate what's needed to train an ImageNet classifier in detectron2.
Writing code in configs offers extreme flexibility but is often not a good engineering practice.
In practice, you might want to put code in your project and import it instead.
"""
def build_data_loader(dataset, batch_size, num_workers, training=True):
return torch.utils.data.DataLoader(
dataset,
sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)),
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
)
class ClassificationNet(nn.Module):
def __init__(self, model: nn.Module):
super().__init__()
self.model = model
@property
def device(self):
return list(self.model.parameters())[0].device
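    # Follows detectron2's meta-architecture convention: return a loss value
    # in training mode and raw predictions (logits) in eval mode.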
def forward(self, inputs):
image, label = inputs
pred = self.model(image.to(self.device))
if self.training:
label = label.to(self.device)
return F.cross_entropy(pred, label)
else:
return pred
class ClassificationAcc(DatasetEvaluator):
def reset(self):
self.corr = self.total = 0
def process(self, inputs, outputs):
image, label = inputs
self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item()
self.total += len(label)
def evaluate(self):
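        # Gather the per-process (correct, total) counts from every worker and
        # reduce them, so all processes report the same global accuracy.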
all_corr_total = comm.all_gather([self.corr, self.total])
corr = sum(x[0] for x in all_corr_total)
total = sum(x[1] for x in all_corr_total)
return {"accuracy": corr / total}
# --- End of code that could be in a project and be imported
dataloader = OmegaConf.create()
dataloader.train = L(build_data_loader)(
dataset=L(torchvision.datasets.ImageNet)(
root="/path/to/imagenet",
split="train",
transform=L(T.Compose)(
transforms=[
L(T.RandomResizedCrop)(size=224),
L(T.RandomHorizontalFlip)(),
T.ToTensor(),
L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]
),
),
batch_size=256 // 8,
num_workers=4,
training=True,
)
dataloader.test = L(build_data_loader)(
dataset=L(torchvision.datasets.ImageNet)(
root="${...train.dataset.root}",
split="val",
transform=L(T.Compose)(
transforms=[
L(T.Resize)(size=256),
L(T.CenterCrop)(size=224),
T.ToTensor(),
L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]
),
),
batch_size=256 // 8,
num_workers=4,
training=False,
)
dataloader.evaluator = L(ClassificationAcc)()
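# Note: the torchvision ResNet below is constructed eagerly at config-import
# time (a plain call, not wrapped in L); only ClassificationNet is lazy.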
model = L(ClassificationNet)(
model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True)
)
optimizer = L(torch.optim.SGD)(
params=L(get_default_optimizer_params)(),
lr=0.1,
momentum=0.9,
weight_decay=1e-4,
)
lr_multiplier = L(WarmupParamScheduler)(
scheduler=L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100]
),
warmup_length=1 / 100,
warmup_factor=0.1,
)
train = get_config("common/train.py").train
train.init_checkpoint = None
train.max_iter = 100 * 1281167 // 256
|
banmo-main
|
third_party/detectron2_old/configs/Misc/torchvision_imagenet_R_50.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco_keypoint import dataloader
from ..common.models.keypoint_rcnn_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.retinanet import model
from ..common.train import train
dataloader.train.mapper.use_instance_mask = False
model.backbone.bottom_up.freeze_at = 2
optimizer.lr = 0.01
|
banmo-main
|
third_party/detectron2_old/configs/COCO-Detection/retinanet_R_50_FPN_1x.py
|
from fvcore.common.param_scheduler import MultiStepParamScheduler
from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler
def default_X_scheduler(num_X):
"""
Returns the config for a default multi-step LR scheduler such as "1x", "3x",
commonly referred to in papers, where every 1x has the total length of 1440k
training images (~12 COCO epochs). LR is decayed twice at the end of training
following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.
Args:
num_X: a positive real number
Returns:
DictConfig: configs that define the multiplier for LR during training
"""
# total number of iterations assuming 16 batch size, using 1440000/16=90000
total_steps_16bs = num_X * 90000
if num_X <= 2:
scheduler = L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01],
# note that scheduler is scale-invariant. This is equivalent to
# milestones=[6, 8, 9]
milestones=[60000, 80000, 90000],
)
else:
scheduler = L(MultiStepParamScheduler)(
values=[1.0, 0.1, 0.01],
milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
)
return L(WarmupParamScheduler)(
scheduler=scheduler,
warmup_length=1000 / total_steps_16bs,
warmup_method="linear",
warmup_factor=0.001,
)
lr_multiplier_1x = default_X_scheduler(1)
lr_multiplier_2x = default_X_scheduler(2)
lr_multiplier_3x = default_X_scheduler(3)
lr_multiplier_6x = default_X_scheduler(6)
lr_multiplier_9x = default_X_scheduler(9)
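# Illustrative sanity check (a sketch, not part of the original config): a
# "3x" schedule spans 3 * 90000 = 270000 iterations at batch size 16, with
# LR drops 60k and 20k iterations before the end.
def _example_3x_milestones():
    sched = default_X_scheduler(3)
    assert list(sched.scheduler.milestones) == [210000, 250000, 270000]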
|
banmo-main
|
third_party/detectron2_old/configs/common/coco_schedule.py
|
# Common training-related configs that are designed for "tools/lazyconfig_train_net.py"
# You can use your own instead, together with your own train_net.py
train = dict(
output_dir="./output",
init_checkpoint="detectron2://ImageNetPretrained/MSRA/R-50.pkl",
max_iter=90000,
amp=dict(enabled=False), # options for Automatic Mixed Precision
ddp=dict( # options for DistributedDataParallel
broadcast_buffers=False,
find_unused_parameters=False,
fp16_compression=False,
),
checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer
eval_period=5000,
log_period=20,
device="cuda"
# ...
)
|
banmo-main
|
third_party/detectron2_old/configs/common/train.py
|
import torch
from detectron2.config import LazyCall as L
from detectron2.solver.build import get_default_optimizer_params
SGD = L(torch.optim.SGD)(
params=L(get_default_optimizer_params)(
# params.model is meant to be set to the model object, before instantiating
# the optimizer.
weight_decay_norm=0.0
),
lr=0.02,
momentum=0.9,
weight_decay=1e-4,
)
|
banmo-main
|
third_party/detectron2_old/configs/common/optim.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling import PanopticFPN
from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead
from .mask_rcnn_fpn import model
model._target_ = PanopticFPN
model.sem_seg_head = L(SemSegFPNHead)(
input_shape={
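        # "${....backbone.out_channels}" is a relative omegaconf interpolation:
        # each dot past the first climbs one level, so from this ShapeSpec it
        # resolves to model.backbone.out_channels (the FPN out_channels, 256).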
f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}")
for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32])
},
ignore_value=255,
num_classes=54, # COCO stuff + 1
conv_dims=128,
common_stride=4,
loss_weight=0.5,
norm="GN",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/panoptic_fpn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from .mask_rcnn_fpn import model
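# Keypoint R-CNN has no mask branch, so drop the mask options inherited from
# the Mask R-CNN FPN base config before reconfiguring the ROI heads.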
[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
model.roi_heads.update(
num_classes=1,
keypoint_in_features=["p2", "p3", "p4", "p5"],
keypoint_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_keypoints=17,
conv_dims=[512] * 8,
loss_normalizer="visible",
),
)
# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
# 1000 proposals per-image is found to hurt box AP.
# Therefore we increase it to 1500 per-image.
model.proposal_generator.post_nms_topk = (1500, 1000)
# Keypoint AP degrades (though box AP improves) when using plain L1 loss
model.roi_heads.box_predictor.smooth_l1_beta = 0.5
|
banmo-main
|
third_party/detectron2_old/configs/common/models/keypoint_rcnn_fpn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import (
FastRCNNOutputLayers,
MaskRCNNConvUpsampleHead,
Res5ROIHeads,
)
model = L(GeneralizedRCNN)(
backbone=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res4"],
),
proposal_generator=L(RPN)(
in_features=["res4"],
head=L(StandardRPNHead)(in_channels=1024, num_anchors=15),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[32, 64, 128, 256, 512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[16],
offset=0.0,
),
anchor_matcher=L(Matcher)(
thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
batch_size_per_image=256,
positive_fraction=0.5,
pre_nms_topk=(12000, 6000),
post_nms_topk=(2000, 1000),
nms_thresh=0.7,
),
roi_heads=L(Res5ROIHeads)(
num_classes=80,
batch_size_per_image=512,
positive_fraction=0.25,
proposal_matcher=L(Matcher)(
thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
),
in_features=["res4"],
pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 16,),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
res5=L(ResNet.make_stage)(
block_class=BottleneckBlock,
num_blocks=3,
stride_per_block=[2, 1, 1],
in_channels=1024,
bottleneck_channels=512,
out_channels=2048,
norm="FrozenBN",
stride_in_1x1=True,
),
box_predictor=L(FastRCNNOutputLayers)(
input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
num_classes="${..num_classes}",
),
mask_head=L(MaskRCNNConvUpsampleHead)(
input_shape=L(ShapeSpec)(
channels="${...res5.out_channels}",
width="${...pooler.output_size}",
height="${...pooler.output_size}",
),
num_classes="${..num_classes}",
conv_dims=[256],
),
),
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/mask_rcnn_c4.py
|
# -*- coding: utf-8 -*-
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import RetinaNet
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelP6P7
from detectron2.modeling.backbone import BasicStem, FPN, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
model = L(RetinaNet)(
backbone=L(FPN)(
bottom_up=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res3", "res4", "res5"],
),
in_features=["res3", "res4", "res5"],
out_channels=256,
top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"),
),
head=L(RetinaNetHead)(
input_shape=[ShapeSpec(channels=256)],
num_classes="${..num_classes}",
conv_dims=[256, 256, 256, 256],
prior_prob=0.01,
num_anchors=9,
),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128],
offset=0.0,
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
anchor_matcher=L(Matcher)(
thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True
),
num_classes=80,
head_in_features=["p3", "p4", "p5", "p6", "p7"],
focal_loss_alpha=0.25,
focal_loss_gamma=2.0,
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/retinanet.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads
from .mask_rcnn_fpn import model
# arguments that don't exist for Cascade R-CNN
[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]]
model.roi_heads.update(
_target_=CascadeROIHeads,
box_heads=[
L(FastRCNNConvFCHead)(
input_shape=ShapeSpec(channels=256, height=7, width=7),
conv_dims=[],
fc_dims=[1024, 1024],
)
for k in range(3)
],
box_predictors=[
L(FastRCNNOutputLayers)(
input_shape=ShapeSpec(channels=1024),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)),
cls_agnostic_bbox_reg=True,
num_classes="${...num_classes}",
)
for (w1, w2) in [(10, 5), (20, 10), (30, 15)]
],
proposal_matchers=[
L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False)
for th in [0.5, 0.6, 0.7]
],
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/cascade_rcnn.py
|
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone import BasicStem, FPN, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import (
StandardROIHeads,
FastRCNNOutputLayers,
MaskRCNNConvUpsampleHead,
FastRCNNConvFCHead,
)
model = L(GeneralizedRCNN)(
backbone=L(FPN)(
bottom_up=L(ResNet)(
stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
stages=L(ResNet.make_default_stages)(
depth=50,
stride_in_1x1=True,
norm="FrozenBN",
),
out_features=["res2", "res3", "res4", "res5"],
),
in_features="${.bottom_up.out_features}",
out_channels=256,
top_block=L(LastLevelMaxPool)(),
),
proposal_generator=L(RPN)(
in_features=["p2", "p3", "p4", "p5", "p6"],
head=L(StandardRPNHead)(in_channels=256, num_anchors=3),
anchor_generator=L(DefaultAnchorGenerator)(
sizes=[[32], [64], [128], [256], [512]],
aspect_ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
offset=0.0,
),
anchor_matcher=L(Matcher)(
thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
),
box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
batch_size_per_image=256,
positive_fraction=0.5,
pre_nms_topk=(2000, 1000),
post_nms_topk=(1000, 1000),
nms_thresh=0.7,
),
roi_heads=L(StandardROIHeads)(
num_classes=80,
batch_size_per_image=512,
positive_fraction=0.25,
proposal_matcher=L(Matcher)(
thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
),
box_in_features=["p2", "p3", "p4", "p5"],
box_pooler=L(ROIPooler)(
output_size=7,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
box_head=L(FastRCNNConvFCHead)(
input_shape=ShapeSpec(channels=256, height=7, width=7),
conv_dims=[],
fc_dims=[1024, 1024],
),
box_predictor=L(FastRCNNOutputLayers)(
input_shape=ShapeSpec(channels=1024),
test_score_thresh=0.05,
box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
num_classes="${..num_classes}",
),
mask_in_features=["p2", "p3", "p4", "p5"],
mask_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
mask_head=L(MaskRCNNConvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_classes="${..num_classes}",
conv_dims=[256, 256, 256, 256, 256],
),
),
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
input_format="BGR",
)
|
banmo-main
|
third_party/detectron2_old/configs/common/models/mask_rcnn_fpn.py
|
from omegaconf import OmegaConf
import detectron2.data.transforms as T
from detectron2.config import LazyCall as L
from detectron2.data import (
DatasetMapper,
build_detection_test_loader,
build_detection_train_loader,
get_detection_dataset_dicts,
)
from detectron2.evaluation import COCOEvaluator
dataloader = OmegaConf.create()
dataloader.train = L(build_detection_train_loader)(
dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"),
mapper=L(DatasetMapper)(
is_train=True,
augmentations=[
L(T.ResizeShortestEdge)(
short_edge_length=(640, 672, 704, 736, 768, 800),
sample_style="choice",
max_size=1333,
),
L(T.RandomFlip)(horizontal=True),
],
image_format="BGR",
use_instance_mask=True,
),
total_batch_size=16,
num_workers=4,
)
dataloader.test = L(build_detection_test_loader)(
dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False),
mapper=L(DatasetMapper)(
is_train=False,
augmentations=[
L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333),
],
image_format="${...train.mapper.image_format}",
),
num_workers=4,
)
dataloader.evaluator = L(COCOEvaluator)(
dataset_name="${..test.dataset.names}",
)
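# --- Illustrative usage (not part of the original file): each entry above is a lazy
# config; instantiate() turns it into a real object. A minimal sketch, assuming the
# COCO datasets are registered and present on disk:
if __name__ == "__main__":
    from detectron2.config import instantiate

    train_loader = instantiate(dataloader.train)
    test_loader = instantiate(dataloader.test)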
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco.py
|
from detectron2.config import LazyCall as L
from detectron2.evaluation import (
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
SemSegEvaluator,
)
from .coco import dataloader
dataloader.train.dataset.names = "coco_2017_train_panoptic_separated"
dataloader.train.dataset.filter_empty = False
dataloader.test.dataset.names = "coco_2017_val_panoptic_separated"
dataloader.evaluator = [
L(COCOEvaluator)(
dataset_name="${...test.dataset.names}",
),
L(SemSegEvaluator)(
dataset_name="${...test.dataset.names}",
),
L(COCOPanopticEvaluator)(
dataset_name="${...test.dataset.names}",
),
]
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco_panoptic_separated.py
|
from detectron2.data.detection_utils import create_keypoint_hflip_indices
from .coco import dataloader
dataloader.train.dataset.min_keypoints = 1
dataloader.train.dataset.names = "keypoints_coco_2017_train"
dataloader.test.dataset.names = "keypoints_coco_2017_val"
dataloader.train.mapper.update(
use_instance_mask=False,
use_keypoint=True,
keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
)
|
banmo-main
|
third_party/detectron2_old/configs/common/data/coco_keypoint.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco_panoptic_separated import dataloader
from ..common.models.panoptic_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
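# --- Illustrative usage (not part of the original file): lazy configs like this one
# are typically launched with detectron2's lazy-config training script, e.g.
#   python tools/lazyconfig_train_net.py \
#       --config-file configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
# (script and path per the detectron2 repo layout; treat them as assumptions).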
|
banmo-main
|
third_party/detectron2_old/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
|
from .mask_rcnn_R_101_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Config source:
# https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=23,
w_a=38.65,
w_0=96,
w_m=2.43,
group_width=40,
norm="SyncBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py
|
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Config source:
# https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
norm="SyncBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
|
import detectron2.data.transforms as T
from detectron2.config.lazy import LazyCall as L
from detectron2.layers.batch_norm import NaiveSyncBatchNorm
from detectron2.solver import WarmupParamScheduler
from fvcore.common.param_scheduler import CosineParamScheduler
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.optim import SGD as optimizer
from ..common.train import train
# train from scratch
train.init_checkpoint = ""
train.amp.enabled = True
train.ddp.fp16_compression = True
model.backbone.bottom_up.freeze_at = 0
# SyncBN
# fmt: off
model.backbone.bottom_up.stem.norm = \
model.backbone.bottom_up.stages.norm = \
model.backbone.norm = "SyncBN"
# Use NaiveSyncBatchNorm because the heads may receive empty inputs, which
# torch.nn.SyncBatchNorm does not support. We can remove this after
# https://github.com/pytorch/pytorch/issues/36530 is fixed.
model.roi_heads.box_head.conv_norm = \
model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
stats_mode="N")
# fmt: on
# 2conv in RPN:
# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
model.proposal_generator.head.conv_dims = [-1, -1]
# 4conv1fc box head
model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
model.roi_heads.box_head.fc_dims = [1024]
# resize_and_crop_image in:
# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
image_size = 1024
dataloader.train.mapper.augmentations = [
L(T.ResizeScale)(
min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
),
L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
L(T.RandomFlip)(horizontal=True),
]
# recompute boxes due to cropping
dataloader.train.mapper.recompute_boxes = True
# larger batch-size.
dataloader.train.total_batch_size = 64
# Equivalent to 100 epochs.
# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
train.max_iter = 184375
lr_multiplier = L(WarmupParamScheduler)(
scheduler=CosineParamScheduler(1.0, 0.0),
warmup_length=500 / train.max_iter,
warmup_factor=0.067,
)
optimizer.lr = 0.1
optimizer.weight_decay = 4e-5
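# --- Illustrative sketch (not part of the original file): the schedule above follows
# from  max_iter = epochs * images_per_epoch / images_per_iter:
#   100 ep * 118000 img / 64 img/iter = 184375 iters
# Retargeting, e.g., 50 epochs at the same batch size would be:
#   train.max_iter = 50 * 118000 // 64  # = 92187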
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_101_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 4 # 100ep -> 400ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
model.backbone.bottom_up.stages.depth = 101
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py
|
from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter *= 2 # 100ep -> 200ep
|
banmo-main
|
third_party/detectron2_old/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Replace default ResNet with RegNetX-4GF from the DDS paper. Config source:
# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=23,
w_a=38.65,
w_0=96,
w_m=2.43,
group_width=40,
freeze_at=2,
norm="FrozenBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
optimizer.weight_decay = 5e-5
train.init_checkpoint = (
"https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth"
)
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source:
# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
freeze_at=2,
norm="FrozenBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
optimizer.weight_decay = 5e-5
train.init_checkpoint = (
"https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth"
)
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
model.backbone.bottom_up.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
|
from ..common.train import train
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_c4 import model
model.backbone.freeze_at = 2
|
banmo-main
|
third_party/detectron2_old/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import argparse
template = """<details><summary> install </summary><pre><code>\
python -m pip install detectron2{d2_version} -f \\
https://dl.fbaipublicfiles.com/detectron2/wheels/{cuda}/torch{torch}/index.html
</code></pre> </details>"""
CUDA_SUFFIX = {
"11.1": "cu111",
"11.0": "cu110",
"10.2": "cu102",
"10.1": "cu101",
"10.0": "cu100",
"9.2": "cu92",
"cpu": "cpu",
}
def gen_header(torch_versions):
return '<table class="docutils"><tbody><th width="80"> CUDA </th>' + "".join(
[
'<th valign="bottom" align="left" width="100">torch {}</th>'.format(t)
for t in torch_versions
]
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--d2-version", help="detectron2 version number, default to empty")
args = parser.parse_args()
d2_version = f"=={args.d2_version}" if args.d2_version else ""
all_versions = (
[("1.6", k) for k in ["10.2", "10.1", "9.2", "cpu"]]
+ [("1.7", k) for k in ["11.0", "10.2", "10.1", "9.2", "cpu"]]
+ [("1.8", k) for k in ["11.1", "10.2", "10.1", "cpu"]]
)
torch_versions = sorted({k[0] for k in all_versions}, key=float, reverse=True)
cuda_versions = sorted(
{k[1] for k in all_versions}, key=lambda x: float(x) if x != "cpu" else 0, reverse=True
)
table = gen_header(torch_versions)
for cu in cuda_versions:
table += f""" <tr><td align="left">{cu}</td>"""
cu_suffix = CUDA_SUFFIX[cu]
for torch in torch_versions:
if (torch, cu) in all_versions:
cell = template.format(d2_version=d2_version, cuda=cu_suffix, torch=torch)
else:
cell = ""
table += f"""<td align="left">{cell} </td> """
table += "</tr>"
table += "</tbody></table>"
print(table)
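# --- Illustrative usage (not part of the original file):
#   python gen_install_table.py                   # table for unpinned detectron2
#   python gen_install_table.py --d2-version 0.4  # pin wheels to detectron2==0.4
# The script prints the HTML install table to stdout.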
|
banmo-main
|
third_party/detectron2_old/dev/packaging/gen_install_table.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .utils.env import setup_environment
setup_environment()
# This line will be programmatically read and written by setup.py.
# Leave it at the bottom of this file and don't touch it.
__version__ = "0.4"
|
banmo-main
|
third_party/detectron2_old/detectron2/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from detectron2.utils.file_io import PathHandler, PathManager
class ModelCatalog(object):
"""
Store mappings from names to third-party models.
"""
S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
# MSRA models have STRIDE_IN_1X1=True. False otherwise.
# NOTE: all BN models here have fused BN into an affine layer.
# As a result, you should only load them to a model with "FrozenBN".
# Loading them to a model with regular BN or SyncBN is wrong.
# Even when loaded to FrozenBN, it is still different from affine by an epsilon,
# which should be negligible for training.
    # NOTE: all models here use PIXEL_STD=[1,1,1]
# NOTE: Most of the BN models here are no longer used. We use the
# re-converted pre-trained models under detectron2 model zoo instead.
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
"FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
"FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
}
C2_DETECTRON_PATH_FORMAT = (
"{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
)
C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
# format: {model_name} -> part of the url
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
"35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
"48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
"35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
"35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
"36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog._get_c2_detectron_baseline(name)
if name.startswith("ImageNetPretrained/"):
return ModelCatalog._get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog: {}".format(name))
@staticmethod
def _get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
name = name[len("ImageNetPretrained/") :]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def _get_c2_detectron_baseline(name):
name = name[len("Caffe2Detectron/COCO/") :]
url = ModelCatalog.C2_DETECTRON_MODELS[name]
if "keypoint_rcnn" in name:
dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
else:
dataset = ModelCatalog.C2_DATASET_COCO
if "35998355/rpn_R-50-C4_1x" in name:
            # this one model is somehow different from the others ...
type = "rpn"
else:
type = "generalized_rcnn"
# Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
)
return url
class ModelCatalogHandler(PathHandler):
"""
Resolve URL like catalog://.
"""
PREFIX = "catalog://"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path, **kwargs):
logger = logging.getLogger(__name__)
catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
logger.info("Catalog entry {} points to {}".format(path, catalog_path))
return PathManager.get_local_path(catalog_path, **kwargs)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
PathManager.register_handler(ModelCatalogHandler())
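# --- Illustrative usage (not part of the original file): with the handler registered
# above, catalog:// URLs resolve transparently. A minimal sketch:
if __name__ == "__main__":
    url = ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
    print(url)  # https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl
    # Fetching through the handler (requires network access):
    # local_path = PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")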
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/catalog.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
    # Unify both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
    # layer1-4 are used by torchvision; we instead follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
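# Illustrative example (not part of the original file), tracing the rules above:
#   convert_basic_c2_names(["res2_0_branch2a_bn_b"])
#     -> "res2.0.branch2a.bn.b"      (underscores become dots)
#     -> "res2.0.branch2a.bn.bias"   (".b" suffix becomes ".bias")
#     -> "res2.0.branch2a.norm.bias" ("bn" unified to "norm")
#     -> ["res2.0.conv1.norm.bias"]  (".branch2a." renamed to ".conv1.")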
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note that the current matching is not symmetric:
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
    Match names between the two state dicts, and return a new ckpt_state_dict whose names
    are converted to match model_state_dict using heuristics. The returned dict can later
    be loaded with the fvcore checkpointer.
    If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
    model and will be renamed first.
    Strategy: suppose that the models we create will have prefixes appended to
    each of their keys, for example due to an extra level of nesting that the original
    pre-trained weights from ImageNet won't contain. For example, model.state_dict()
    might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
    res2.conv1.weight. We thus want to match both parameters together.
    For that, for each model weight we look among all loaded keys for one that is
    a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one whose matched name is longest.
    For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
        # A matched ckpt_key must be a complete suffix, i.e. one that begins right after a '.'.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
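# Illustrative example (not part of the original file):
#   _group_str(["res2_0_bn_beta", "res2_0_bn_gamma"]) -> "res2_0_bn_{beta,gamma}"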
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/c2_model_loading.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# File:
from . import catalog as _UNUSED # register the handler
from .detection_checkpoint import DetectionCheckpointer
from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import pickle
import torch
from fvcore.common.checkpoint import Checkpointer
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
class DetectionCheckpointer(Checkpointer):
"""
Same as :class:`Checkpointer`, but is able to:
1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
2. correctly load checkpoints that are only available on the master worker
"""
def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
is_main_process = comm.is_main_process()
super().__init__(
model,
save_dir,
save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
**checkpointables,
)
self.path_manager = PathManager
def load(self, path, *args, **kwargs):
need_sync = False
if path and isinstance(self.model, DistributedDataParallel):
logger = logging.getLogger(__name__)
path = self.path_manager.get_local_path(path)
has_file = os.path.isfile(path)
all_has_file = comm.all_gather(has_file)
if not all_has_file[0]:
raise OSError(f"File {path} not found on main worker.")
if not all(all_has_file):
logger.warning(
f"Not all workers can read checkpoint {path}. "
"Training may fail to fully resume."
)
# TODO: broadcast the checkpoint file contents from main
# worker, and load from it instead.
need_sync = True
if not has_file:
path = None # don't load if not readable
ret = super().load(path, *args, **kwargs)
if need_sync:
logger.info("Broadcasting model states from main worker ...")
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
return ret
def _load_file(self, filename):
if filename.endswith(".pkl"):
with PathManager.open(filename, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "model" in data and "__author__" in data:
# file is in Detectron2 model zoo format
self.logger.info("Reading a file from '{}'".format(data["__author__"]))
return data
else:
# assume file is from Caffe2 / Detectron1 model zoo
if "blobs" in data:
# Detection models have "blobs", but ImageNet models don't
data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
elif filename.endswith(".pyth"):
# assume file is from pycls; no one else seems to use the ".pyth" extension
with PathManager.open(filename, "rb") as f:
data = torch.load(f)
assert (
"model_state" in data
), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
model_state = {
k: v
for k, v in data["model_state"].items()
if not k.endswith("num_batches_tracked")
}
return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return loaded
def _load_model(self, checkpoint):
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
checkpoint["model"] = align_and_update_state_dicts(
self.model.state_dict(),
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
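# --- Illustrative usage (not part of the original file): loading a Caffe2 .pkl
# checkpoint into a detectron2 model exercises the matching heuristics above.
# A minimal sketch, assuming `model` is an already-built detectron2 model:
#   checkpointer = DetectionCheckpointer(model, save_dir="output")
#   checkpointer.load("catalog://ImageNetPretrained/MSRA/R-50")  # or a local path
#   checkpointer.save("model_init")  # writes output/model_init.pth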
|
banmo-main
|
third_party/detectron2_old/detectron2/checkpoint/detection_checkpoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from functools import lru_cache
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from torchvision.ops import deform_conv2d
from detectron2 import _C
from .wrappers import _NewEmptyTensorOp
class _DeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
weight,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
im2col_step=64,
):
if input is not None and input.dim() != 4:
raise ValueError(
"Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
)
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.im2col_step = im2col_step
ctx.save_for_backward(input, offset, weight)
output = input.new_empty(
_DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
)
ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
if not input.is_cuda:
if deformable_groups != 1:
raise NotImplementedError(
"Deformable Conv with deformable_groups != 1 is not supported on CPUs!"
)
return deform_conv2d(
input, offset, weight, stride=stride, padding=padding, dilation=dilation
)
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
_C.deform_conv_forward(
input,
weight,
offset,
output,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset, weight = ctx.saved_tensors
grad_input = grad_offset = grad_weight = None
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
_C.deform_conv_backward_input(
input,
offset,
grad_output,
grad_input,
grad_offset,
weight,
ctx.bufs_[0],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
if ctx.needs_input_grad[2]:
grad_weight = torch.zeros_like(weight)
_C.deform_conv_backward_filter(
input,
offset,
grad_output,
grad_weight,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
1,
cur_im2col_step,
)
return grad_input, grad_offset, grad_weight, None, None, None, None, None, None
@staticmethod
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range(input.dim() - 2):
in_size = input.size(d + 2)
pad = padding[d]
kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
stride_ = stride[d]
output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
if not all(map(lambda s: s > 0, output_size)):
raise ValueError(
"convolution input is too small (output would be {})".format(
"x".join(map(str, output_size))
)
)
return output_size
@staticmethod
@lru_cache(maxsize=128)
def _cal_im2col_step(input_size, default_size):
"""
        Calculate a proper im2col step size, which should divide input_size and not be
        larger than default_size. Meanwhile the step size should be as large as possible
        for efficiency, so we choose the largest divisor of input_size that is no larger
        than default_size.
        :param input_size: input batch size.
:param default_size: default preferred im2col step size.
:return: the largest proper step size.
"""
if input_size <= default_size:
return input_size
best_step = 1
for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
if input_size % step == 0:
if input_size // step <= default_size:
return input_size // step
best_step = step
return best_step
class _ModulatedDeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
mask,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
):
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.with_bias = bias is not None
if not ctx.with_bias:
bias = input.new_empty(1) # fake tensor
if not input.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
if (
weight.requires_grad
or mask.requires_grad
or offset.requires_grad
or input.requires_grad
):
ctx.save_for_backward(input, offset, mask, weight, bias)
output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
ctx._bufs = [input.new_empty(0), input.new_empty(0)]
_C.modulated_deform_conv_forward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
output,
ctx._bufs[1],
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
grad_mask = torch.zeros_like(mask)
grad_weight = torch.zeros_like(weight)
grad_bias = torch.zeros_like(bias)
_C.modulated_deform_conv_backward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
ctx._bufs[1],
grad_input,
grad_weight,
grad_bias,
grad_offset,
grad_mask,
grad_output,
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
if not ctx.with_bias:
grad_bias = None
return (
grad_input,
grad_offset,
grad_mask,
grad_weight,
grad_bias,
None,
None,
None,
None,
None,
)
@staticmethod
def _infer_shape(ctx, input, weight):
n = input.size(0)
channels_out = weight.size(0)
height, width = input.shape[2:4]
kernel_h, kernel_w = weight.shape[2:4]
height_out = (
height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
) // ctx.stride + 1
width_out = (
width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
) // ctx.stride + 1
return n, channels_out, height_out, width_out
deform_conv = _DeformConv.apply
modulated_deform_conv = _ModulatedDeformConv.apply
class DeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=False,
norm=None,
activation=None,
):
"""
Deformable convolution from :paper:`deformconv`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(DeformConv, self).__init__()
assert not bias
        assert in_channels % groups == 0, "in_channels {} is not divisible by groups {}".format(
            in_channels, groups
        )
        assert (
            out_channels % groups == 0
        ), "out_channels {} is not divisible by groups {}".format(out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
)
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
def forward(self, x, offset):
if x.numel() == 0:
            # When the input is empty, we want to return an empty tensor with the "correct"
            # shape, so that subsequent operations that check the tensor's shape do not fail.
            # This computes the height and width of the output tensor.
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = deform_conv(
x,
offset,
self.weight,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=False"
return tmpstr
class ModulatedDeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=True,
norm=None,
activation=None,
):
"""
Modulated deformable convolution from :paper:`deformconv2`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.deformable_groups = deformable_groups
self.with_bias = bias
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
)
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x, offset, mask):
if x.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(
                    # stride/padding/dilation are stored as ints in this class; _pair()
                    # makes them iterable so the empty-input path matches DeformConv above
                    x.shape[-2:], _pair(self.padding), _pair(self.dilation),
                    self.kernel_size, _pair(self.stride),
                )
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = modulated_deform_conv(
x,
offset,
mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=" + str(self.with_bias)
return tmpstr
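# --- Illustrative usage (not part of the original file): DeformConv consumes an
# offset map with 2 * deformable_groups * kH * kW channels, typically predicted by a
# plain conv. A minimal sketch (CUDA-only, per the kernels above):
if __name__ == "__main__":
    x = torch.randn(1, 64, 32, 32).cuda()
    dconv = DeformConv(64, 64, kernel_size=3, padding=1).cuda()
    offset_pred = nn.Conv2d(64, 2 * 1 * 3 * 3, kernel_size=3, padding=1).cuda()
    out = dconv(x, offset_pred(x))  # shape: (1, 64, 32, 32)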
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/deform_conv.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
"""
A simple structure that contains basic shape specification about a tensor.
It is often used as the auxiliary inputs/outputs of models,
to complement the lack of shape inference ability among pytorch modules.
Attributes:
channels:
height:
width:
stride:
"""
def __new__(cls, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
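# Illustrative example (not part of the original file): unspecified fields default
# to None, so partial shape information is fine, e.g.
#   ShapeSpec(channels=256, stride=4)  # height=None, width=None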
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/shape_spec.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torchvision.ops import roi_align
# NOTE: torchvision's RoIAlign has a different default aligned=False
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of input samples to take for each output
                sample. 0 to take samples densely.
            aligned (bool): if False, use the legacy implementation in
                Detectron. If True, align the results more precisely.
Note:
The meaning of aligned=True:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
pixel indices and therefore it uses pixels with a slightly incorrect alignment
(relative to our pixel model) when performing bilinear interpolation.
With `aligned=True`,
we first appropriately scale the ROI and then shift it by -0.5
prior to calling roi_align. This produces the correct neighbors; see
detectron2/tests/test_roi_align.py for verification.
            In practice this difference does not affect the model's performance when
            ROIAlign is used together with conv layers.
"""
super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
from torchvision import __version__
version = tuple(int(x) for x in __version__.split(".")[:2])
# https://github.com/pytorch/vision/pull/2438
assert version >= (0, 7), "Require torchvision >= 0.7"
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
"""
assert rois.dim() == 2 and rois.size(1) == 5
if input.is_quantized:
input = input.dequantize()
return roi_align(
input,
rois.to(dtype=input.dtype),
self.output_size,
self.spatial_scale,
self.sampling_ratio,
self.aligned,
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
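# --- Illustrative usage (not part of the original file): pooling a fixed-size
# feature from boxes given in input-image coordinates. A minimal sketch, assuming a
# stride-16 feature map:
if __name__ == "__main__":
    import torch

    pooler = ROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16, sampling_ratio=0, aligned=True)
    feats = torch.randn(2, 256, 50, 50)
    rois = torch.tensor([[0.0, 16.0, 16.0, 320.0, 240.0]])  # [batch_index, x1, y1, x2, y2]
    print(pooler(feats, rois).shape)  # torch.Size([1, 256, 7, 7])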
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/roi_align.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from detectron2 import _C
class _ROIAlignRotated(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
        # use the normalized (h, w) pair so an int output_size also works
        output = _C.roi_align_rotated_forward(
            input, roi, spatial_scale, ctx.output_size[0], ctx.output_size[1], sampling_ratio
        )
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois,) = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_align_rotated_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None, None
roi_align_rotated = _ROIAlignRotated.apply
class ROIAlignRotated(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of input samples to take for each output
                sample. 0 to take samples densely.
Note:
ROIAlignRotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
        super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx6 boxes. First column is the index into N.
The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
"""
assert rois.dim() == 2 and rois.size(1) == 6
orig_dtype = input.dtype
if orig_dtype == torch.float16:
input = input.float()
rois = rois.float()
return roi_align_rotated(
input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
).to(dtype=orig_dtype)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
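# Example (editor's sketch; needs detectron2's compiled _C extension, and the
# values are illustrative): rois are (batch_idx, x_ctr, y_ctr, width, height,
# angle_degrees).
layer = ROIAlignRotated(output_size=(7, 7), spatial_scale=1.0, sampling_ratio=0)
feat = torch.randn(1, 256, 64, 64)
rois = torch.tensor([[0.0, 32.0, 32.0, 20.0, 10.0, 45.0]])
out = layer(feat, rois)  # shape: (1, 256, 7, 7)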
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/roi_align_rotated.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import torch
from torchvision.ops import boxes as box_ops
from torchvision.ops import nms # BC-compat
from detectron2.utils.env import TORCH_VERSION
if TORCH_VERSION < (1, 7):
from detectron2 import _C
nms_rotated_func = _C.nms_rotated
else:
nms_rotated_func = torch.ops.detectron2.nms_rotated
def batched_nms(
boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float
):
"""
Same as torchvision.ops.boxes.batched_nms, but safer.
"""
assert boxes.shape[-1] == 4
# TODO may need better strategy.
# Investigate after having a fully-cuda NMS op.
if len(boxes) < 40000:
# fp16 does not have enough range for batched NMS
return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)
result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
for id in torch.jit.annotate(List[int], torch.unique(idxs).cpu().tolist()):
mask = (idxs == id).nonzero().view(-1)
keep = nms(boxes[mask], scores[mask], iou_threshold)
result_mask[mask[keep]] = True
keep = result_mask.nonzero().view(-1)
keep = keep[scores[keep].argsort(descending=True)]
return keep
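# Example (editor's sketch; values are illustrative): two heavily overlapping
# boxes survive together because they carry different category indices, so
# NMS never compares them against each other.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
idxs = torch.tensor([0, 1])  # per-box category ids
keep = batched_nms(boxes, scores, idxs, iou_threshold=0.5)  # tensor([0, 1])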
# Note: this function (nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def nms_rotated(boxes, scores, iou_threshold):
"""
Performs non-maximum suppression (NMS) on the rotated boxes according
to their intersection-over-union (IoU).
Rotated NMS iteratively removes lower scoring rotated boxes which have an
IoU greater than iou_threshold with another (higher scoring) rotated box.
Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
can be representing completely different objects in certain tasks, e.g., OCR.
As for the question of whether rotated-NMS should treat them as faraway boxes
    even though their IoU is 1, it depends on the application and/or ground truth annotation.
As an extreme example, consider a single character v and the square box around it.
    If the angle is 0 degrees, the object (text) would be read as 'v';
If the angle is 90 degrees, the object (text) would become '>';
If the angle is 180 degrees, the object (text) would become '^';
If the angle is 270/-90 degrees, the object (text) would become '<'
All of these cases have IoU of 1 to each other, and rotated NMS that only
uses IoU as criterion would only keep one of them with the highest score -
which, practically, still makes sense in most cases because typically
    only one of these orientations is the correct one. Also, it does not matter
    as much if the box is only used to classify the object (instead of transcribing
    it with a sequential OCR recognition model) later.
On the other hand, when we use IoU to filter proposals that are close to the
ground truth during training, we should definitely take the angle into account if
we know the ground truth is labeled with the strictly correct orientation (as in,
upside-down words are annotated with -180 degrees even though they can be covered
with a 0/90/-90 degree box, etc.)
The way the original dataset is annotated also matters. For example, if the dataset
is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
we can estimate a minimum rotated bounding box to this polygon, but there's no way
    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
    rotated boxes, with angles differing by 90 degrees from each other, covering exactly the
    same region). In that case we just have to use IoU to determine the box
    proximity (as many detection benchmarks, even for text, do) unless there are other
assumptions we can make (like width is always larger than height, or the object is not
rotated by more than 90 degrees CCW/CW, etc.)
In summary, not considering angles in rotated NMS seems to be a good option for now,
but we should be aware of its implications.
Args:
boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
(x_center, y_center, width, height, angle_degrees) format.
scores (Tensor[N]): Scores for each one of the rotated boxes
        iou_threshold (float): discards all overlapping rotated boxes with
            IoU greater than iou_threshold
Returns:
keep (Tensor): int64 tensor with the indices of the elements that have been kept
by Rotated NMS, sorted in decreasing order of scores
"""
return nms_rotated_func(boxes, scores, iou_threshold)
# Note: this function (batched_nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
"""
Performs non-maximum suppression in a batched fashion.
    Each index value corresponds to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 5]):
boxes where NMS will be performed. They
are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
        iou_threshold (float):
            discards all overlapping boxes
            with IoU greater than iou_threshold
Returns:
Tensor:
int64 tensor with the indices of the elements that have been kept
by NMS, sorted in decreasing order of scores
"""
assert boxes.shape[-1] == 5
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
boxes = boxes.float() # fp16 does not have enough range for batched NMS
# Strategy: in order to perform NMS independently per class,
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
    # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
    # which won't handle negative coordinates correctly.
    # Here, by also using min_coordinate, we make sure negative coordinates are
    # handled correctly.
max_coordinate = (
torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
).max()
min_coordinate = (
torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2
).min()
offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes
boxes_for_nms[:, :2] += offsets[:, None]
keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
return keep
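# Example (editor's sketch; needs detectron2's rotated-NMS op, and the values
# are illustrative): two identical rotated boxes in the same category collapse
# to the higher-scoring one.
boxes = torch.tensor([[5.0, 5.0, 4.0, 2.0, 30.0], [5.0, 5.0, 4.0, 2.0, 30.0]])
scores = torch.tensor([0.9, 0.8])
idxs = torch.tensor([0, 0])
keep = batched_nms_rotated(boxes, scores, idxs, iou_threshold=0.5)  # tensor([0])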
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/nms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm
from .deform_conv import DeformConv, ModulatedDeformConv
from .mask_ops import paste_masks_in_image
from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated
from .roi_align import ROIAlign, roi_align
from .roi_align_rotated import ROIAlignRotated, roi_align_rotated
from .shape_spec import ShapeSpec
from .wrappers import (
BatchNorm2d,
Conv2d,
ConvTranspose2d,
cat,
interpolate,
Linear,
nonzero_tuple,
cross_entropy,
)
from .blocks import CNNBlockBase, DepthwiseSeparableConv2d
from .aspp import ASPP
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from copy import deepcopy
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from .batch_norm import get_norm
from .blocks import DepthwiseSeparableConv2d
from .wrappers import Conv2d
class ASPP(nn.Module):
"""
Atrous Spatial Pyramid Pooling (ASPP).
"""
def __init__(
self,
in_channels,
out_channels,
dilations,
*,
norm,
activation,
pool_kernel_size=None,
dropout: float = 0.0,
use_depthwise_separable_conv=False,
):
"""
Args:
in_channels (int): number of input channels for ASPP.
out_channels (int): number of output channels.
dilations (list): a list of 3 dilations in ASPP.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format. norm is
applied to all conv layers except the conv following
global average pooling.
activation (callable): activation function.
            pool_kernel_size (tuple, list): the average pooling size (kh, kw)
                for image pooling layer in ASPP. If set to None, it always
                performs global average pooling. If not None, the input shape
                in forward() must be divisible by it. It is recommended
                to use a fixed input feature size in training, and set this
                option to match this size, so that it performs global average
                pooling in training, and the size of the pooling window stays
                consistent in inference.
dropout (float): apply dropout on the output of ASPP. It is used in
the official DeepLab implementation with a rate of 0.1:
https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa
use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
"""
        super().__init__()
assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
self.pool_kernel_size = pool_kernel_size
self.dropout = dropout
use_bias = norm == ""
self.convs = nn.ModuleList()
# conv 1x1
self.convs.append(
Conv2d(
in_channels,
out_channels,
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
)
weight_init.c2_xavier_fill(self.convs[-1])
# atrous convs
for dilation in dilations:
if use_depthwise_separable_conv:
self.convs.append(
DepthwiseSeparableConv2d(
in_channels,
out_channels,
kernel_size=3,
padding=dilation,
dilation=dilation,
norm1=norm,
activation1=deepcopy(activation),
norm2=norm,
activation2=deepcopy(activation),
)
)
else:
self.convs.append(
Conv2d(
in_channels,
out_channels,
kernel_size=3,
padding=dilation,
dilation=dilation,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
)
weight_init.c2_xavier_fill(self.convs[-1])
# image pooling
        # We do not add BatchNorm because the spatial resolution is 1x1,
        # even though the original TF implementation has BatchNorm.
if pool_kernel_size is None:
image_pooling = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
)
else:
image_pooling = nn.Sequential(
nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
)
weight_init.c2_xavier_fill(image_pooling[1])
self.convs.append(image_pooling)
self.project = Conv2d(
5 * out_channels,
out_channels,
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, out_channels),
activation=deepcopy(activation),
)
weight_init.c2_xavier_fill(self.project)
def forward(self, x):
size = x.shape[-2:]
if self.pool_kernel_size is not None:
if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
raise ValueError(
"`pool_kernel_size` must be divisible by the shape of inputs. "
"Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
)
res = []
for conv in self.convs:
res.append(conv(x))
res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
res = torch.cat(res, dim=1)
res = self.project(res)
res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
return res
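# Example (editor's sketch; channel counts and dilations are illustrative): an
# ASPP head with the common output-stride-16 dilations, BN, and ReLU, applied
# to a dummy backbone feature map.
aspp = ASPP(in_channels=2048, out_channels=256, dilations=[6, 12, 18],
            norm="BN", activation=F.relu, dropout=0.1)
feat = torch.randn(2, 2048, 32, 32)
out = aspp(feat)  # shape: (2, 256, 32, 32)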
|
banmo-main
|
third_party/detectron2_old/detectron2/layers/aspp.py
|