python_code | repo_name | file_path
---|---|---|
import torch
import torch.nn.functional as F
def vertex_normals(vertices, faces):
"""
:param vertices: [batch size, number of vertices, 3]
:param faces: [batch size, number of faces, 3]
:return: [batch size, number of vertices, 3]
"""
assert (vertices.ndimension() == 3)
assert (faces.ndimension() == 3)
assert (vertices.shape[0] == faces.shape[0])
assert (vertices.shape[2] == 3)
assert (faces.shape[2] == 3)
bs, nv = vertices.shape[:2]
bs, nf = faces.shape[:2]
device = vertices.device
normals = torch.zeros(bs * nv, 3).to(device)
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] # expanded faces
vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]
faces = faces.view(-1, 3)
vertices_faces = vertices_faces.view(-1, 3, 3)
normals.index_add_(0, faces[:, 1].long(),
torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1]))
normals.index_add_(0, faces[:, 2].long(),
torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2]))
normals.index_add_(0, faces[:, 0].long(),
torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0]))
normals = F.normalize(normals, eps=1e-6, dim=1)
normals = normals.reshape((bs, nv, 3))
# pytorch only supports long and byte tensors for indexing
return normals
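# A minimal usage sketch (illustrative, not part of the repository file above), assuming random
# vertices and faces on CPU; shapes follow the docstring of vertex_normals.
if __name__ == "__main__":
    verts = torch.rand(2, 100, 3)                 # [batch, n_vertices, 3]
    tris = torch.randint(0, 100, (2, 50, 3))      # [batch, n_faces, 3] vertex indices
    vn = vertex_normals(verts, tris)
    print(vn.shape)                               # torch.Size([2, 100, 3]); rows are unit length (or zero)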
| banmo-main | third_party/softras/soft_renderer/functional/vertex_normals.py |
from torch import nn
from torch.autograd import Function
import torch
import importlib
import os
chamfer_found = importlib.find_loader("chamfer_3D") is not None
if not chamfer_found:
## Cool trick from https://github.com/chrdiller
print("Jitting Chamfer 3D")
from torch.utils.cpp_extension import load
chamfer_3D = load(name="chamfer_3D",
sources=[
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]),
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer3D.cu"]),
])
print("Loaded JIT 3D CUDA chamfer distance")
else:
import chamfer_3D
print("Loaded compiled 3D CUDA chamfer distance")
# Chamfer's distance module @thibaultgroueix
# GPU tensors only
class chamfer_3DFunction(Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
device = xyz1.device
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
dist1 = dist1.to(device)
dist2 = dist2.to(device)
idx1 = idx1.to(device)
idx2 = idx2.to(device)
torch.cuda.set_device(device)
chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
return dist1, dist2, idx1, idx2
@staticmethod
def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
device = graddist1.device
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
gradxyz1 = gradxyz1.to(device)
gradxyz2 = gradxyz2.to(device)
chamfer_3D.backward(
xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2
)
return gradxyz1, gradxyz2
class chamfer_3DDist(nn.Module):
def __init__(self):
super(chamfer_3DDist, self).__init__()
def forward(self, input1, input2):
input1 = input1.contiguous()
input2 = input2.contiguous()
return chamfer_3DFunction.apply(input1, input2)
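# A minimal usage sketch (illustrative, not part of the repository file above). The kernels are
# CUDA-only, so this assumes a GPU and a built (or JIT-compiled) chamfer_3D extension.
if __name__ == "__main__" and torch.cuda.is_available():
    cham = chamfer_3DDist()
    pts_a = torch.rand(4, 1024, 3, device="cuda")   # [batch, n_points, 3]
    pts_b = torch.rand(4, 2048, 3, device="cuda")
    dist_a, dist_b, idx_a, idx_b = cham(pts_a, pts_b)
    # Symmetric chamfer loss from the per-point nearest-neighbor distances in both directions.
    loss = dist_a.mean() + dist_b.mean()
    print(loss.item())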
| banmo-main | third_party/chamfer3D/dist_chamfer_3D.py |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='chamfer_3D',
ext_modules=[
CUDAExtension('chamfer_3D', [
"/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']),
"/".join(__file__.split('/')[:-1] + ['chamfer3D.cu']),
]),
],
cmdclass={
'build_ext': BuildExtension
})
| banmo-main | third_party/chamfer3D/setup.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
from torch.utils.hipify import hipify_python
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"
def get_version():
init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
init_py = open(init_py_path, "r").readlines()
version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
version = version_line.split("=")[-1].strip().strip("'\"")
# The following is used to build release packages.
# Users should never use it.
suffix = os.getenv("D2_VERSION_SUFFIX", "")
version = version + suffix
if os.getenv("BUILD_NIGHTLY", "0") == "1":
from datetime import datetime
date_str = datetime.today().strftime("%y%m%d")
version = version + ".dev" + date_str
new_init_py = [l for l in init_py if not l.startswith("__version__")]
new_init_py.append('__version__ = "{}"\n'.format(version))
with open(init_py_path, "w") as f:
f.write("".join(new_init_py))
return version
def get_extensions():
this_dir = path.dirname(path.abspath(__file__))
extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
main_source = path.join(extensions_dir, "vision.cpp")
sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = (
True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
)
hipify_ver = (
[int(x) for x in torch.utils.hipify.__version__.split(".")]
if hasattr(torch.utils.hipify, "__version__")
else [0, 0, 0]
)
if is_rocm_pytorch and hipify_ver < [1, 0, 0]: # TODO not needed since pt1.8
# Earlier versions of hipification and extension modules were not
# transparent, i.e. would require an explicit call to hipify, and the
# hipification would introduce "hip" subdirectories, possibly changing
# the relationship between source and header files.
# This path is maintained for backwards compatibility.
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="/detectron2/layers/csrc/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
path.join(extensions_dir, "hip", "*.hip")
)
shutil.copy(
"detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
"detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
)
shutil.copy(
"detectron2/layers/csrc/deformable/deform_conv.h",
"detectron2/layers/csrc/deformable/hip/deform_conv.h",
)
sources = [main_source] + sources
sources = [
s
for s in sources
if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
]
else:
# common code between cuda and rocm platforms,
# for hipify version [1,0,0] and later.
source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
"FORCE_CUDA", "0"
) == "1":
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-O3",
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
define_macros += [("WITH_HIP", None)]
extra_compile_args["nvcc"] = []
if torch_ver < [1, 7]:
# supported by https://github.com/pytorch/pytorch/pull/43931
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
include_dirs = [extensions_dir]
ext_modules = [
extension(
"detectron2._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
def get_model_zoo_configs() -> List[str]:
"""
Return a list of configs to include in package for model zoo. Copy over these configs inside
detectron2/model_zoo.
"""
# Use absolute paths while symlinking.
source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
destination = path.join(
path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
)
# Symlink the config directory inside package to have a cleaner pip install.
# Remove stale symlink/directory from a previous build.
if path.exists(source_configs_dir):
if path.islink(destination):
os.unlink(destination)
elif path.isdir(destination):
shutil.rmtree(destination)
if not path.exists(destination):
try:
os.symlink(source_configs_dir, destination)
except OSError:
# Fall back to copying if symlink fails: ex. on Windows.
shutil.copytree(source_configs_dir, destination)
config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob(
"configs/**/*.py", recursive=True
)
return config_paths
# For projects that are relatively small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
"detectron2.projects.point_rend": "projects/PointRend/point_rend",
"detectron2.projects.deeplab": "projects/DeepLab/deeplab",
"detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
}
setup(
name="detectron2",
version=get_version(),
author="FAIR",
url="https://github.com/facebookresearch/detectron2",
description="Detectron2 is FAIR's next-generation research "
"platform for object detection and segmentation.",
packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
package_dir=PROJECTS,
package_data={"detectron2.model_zoo": get_model_zoo_configs()},
python_requires=">=3.6",
install_requires=[
# Do not add opencv here. Just like pytorch, users should install
# opencv themselves, preferably via the OS's package manager, or by
# choosing the proper pypi package name at https://github.com/skvark/opencv-python
"termcolor>=1.1",
"Pillow>=7.1", # or use pillow-simd for better performance
"yacs>=0.1.6",
"tabulate",
"cloudpickle",
"matplotlib",
"tqdm>4.29.0",
"tensorboard",
# Lock version of fvcore/iopath because they may have breaking changes
# NOTE: when updating fvcore/iopath version, make sure fvcore depends
# on compatible version of iopath.
"fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable
"iopath>=0.1.7,<0.1.9",
"pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi
"future", # used by caffe2
"pydot", # used to save caffe2 SVGs
"dataclasses; python_version<'3.7'",
"omegaconf>=2.1.0rc1",
"hydra-core>=1.1.0rc1",
"black==21.4b2",
# When adding to the list, may need to update docs/requirements.txt
# or add mock in docs/conf.py
],
extras_require={
"all": [
"shapely",
"pygments>=2.2",
"psutil",
"panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
],
"dev": [
"flake8==3.8.1",
"isort==4.3.21",
"flake8-bugbear",
"flake8-comprehensions",
],
},
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
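# A hedged build-and-check note (illustrative, not part of setup.py above). The extension name
# detectron2._C matches get_extensions(); the commands are a typical editable install.
#
#   pip install -e .                 # or: FORCE_CUDA=1 pip install -e .  (see get_extensions)
#   python -c "import detectron2; from detectron2 import _C; print(detectron2.__version__)"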
| banmo-main | third_party/detectron2_old/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
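# A minimal usage sketch (illustrative, not part of the repository file above). The config and
# image paths are assumptions; run_on_image expects a BGR image, as documented above.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    cfg = get_cfg()
    cfg.merge_from_file("configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml")
    cfg.freeze()
    demo = VisualizationDemo(cfg, parallel=False)
    img = cv2.imread("input.jpg")                        # BGR, as read by OpenCV
    predictions, vis = demo.run_on_image(img)
    cv2.imwrite("vis.jpg", vis.get_image()[:, :, ::-1])  # visualizer returns RGB; convert back to BGR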
| banmo-main | third_party/detectron2_old/demo/predictor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == "mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
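# A hedged invocation sketch (illustrative, not part of the repository file above). All flags are
# defined by get_parser(); the config, weight, and input paths are assumptions.
#
#   python demo.py --config-file configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml \
#       --input "directory/*.jpg" --output results/ --confidence-threshold 0.5 \
#       --opts MODEL.WEIGHTS /path/to/model.pkl
#   python demo.py --config-file configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml \
#       --video-input input.mp4 --output results/ --opts MODEL.WEIGHTS /path/to/model.pkl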
| banmo-main | third_party/detectron2_old/demo/demo.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle as pkl
import sys
import torch
"""
Usage:
# download one of the ResNet{18,34,50,101,152} models from torchvision:
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
# run the conversion
./convert-torchvision-to-d2.py r50.pth r50.pkl
# Then, use r50.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/r50.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
RESNETS:
DEPTH: 50
STRIDE_IN_1X1: False
INPUT:
FORMAT: "RGB"
These models typically produce slightly worse results than the
pre-trained ResNets we use in official configs, which are the
original ResNet models released by MSRA.
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
newmodel = {}
for k in list(obj.keys()):
old_k = k
if "layer" not in k:
k = "stem." + k
for t in [1, 2, 3, 4]:
k = k.replace("layer{}".format(t), "res{}".format(t + 1))
for t in [1, 2, 3]:
k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
k = k.replace("downsample.0", "shortcut")
k = k.replace("downsample.1", "shortcut.norm")
print(old_k, "->", k)
newmodel[k] = obj.pop(old_k).detach().numpy()
res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
if obj:
print("Unconverted keys:", obj.keys())
| banmo-main | third_party/detectron2_old/tools/convert-torchvision-to-d2.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A script to benchmark builtin models.
Note: this script has an extra dependency of psutil.
"""
import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.solver import build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway.
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger(distributed_rank=comm.get_rank())
return cfg
def RAM_msg():
vram = psutil.virtual_memory()
return "RAM Usage: {:.2f}/{:.2f} GB".format(
(vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3
)
def benchmark_data(args):
cfg = setup(args)
logger.info("After spawning " + RAM_msg())
timer = Timer()
dataloader = build_detection_train_loader(cfg)
logger.info("Initialize loader using {} seconds.".format(timer.seconds()))
timer.reset()
itr = iter(dataloader)
for i in range(10): # warmup
next(itr)
if i == 0:
startup_time = timer.seconds()
logger.info("Startup time: {} seconds".format(startup_time))
timer = Timer()
max_iter = 1000
for _ in tqdm.trange(max_iter):
next(itr)
logger.info(
"{} iters ({} images) in {} seconds.".format(
max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()
)
)
# test for a few more rounds
for k in range(10):
logger.info(f"Iteration {k} " + RAM_msg())
timer = Timer()
max_iter = 1000
for _ in tqdm.trange(max_iter):
next(itr)
logger.info(
"{} iters ({} images) in {} seconds.".format(
max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()
)
)
def benchmark_train(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
optimizer = build_optimizer(cfg, model)
checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
checkpointer.load(cfg.MODEL.WEIGHTS)
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 2
data_loader = build_detection_train_loader(cfg)
dummy_data = list(itertools.islice(data_loader, 100))
def f():
data = DatasetFromList(dummy_data, copy=False, serialize=False)
while True:
yield from data
max_iter = 400
trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer)
trainer.register_hooks(
[hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])]
)
trainer.train(1, max_iter)
@torch.no_grad()
def benchmark_eval(args):
cfg = setup(args)
model = build_model(cfg)
model.eval()
logger.info("Model:\n{}".format(model))
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)
def f():
while True:
yield from dummy_data
for k in range(5): # warmup
model(dummy_data[k])
max_iter = 300
timer = Timer()
with tqdm.tqdm(total=max_iter) as pbar:
for idx, d in enumerate(f()):
if idx == max_iter:
break
model(d)
pbar.update()
logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--task", choices=["train", "eval", "data"], required=True)
args = parser.parse_args()
assert not args.eval_only
logger.info("Environment info:\n" + collect_env_info())
if args.task == "data":
f = benchmark_data
print("Initial " + RAM_msg())
elif args.task == "train":
"""
Note: training speed may not be representative.
The training cost of an R-CNN model varies with the content of the data
and the quality of the model.
"""
f = benchmark_train
elif args.task == "eval":
f = benchmark_eval
# only benchmark single-GPU inference.
assert args.num_gpus == 1 and args.num_machines == 1
launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,))
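# A hedged invocation sketch (illustrative, not part of the repository file above). --task is
# defined above; the remaining flags come from default_argument_parser(). The config path and
# weights are assumptions.
#
#   python benchmark.py --task data  --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
#   python benchmark.py --task train --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml --num-gpus 1
#   python benchmark.py --task eval  --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
#       --num-gpus 1 MODEL.WEIGHTS /path/to/model.pkl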
| banmo-main | third_party/detectron2_old/tools/benchmark.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.data import detection_utils as utils
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def setup(args):
cfg = get_cfg()
if args.config_file:
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.freeze()
return cfg
def parse_args(in_args=None):
parser = argparse.ArgumentParser(description="Visualize ground-truth data")
parser.add_argument(
"--source",
choices=["annotation", "dataloader"],
required=True,
help="visualize the annotations or the data loader (with pre-processing)",
)
parser.add_argument("--config-file", metavar="FILE", help="path to config file")
parser.add_argument("--output-dir", default="./", help="path to output directory")
parser.add_argument("--show", action="store_true", help="show output in a window")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser.parse_args(in_args)
if __name__ == "__main__":
args = parse_args()
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup(args)
dirname = args.output_dir
os.makedirs(dirname, exist_ok=True)
metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
def output(vis, fname):
if args.show:
print(fname)
cv2.imshow("window", vis.get_image()[:, :, ::-1])
cv2.waitKey()
else:
filepath = os.path.join(dirname, fname)
print("Saving to {} ...".format(filepath))
vis.save(filepath)
scale = 1.0
if args.source == "dataloader":
train_data_loader = build_detection_train_loader(cfg)
for batch in train_data_loader:
for per_image in batch:
# Pytorch tensor is in (C, H, W) format
img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
visualizer = Visualizer(img, metadata=metadata, scale=scale)
target_fields = per_image["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
vis = visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes", None),
masks=target_fields.get("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
output(vis, str(per_image["image_id"]) + ".jpg")
else:
dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
if cfg.MODEL.KEYPOINT_ON:
dicts = filter_images_with_few_keypoints(dicts, 1)
for dic in tqdm.tqdm(dicts):
img = utils.read_image(dic["file_name"], "RGB")
visualizer = Visualizer(img, metadata=metadata, scale=scale)
vis = visualizer.draw_dataset_dict(dic)
output(vis, os.path.basename(dic["file_name"]))
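# A hedged invocation sketch (illustrative, not part of the repository file above); the flags are
# the ones defined in parse_args(), and the config path is an assumption.
#
#   python visualize_data.py --source annotation --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml --output-dir vis_gt/
#   python visualize_data.py --source dataloader --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml --show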
| banmo-main | third_party/detectron2_old/tools/visualize_data.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Detectron2 training script with a plain training loop.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is able to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend that you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
Compared to "train_net.py", this script supports fewer default features.
It also includes fewer abstractions and is therefore easier to add custom logic to.
"""
import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
results = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
data_loader = build_detection_test_loader(cfg, dataset_name)
evaluator = get_evaluator(
cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
)
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
def do_train(cfg, model, resume=False):
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
checkpointer = DetectionCheckpointer(
model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
)
start_iter = (
checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
)
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
# compared to "train_net.py", we do not support accurate timing and
# precise BN here, because they are not trivial to implement in a small training loop
data_loader = build_detection_train_loader(cfg)
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
storage.iter = iteration
loss_dict = model(data)
losses = sum(loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
scheduler.step()
if (
cfg.TEST.EVAL_PERIOD > 0
and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
and iteration != max_iter - 1
):
do_test(cfg, model)
# Compared to "train_net.py", the test results are not dumped to EventStorage
comm.synchronize()
if iteration - start_iter > 5 and (
(iteration + 1) % 20 == 0 or iteration == max_iter - 1
):
for writer in writers:
writer.write()
periodic_checkpointer.step(iteration)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(
cfg, args
) # if you don't like any of the default setup, write your own setup code
return cfg
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
if args.eval_only:
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
return do_test(cfg, model)
distributed = comm.get_world_size() > 1
if distributed:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
do_train(cfg, model, resume=args.resume)
return do_test(cfg, model)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| banmo-main | third_party/detectron2_old/tools/plain_train_net.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import numpy as np
import os
from collections import defaultdict
import cv2
import tqdm
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, BoxMode, Instances
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def create_instances(predictions, image_size):
ret = Instances(image_size)
score = np.asarray([x["score"] for x in predictions])
chosen = (score > args.conf_threshold).nonzero()[0]
score = score[chosen]
bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4)
bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen])
ret.scores = score
ret.pred_boxes = Boxes(bbox)
ret.pred_classes = labels
try:
ret.pred_masks = [predictions[i]["segmentation"] for i in chosen]
except KeyError:
pass
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script that visualizes the json predictions from COCO or LVIS dataset."
)
parser.add_argument("--input", required=True, help="JSON file produced by the model")
parser.add_argument("--output", required=True, help="output directory")
parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
args = parser.parse_args()
logger = setup_logger()
with PathManager.open(args.input, "r") as f:
predictions = json.load(f)
pred_by_image = defaultdict(list)
for p in predictions:
pred_by_image[p["image_id"]].append(p)
dicts = list(DatasetCatalog.get(args.dataset))
metadata = MetadataCatalog.get(args.dataset)
if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
def dataset_id_map(ds_id):
return metadata.thing_dataset_id_to_contiguous_id[ds_id]
elif "lvis" in args.dataset:
# LVIS results are in the same format as COCO results, but have a different
# mapping from dataset category id to contiguous category id in [0, #categories - 1]
def dataset_id_map(ds_id):
return ds_id - 1
else:
raise ValueError("Unsupported dataset: {}".format(args.dataset))
os.makedirs(args.output, exist_ok=True)
for dic in tqdm.tqdm(dicts):
img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
basename = os.path.basename(dic["file_name"])
predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
vis = Visualizer(img, metadata)
vis_pred = vis.draw_instance_predictions(predictions).get_image()
vis = Visualizer(img, metadata)
vis_gt = vis.draw_dataset_dict(dic).get_image()
concat = np.concatenate((vis_pred, vis_gt), axis=1)
cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
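# An illustrative sketch (not part of the repository file above) of the per-instance records the
# script expects in the --input JSON, based on the keys read by create_instances(); the values
# here are made up.
#
#   [
#     {"image_id": 139, "category_id": 1, "score": 0.92,
#      "bbox": [10.0, 20.0, 50.0, 80.0],         # XYWH_ABS, converted to XYXY_ABS above
#      "segmentation": {...}},                    # optional; attached as pred_masks when present
#     ...
#   ]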
| banmo-main | third_party/detectron2_old/tools/visualize_json_results.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table # can also try flop_count_str
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger(name="fvcore")
setup_logger()
return cfg
def do_flop(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
total_flops = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
flops = FlopCountAnalysis(model, data)
if idx > 0:
flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
counts += flops.by_operator()
total_flops.append(flops.total())
logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
logger.info(
"Average GFlops for each type of operators:\n"
+ str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()])
)
logger.info(
"Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9)
)
def do_activation(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
total_activations = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
count = activation_count_operators(model, data)
counts += count
total_activations.append(sum(count.values()))
logger.info(
"(Million) Activations for Each Type of Operators:\n"
+ str([(k, v / idx) for k, v in counts.items()])
)
logger.info(
"Total (Million) Activations: {}±{}".format(
np.mean(total_activations), np.std(total_activations)
)
)
def do_parameter(cfg):
model = build_model(cfg)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
model = build_model(cfg)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"-n",
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
| banmo-main | third_party/detectron2_old/tools/analyze_model.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Training script using the new "LazyConfig" python config files.
This script reads a given python config file and runs the training or evaluation.
It can be used to train any model or dataset as long as it can be
instantiated by the recursive construction defined in the given config file.
Besides lazy construction of models, dataloaders, etc., this script expects a
few common configuration parameters currently defined in "configs/common/train.py".
To add more complicated training logic, you can easily add other configs
in the config file and implement a new train_net.py to handle them.
"""
import logging
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
AMPTrainer,
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
logger = logging.getLogger("detectron2")
def do_test(cfg, model):
if "evaluator" in cfg.dataloader:
ret = inference_on_dataset(
model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
)
print_csv_format(ret)
return ret
def do_train(args, cfg):
"""
Args:
cfg: an object with the following attributes:
model: instantiate to a module
dataloader.{train,test}: instantiate to dataloaders
dataloader.evaluator: instantiate to evaluator for test set
optimizer: instantiate to an optimizer
lr_multiplier: instantiate to a fvcore scheduler
train: other misc config defined in `common_train.py`, including:
output_dir (str)
init_checkpoint (str)
amp.enabled (bool)
max_iter (int)
eval_period, log_period (int)
device (str)
checkpointer (dict)
ddp (dict)
"""
model = instantiate(cfg.model)
logger = logging.getLogger("detectron2")
logger.info("Model:\n{}".format(model))
model.to(cfg.train.device)
cfg.optimizer.params.model = model
optim = instantiate(cfg.optimizer)
train_loader = instantiate(cfg.dataloader.train)
model = create_ddp_model(model, **cfg.train.ddp)
trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim)
checkpointer = DetectionCheckpointer(
model,
cfg.train.output_dir,
optimizer=optim,
trainer=trainer,
)
trainer.register_hooks(
[
hooks.IterationTimer(),
hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
if comm.is_main_process()
else None,
hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
hooks.PeriodicWriter(
default_writers(cfg.train.output_dir, cfg.train.max_iter),
period=cfg.train.log_period,
)
if comm.is_main_process()
else None,
]
)
checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
if args.resume and checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
start_iter = trainer.iter + 1
else:
start_iter = 0
trainer.train(start_iter, cfg.train.max_iter)
def main(args):
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
default_setup(cfg, args)
if args.eval_only:
model = instantiate(cfg.model)
model.to(cfg.train.device)
model = create_ddp_model(model)
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
print(do_test(cfg, model))
else:
do_train(args, cfg)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
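# A hedged sketch (illustrative, not part of the repository file above) of the attribute tree
# do_train() expects, following its docstring; real configs build these nodes lazily (see
# "configs/common/train.py" referenced in the module docstring).
#
#   cfg.model                  # instantiated into an nn.Module
#   cfg.dataloader.train       # training loader
#   cfg.dataloader.test        # test loader
#   cfg.dataloader.evaluator   # evaluator for the test set (optional; see do_test)
#   cfg.optimizer              # optimizer; cfg.optimizer.params.model is filled in at runtime
#   cfg.lr_multiplier          # LR scheduler wrapped by hooks.LRScheduler
#   cfg.train.output_dir, cfg.train.init_checkpoint, cfg.train.max_iter
#   cfg.train.amp.enabled, cfg.train.eval_period, cfg.train.log_period
#   cfg.train.device, cfg.train.checkpointer, cfg.train.ddp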
| banmo-main | third_party/detectron2_old/tools/lazyconfig_train_net.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A main training script.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend that you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. It may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
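# A hedged invocation sketch (illustrative, not part of the repository file above). The flags come
# from default_argument_parser(); config and weight paths are assumptions, and TEST.AUG.ENABLED
# matches the TTA hook registered above.
#
#   python train_net.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml --num-gpus 8
#   python train_net.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
#       --eval-only MODEL.WEIGHTS /path/to/model.pkl TEST.AUG.ENABLED True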
| banmo-main | third_party/detectron2_old/tools/train_net.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from typing import Dict, List, Tuple
import torch
from torch import Tensor, nn
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, detection_utils
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
from detectron2.export import (
Caffe2Tracer,
TracingAdapter,
add_export_config,
dump_torchscript_IR,
scripting_with_instances,
)
from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.structures import Boxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
def setup_cfg(args):
cfg = get_cfg()
# cuda context is initialized before creating dataloader, so we don't fork anymore
cfg.DATALOADER.NUM_WORKERS = 0
cfg = add_export_config(cfg)
add_pointrend_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def export_caffe2_tracing(cfg, torch_model, inputs):
tracer = Caffe2Tracer(cfg, torch_model, inputs)
if args.format == "caffe2":
caffe2_model = tracer.export_caffe2()
caffe2_model.save_protobuf(args.output)
# draw the caffe2 graph
caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
return caffe2_model
elif args.format == "onnx":
import onnx
onnx_model = tracer.export_onnx()
onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
elif args.format == "torchscript":
ts_model = tracer.export_torchscript()
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
# experimental. API not yet final
def export_scripting(torch_model):
assert TORCH_VERSION >= (1, 8)
fields = {
"proposal_boxes": Boxes,
"objectness_logits": Tensor,
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
"pred_masks": Tensor,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
assert args.format == "torchscript", "Scripting only supports torchscript format."
class ScriptableAdapterBase(nn.Module):
        # Use this adapter to work around https://github.com/pytorch/pytorch/issues/46944
        # by not returning instances but dicts. Otherwise the exported model is not deployable.
def __init__(self):
super().__init__()
self.model = torch_model
self.eval()
if isinstance(torch_model, GeneralizedRCNN):
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model.inference(inputs, do_postprocess=False)
return [i.get_fields() for i in instances]
else:
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model(inputs)
return [i.get_fields() for i in instances]
ts_model = scripting_with_instances(ScriptableAdapter(), fields)
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
    # TODO: inference in Python is currently missing the postprocessing glue code
return None
# experimental. API not yet final
def export_tracing(torch_model, inputs):
assert TORCH_VERSION >= (1, 8)
image = inputs[0]["image"]
inputs = [{"image": image}] # remove other unused keys
if isinstance(torch_model, GeneralizedRCNN):
def inference(model, inputs):
# use do_postprocess=False so it returns ROI mask
inst = model.inference(inputs, do_postprocess=False)[0]
return [{"instances": inst}]
else:
inference = None # assume that we just call the model directly
traceable_model = TracingAdapter(torch_model, inputs, inference)
if args.format == "torchscript":
ts_model = torch.jit.trace(traceable_model, (image,))
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
elif args.format == "onnx":
# NOTE onnx export currently failing in pytorch
with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f:
torch.onnx.export(traceable_model, (image,), f)
logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
logger.info("Outputs schema: " + str(traceable_model.outputs_schema))
if args.format != "torchscript":
return None
if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
return None
def eval_wrapper(inputs):
"""
The exported model does not contain the final resize step, which is typically
unused in deployment but needed for evaluation. We add it manually here.
"""
input = inputs[0]
instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"]
postprocessed = detector_postprocess(instances, input["height"], input["width"])
return [{"instances": postprocessed}]
return eval_wrapper
def get_sample_inputs(args):
if args.sample_image is None:
# get a first batch from dataset
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))
return first_batch
else:
# get a sample data
original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT)
# Do same preprocessing as DefaultPredictor
aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
height, width = original_image.shape[:2]
image = aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
# Sample ready
sample_inputs = [inputs]
return sample_inputs
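# Usage sketch (assumption): a typical invocation of this exporter, using the options
# defined below, looks like
#   ./export_model.py --config-file <config.yaml> --export-method tracing \
#       --format torchscript --output ./output MODEL.WEIGHTS <weights.pkl> MODEL.DEVICE cpu
# <config.yaml> and <weights.pkl> are placeholders for your own config and checkpoint.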
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Export a model for deployment.")
parser.add_argument(
"--format",
choices=["caffe2", "onnx", "torchscript"],
help="output format",
default="caffe2",
)
parser.add_argument(
"--export-method",
choices=["caffe2_tracing", "tracing", "scripting"],
help="Method to export models",
default="caffe2_tracing",
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument("--sample-image", default=None, type=str, help="sample image for input")
parser.add_argument("--run-eval", action="store_true")
parser.add_argument("--output", help="output directory for the converted model")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
logger = setup_logger()
logger.info("Command line arguments: " + str(args))
PathManager.mkdirs(args.output)
# Disable respecialization on new shapes. Otherwise --run-eval will be slow
torch._C._jit_set_bailout_depth(1)
cfg = setup_cfg(args)
# create a torch model
torch_model = build_model(cfg)
DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
torch_model.eval()
# get sample data
sample_inputs = get_sample_inputs(args)
# convert and save model
if args.export_method == "caffe2_tracing":
exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs)
elif args.export_method == "scripting":
exported_model = export_scripting(torch_model)
elif args.export_method == "tracing":
exported_model = export_tracing(torch_model, sample_inputs)
# run evaluation with the converted model
if args.run_eval:
assert exported_model is not None, (
"Python inference is not yet implemented for "
f"export_method={args.export_method}, format={args.format}."
)
logger.info("Running evaluation ... this takes a long time if you export to CPU.")
dataset = cfg.DATASETS.TEST[0]
data_loader = build_detection_test_loader(cfg, dataset)
# NOTE: hard-coded evaluator. change to the evaluator for your dataset
evaluator = COCOEvaluator(dataset, output_dir=args.output)
metrics = inference_on_dataset(exported_model, data_loader, evaluator)
print_csv_format(metrics)
|
banmo-main
|
third_party/detectron2_old/tools/deploy/export_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from tridentnet import add_tridentnet_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, output_dir=output_folder)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tridentnet_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY
from detectron2.modeling.proposal_generator.rpn import RPN
from detectron2.structures import ImageList
@PROPOSAL_GENERATOR_REGISTRY.register()
class TridentRPN(RPN):
"""
Trident RPN subnetwork.
"""
def __init__(self, cfg, input_shape):
super(TridentRPN, self).__init__(cfg, input_shape)
self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
def forward(self, images, features, gt_instances=None):
"""
See :class:`RPN.forward`.
"""
num_branch = self.num_branch if self.training or not self.trident_fast else 1
# Duplicate images and gt_instances for all branches in TridentNet.
all_images = ImageList(
torch.cat([images.tensor] * num_branch), images.image_sizes * num_branch
)
all_gt_instances = gt_instances * num_branch if gt_instances is not None else None
return super(TridentRPN, self).forward(all_images, features, all_gt_instances)
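# Behaviour sketch (illustration only, derived from the forward above): with
# NUM_BRANCH == 3 and a batch of 2 images, `all_images.tensor` has batch dimension 6
# (the 2 images repeated 3 times) and `all_gt_instances` has length 6, so the parent
# RPN treats each branch's feature copies as if they came from independent images.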
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/trident_rpn.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_tridentnet_config(cfg):
"""
Add config for tridentnet.
"""
_C = cfg
_C.MODEL.TRIDENT = CN()
# Number of branches for TridentNet.
_C.MODEL.TRIDENT.NUM_BRANCH = 3
# Specify the dilations for each branch.
_C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3]
# Specify the stage for applying trident blocks. Default stage is Res4 according to the
# TridentNet paper.
_C.MODEL.TRIDENT.TRIDENT_STAGE = "res4"
# Specify the test branch index TridentNet Fast inference:
# - use -1 to aggregate results of all branches during inference.
# - otherwise, only using specified branch for fast inference. Recommended setting is
# to use the middle branch.
_C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1
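# Usage sketch (assumption): these options are consumed together with a base config, e.g.
#   cfg = get_cfg()
#   add_tridentnet_config(cfg)
#   cfg.MODEL.TRIDENT.TEST_BRANCH_IDX = -1  # aggregate all branches at test time
# `get_cfg` comes from detectron2.config; the value -1 is just one illustrative choice.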
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase
from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock
from .trident_conv import TridentConv
__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"]
class TridentBottleneckBlock(ResNetBlockBase):
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
num_branch=3,
dilations=(1, 2, 3),
concat_output=False,
test_branch_idx=-1,
):
"""
Args:
num_branch (int): the number of branches in TridentNet.
dilations (tuple): the dilations of multiple branches in TridentNet.
            concat_output (bool): whether to concatenate the outputs of the multiple
                branches in TridentNet. Use 'True' for the last trident block.
"""
super().__init__(in_channels, out_channels, stride)
assert num_branch == len(dilations)
self.num_branch = num_branch
self.concat_output = concat_output
self.test_branch_idx = test_branch_idx
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = TridentConv(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
paddings=dilations,
bias=False,
groups=num_groups,
dilations=dilations,
num_branch=num_branch,
test_branch_idx=test_branch_idx,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
def forward(self, x):
num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
if not isinstance(x, list):
x = [x] * num_branch
out = [self.conv1(b) for b in x]
out = [F.relu_(b) for b in out]
out = self.conv2(out)
out = [F.relu_(b) for b in out]
out = [self.conv3(b) for b in out]
if self.shortcut is not None:
shortcut = [self.shortcut(b) for b in x]
else:
shortcut = x
out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)]
out = [F.relu_(b) for b in out]
if self.concat_output:
out = torch.cat(out)
return out
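# Shape sketch (assumption, not executed here): a single block maps one tensor (or a
# list with one tensor per branch) to a list of per-branch tensors, e.g.
#   block = TridentBottleneckBlock(256, 256, bottleneck_channels=64,
#                                  num_branch=3, dilations=(1, 2, 3))
#   outs = block(torch.randn(2, 256, 32, 32))   # list of 3 tensors, each (2, 256, 32, 32)
# With concat_output=True (last block of the stage) the per-branch outputs are instead
# concatenated along the batch dimension.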
def make_trident_stage(block_class, num_blocks, **kwargs):
"""
Create a resnet stage by creating many blocks for TridentNet.
"""
concat_output = [False] * (num_blocks - 1) + [True]
kwargs["concat_output_per_block"] = concat_output
return ResNet.make_stage(block_class, num_blocks, **kwargs)
@BACKBONE_REGISTRY.register()
def build_trident_resnet_backbone(cfg, input_shape):
"""
Create a ResNet instance from config for TridentNet.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETS.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
# fmt: off
out_features = cfg.MODEL.RESNETS.OUT_FEATURES
depth = cfg.MODEL.RESNETS.DEPTH
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS
trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE
test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX
# fmt: on
assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
out_stage_idx = [res_stage_idx[f] for f in out_features]
trident_stage_idx = res_stage_idx[trident_stage]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"bottleneck_channels": bottleneck_channels,
"out_channels": out_channels,
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation,
}
if stage_idx == trident_stage_idx:
            assert not deform_on_per_stage[
                idx
            ], "Deformable conv is not yet supported in Trident blocks."
stage_kargs["block_class"] = TridentBottleneckBlock
stage_kargs["num_branch"] = num_branch
stage_kargs["dilations"] = branch_dilations
stage_kargs["test_branch_idx"] = test_branch_idx
stage_kargs.pop("dilation")
elif deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
blocks = (
make_trident_stage(**stage_kargs)
if stage_idx == trident_stage_idx
else ResNet.make_stage(**stage_kargs)
)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
if freeze_at >= stage_idx:
for block in blocks:
block.freeze()
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features)
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/trident_backbone.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_tridentnet_config
from .trident_backbone import (
TridentBottleneckBlock,
build_trident_resnet_backbone,
make_trident_stage,
)
from .trident_rpn import TridentRPN
from .trident_rcnn import TridentRes5ROIHeads, TridentStandardROIHeads
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.layers import batched_nms
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads
from detectron2.structures import Instances
def merge_branch_instances(instances, num_branch, nms_thresh, topk_per_image):
"""
Merge detection results from different branches of TridentNet.
Return detection results by applying non-maximum suppression (NMS) on bounding boxes
    and keep the unsuppressed boxes and other per-instance data (e.g. masks), if any.
Args:
instances (list[Instances]): A list of N * num_branch instances that store detection
results. Contain N images and each image has num_branch instances.
num_branch (int): Number of branches used for merging detection results for each image.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
results: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections after merging results from multiple
branches.
"""
if num_branch == 1:
return instances
batch_size = len(instances) // num_branch
results = []
for i in range(batch_size):
instance = Instances.cat([instances[i + batch_size * j] for j in range(num_branch)])
# Apply per-class NMS
keep = batched_nms(
instance.pred_boxes.tensor, instance.scores, instance.pred_classes, nms_thresh
)
keep = keep[:topk_per_image]
result = instance[keep]
results.append(result)
return results
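# Layout sketch (illustration, derived from the indexing above): the incoming list is
# branch-major, i.e. with N images and B branches, instances[i + N * j] holds the
# detections of image i from branch j. For N=2, B=3 the 6 entries are regrouped into
# 2 outputs, each obtained by concatenating the image's 3 branch results and applying
# per-class NMS.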
@ROI_HEADS_REGISTRY.register()
class TridentRes5ROIHeads(Res5ROIHeads):
"""
The TridentNet ROIHeads in a typical "C4" R-CNN model.
See :class:`Res5ROIHeads`.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
def forward(self, images, features, proposals, targets=None):
"""
See :class:`Res5ROIHeads.forward`.
"""
num_branch = self.num_branch if self.training or not self.trident_fast else 1
all_targets = targets * num_branch if targets is not None else None
pred_instances, losses = super().forward(images, features, proposals, all_targets)
del images, all_targets, targets
if self.training:
return pred_instances, losses
else:
pred_instances = merge_branch_instances(
pred_instances,
num_branch,
self.box_predictor.test_nms_thresh,
self.box_predictor.test_topk_per_image,
)
return pred_instances, {}
@ROI_HEADS_REGISTRY.register()
class TridentStandardROIHeads(StandardROIHeads):
"""
The `StandardROIHeads` for TridentNet.
See :class:`StandardROIHeads`.
"""
def __init__(self, cfg, input_shape):
super(TridentStandardROIHeads, self).__init__(cfg, input_shape)
self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
def forward(self, images, features, proposals, targets=None):
"""
        See :class:`StandardROIHeads.forward`.
"""
# Use 1 branch if using trident_fast during inference.
num_branch = self.num_branch if self.training or not self.trident_fast else 1
# Duplicate targets for all branches in TridentNet.
all_targets = targets * num_branch if targets is not None else None
pred_instances, losses = super().forward(images, features, proposals, all_targets)
del images, all_targets, targets
if self.training:
return pred_instances, losses
else:
pred_instances = merge_branch_instances(
pred_instances,
num_branch,
self.box_predictor.test_nms_thresh,
self.box_predictor.test_topk_per_image,
)
return pred_instances, {}
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/trident_rcnn.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from detectron2.layers.wrappers import _NewEmptyTensorOp
class TridentConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
paddings=0,
dilations=1,
groups=1,
num_branch=1,
test_branch_idx=-1,
bias=False,
norm=None,
activation=None,
):
super(TridentConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.num_branch = num_branch
self.stride = _pair(stride)
self.groups = groups
self.with_bias = bias
if isinstance(paddings, int):
paddings = [paddings] * self.num_branch
if isinstance(dilations, int):
dilations = [dilations] * self.num_branch
self.paddings = [_pair(padding) for padding in paddings]
self.dilations = [_pair(dilation) for dilation in dilations]
self.test_branch_idx = test_branch_idx
self.norm = norm
self.activation = activation
assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
)
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, inputs):
num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
assert len(inputs) == num_branch
        if inputs[0].numel() == 0:
            # Empty-batch path: compute the would-be output spatial size from standard
            # conv arithmetic. Branch 0's padding/dilation are used; TridentNet sets
            # padding == dilation, so every branch yields the same spatial size.
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(
                    inputs[0].shape[-2:],
                    self.paddings[0],
                    self.dilations[0],
                    self.kernel_size,
                    self.stride,
                )
            ]
            output_shape = [inputs[0].shape[0], self.weight.shape[0]] + output_shape
            return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs]
if self.training or self.test_branch_idx == -1:
outputs = [
F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups)
for input, dilation, padding in zip(inputs, self.dilations, self.paddings)
]
else:
outputs = [
F.conv2d(
inputs[0],
self.weight,
self.bias,
self.stride,
self.paddings[self.test_branch_idx],
self.dilations[self.test_branch_idx],
self.groups,
)
]
if self.norm is not None:
outputs = [self.norm(x) for x in outputs]
if self.activation is not None:
outputs = [self.activation(x) for x in outputs]
return outputs
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", num_branch=" + str(self.num_branch)
tmpstr += ", test_branch_idx=" + str(self.test_branch_idx)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", paddings=" + str(self.paddings)
tmpstr += ", dilations=" + str(self.dilations)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", bias=" + str(self.with_bias)
return tmpstr
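# Usage sketch (assumption, not executed here): all branches share one weight tensor
# and differ only in padding/dilation, so the parameter count stays that of a single
# conv, e.g.
#   conv = TridentConv(64, 64, kernel_size=3, paddings=(1, 2, 3),
#                      dilations=(1, 2, 3), num_branch=3)
#   outs = conv([torch.randn(1, 64, 16, 16) for _ in range(3)])
#   # 3 outputs of shape (1, 64, 16, 16): with padding == dilation, spatial size is kept.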
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/tridentnet/trident_conv.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import os
import sys
from timeit import default_timer as timer
from typing import Any, ClassVar, Dict, List
import torch
from detectron2.data.catalog import DatasetCatalog
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from densepose.structures import DensePoseDataRelative
from densepose.utils.dbhelper import EntrySelector
from densepose.utils.logger import verbosity_to_level
from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import BoundingBoxVisualizer
from densepose.vis.densepose_data_points import (
DensePoseDataCoarseSegmentationVisualizer,
DensePoseDataPointsIVisualizer,
DensePoseDataPointsUVisualizer,
DensePoseDataPointsVisualizer,
DensePoseDataPointsVVisualizer,
)
DOC = """Query DB - a tool to print / visualize data from a database
"""
LOGGER_NAME = "query_db"
logger = logging.getLogger(LOGGER_NAME)
_ACTION_REGISTRY: Dict[str, "Action"] = {}
class Action(object):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
parser.add_argument(
"-v",
"--verbosity",
action="count",
help="Verbose mode. Multiple -v options increase the verbosity.",
)
def register_action(cls: type):
"""
Decorator for action classes to automate action registration
"""
global _ACTION_REGISTRY
_ACTION_REGISTRY[cls.COMMAND] = cls
return cls
class EntrywiseAction(Action):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(EntrywiseAction, cls).add_arguments(parser)
parser.add_argument(
"dataset", metavar="<dataset>", help="Dataset name (e.g. densepose_coco_2014_train)"
)
parser.add_argument(
"selector",
metavar="<selector>",
help="Dataset entry selector in the form field1[:type]=value1[,"
"field2[:type]=value_min-value_max...] which selects all "
"entries from the dataset that satisfy the constraints",
)
parser.add_argument(
"--max-entries", metavar="N", help="Maximum number of entries to process", type=int
)
@classmethod
def execute(cls: type, args: argparse.Namespace):
dataset = setup_dataset(args.dataset)
entry_selector = EntrySelector.from_string(args.selector)
context = cls.create_context(args)
if args.max_entries is not None:
for _, entry in zip(range(args.max_entries), dataset):
if entry_selector(entry):
cls.execute_on_entry(entry, context)
else:
for entry in dataset:
if entry_selector(entry):
cls.execute_on_entry(entry, context)
@classmethod
def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
context = {}
return context
@register_action
class PrintAction(EntrywiseAction):
"""
Print action that outputs selected entries to stdout
"""
COMMAND: ClassVar[str] = "print"
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. ")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(PrintAction, cls).add_arguments(parser)
@classmethod
def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
import pprint
printer = pprint.PrettyPrinter(indent=2, width=200, compact=True)
printer.pprint(entry)
@register_action
class ShowAction(EntrywiseAction):
"""
Show action that visualizes selected entries on an image
"""
COMMAND: ClassVar[str] = "show"
VISUALIZERS: ClassVar[Dict[str, object]] = {
"dp_segm": DensePoseDataCoarseSegmentationVisualizer(),
"dp_i": DensePoseDataPointsIVisualizer(),
"dp_u": DensePoseDataPointsUVisualizer(),
"dp_v": DensePoseDataPointsVVisualizer(),
"dp_pts": DensePoseDataPointsVisualizer(),
"bbox": BoundingBoxVisualizer(),
}
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(ShowAction, cls).add_arguments(parser)
parser.add_argument(
"visualizations",
metavar="<visualizations>",
help="Comma separated list of visualizations, possible values: "
"[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
)
parser.add_argument(
"--output",
metavar="<image_file>",
default="output.png",
help="File name to save output to",
)
@classmethod
def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
import cv2
import numpy as np
image_fpath = PathManager.get_local_path(entry["file_name"])
image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE)
image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry)
visualizer = context["visualizer"]
image_vis = visualizer.visualize(image, datas)
entry_idx = context["entry_idx"] + 1
out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
cv2.imwrite(out_fname, image_vis)
logger.info(f"Output saved to {out_fname}")
context["entry_idx"] += 1
@classmethod
def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
base, ext = os.path.splitext(fname_base)
return base + ".{0:04d}".format(entry_idx) + ext
@classmethod
def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
vis_specs = args.visualizations.split(",")
visualizers = []
for vis_spec in vis_specs:
vis = cls.VISUALIZERS[vis_spec]
visualizers.append(vis)
context = {
"vis_specs": vis_specs,
"visualizer": CompoundVisualizer(visualizers),
"out_fname": args.output,
"entry_idx": 0,
}
return context
@classmethod
def _extract_data_for_visualizers_from_entry(
cls: type, vis_specs: List[str], entry: Dict[str, Any]
):
dp_list = []
bbox_list = []
for annotation in entry["annotations"]:
is_valid, _ = DensePoseDataRelative.validate_annotation(annotation)
if not is_valid:
continue
bbox = torch.as_tensor(annotation["bbox"])
bbox_list.append(bbox)
dp_data = DensePoseDataRelative(annotation)
dp_list.append(dp_data)
datas = []
for vis_spec in vis_specs:
datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list))
return datas
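# Usage sketch (assumption): typical invocations of this tool, given the actions and
# arguments defined above, are
#   python query_db.py print <dataset_name> <selector> --max-entries 3
#   python query_db.py show <dataset_name> <selector> dp_segm,bbox --output vis.png
# where <dataset_name> and <selector> follow the help strings of EntrywiseAction.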
def setup_dataset(dataset_name):
logger.info("Loading dataset {}".format(dataset_name))
start = timer()
dataset = DatasetCatalog.get(dataset_name)
stop = timer()
logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start))
return dataset
def create_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=DOC,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
)
parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
subparsers = parser.add_subparsers(title="Actions")
for _, action in _ACTION_REGISTRY.items():
action.add_parser(subparsers)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
verbosity = args.verbosity if hasattr(args, "verbosity") else None
global logger
logger = setup_logger(name=LOGGER_NAME)
logger.setLevel(verbosity_to_level(verbosity))
args.func(args)
if __name__ == "__main__":
main()
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/query_db.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import logging
import os
import pickle
import sys
from typing import Any, ClassVar, Dict, List
import torch
from detectron2.config import CfgNode, get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures.instances import Instances
from detectron2.utils.logger import setup_logger
from densepose import add_densepose_config
from densepose.structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
from densepose.utils.logger import verbosity_to_level
from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.densepose_outputs_vertex import (
DensePoseOutputsTextureVisualizer,
DensePoseOutputsVertexVisualizer,
get_texture_atlases,
)
from densepose.vis.densepose_results import (
DensePoseResultsContourVisualizer,
DensePoseResultsFineSegmentationVisualizer,
DensePoseResultsUVisualizer,
DensePoseResultsVVisualizer,
)
from densepose.vis.densepose_results_textures import (
DensePoseResultsVisualizerWithTexture,
get_texture_atlas,
)
from densepose.vis.extractor import (
CompoundExtractor,
DensePoseOutputsExtractor,
DensePoseResultExtractor,
create_extractor,
)
DOC = """Apply Net - a tool to print / visualize DensePose results
"""
LOGGER_NAME = "apply_net"
logger = logging.getLogger(LOGGER_NAME)
_ACTION_REGISTRY: Dict[str, "Action"] = {}
class Action(object):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
parser.add_argument(
"-v",
"--verbosity",
action="count",
help="Verbose mode. Multiple -v options increase the verbosity.",
)
def register_action(cls: type):
"""
Decorator for action classes to automate action registration
"""
global _ACTION_REGISTRY
_ACTION_REGISTRY[cls.COMMAND] = cls
return cls
class InferenceAction(Action):
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(InferenceAction, cls).add_arguments(parser)
parser.add_argument("cfg", metavar="<config>", help="Config file")
parser.add_argument("model", metavar="<model>", help="Model file")
parser.add_argument("input", metavar="<input>", help="Input data")
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
@classmethod
def execute(cls: type, args: argparse.Namespace):
logger.info(f"Loading config from {args.cfg}")
opts = []
cfg = cls.setup_config(args.cfg, args.model, args, opts)
logger.info(f"Loading model from {args.model}")
predictor = DefaultPredictor(cfg)
logger.info(f"Loading data from {args.input}")
file_list = cls._get_input_file_list(args.input)
if len(file_list) == 0:
logger.warning(f"No input images for {args.input}")
return
context = cls.create_context(args, cfg)
for file_name in file_list:
img = read_image(file_name, format="BGR") # predictor expects BGR image.
with torch.no_grad():
outputs = predictor(img)["instances"]
cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs)
cls.postexecute(context)
@classmethod
def setup_config(
cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
):
cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(config_fpath)
cfg.merge_from_list(args.opts)
if opts:
cfg.merge_from_list(opts)
cfg.MODEL.WEIGHTS = model_fpath
cfg.freeze()
return cfg
@classmethod
def _get_input_file_list(cls: type, input_spec: str):
if os.path.isdir(input_spec):
file_list = [
os.path.join(input_spec, fname)
for fname in os.listdir(input_spec)
if os.path.isfile(os.path.join(input_spec, fname))
]
elif os.path.isfile(input_spec):
file_list = [input_spec]
else:
file_list = glob.glob(input_spec)
return file_list
@register_action
class DumpAction(InferenceAction):
"""
Dump action that outputs results to a pickle file
"""
COMMAND: ClassVar[str] = "dump"
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(DumpAction, cls).add_arguments(parser)
parser.add_argument(
"--output",
metavar="<dump_file>",
default="results.pkl",
help="File name to save dump to",
)
@classmethod
def execute_on_outputs(
cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
):
image_fpath = entry["file_name"]
logger.info(f"Processing {image_fpath}")
result = {"file_name": image_fpath}
if outputs.has("scores"):
result["scores"] = outputs.get("scores").cpu()
if outputs.has("pred_boxes"):
result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
if outputs.has("pred_densepose"):
if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput):
extractor = DensePoseResultExtractor()
elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput):
extractor = DensePoseOutputsExtractor()
result["pred_densepose"] = extractor(outputs)[0]
context["results"].append(result)
@classmethod
def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode):
context = {"results": [], "out_fname": args.output}
return context
@classmethod
def postexecute(cls: type, context: Dict[str, Any]):
out_fname = context["out_fname"]
out_dir = os.path.dirname(out_fname)
if len(out_dir) > 0 and not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(out_fname, "wb") as hFile:
pickle.dump(context["results"], hFile)
logger.info(f"Output saved to {out_fname}")
@register_action
class ShowAction(InferenceAction):
"""
Show action that visualizes selected entries on an image
"""
COMMAND: ClassVar[str] = "show"
VISUALIZERS: ClassVar[Dict[str, object]] = {
"dp_contour": DensePoseResultsContourVisualizer,
"dp_segm": DensePoseResultsFineSegmentationVisualizer,
"dp_u": DensePoseResultsUVisualizer,
"dp_v": DensePoseResultsVVisualizer,
"dp_iuv_texture": DensePoseResultsVisualizerWithTexture,
"dp_cse_texture": DensePoseOutputsTextureVisualizer,
"dp_vertex": DensePoseOutputsVertexVisualizer,
"bbox": ScoredBoundingBoxVisualizer,
}
@classmethod
def add_parser(cls: type, subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
cls.add_arguments(parser)
parser.set_defaults(func=cls.execute)
@classmethod
def add_arguments(cls: type, parser: argparse.ArgumentParser):
super(ShowAction, cls).add_arguments(parser)
parser.add_argument(
"visualizations",
metavar="<visualizations>",
help="Comma separated list of visualizations, possible values: "
"[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
)
parser.add_argument(
"--min_score",
metavar="<score>",
default=0.8,
type=float,
help="Minimum detection score to visualize",
)
parser.add_argument(
"--nms_thresh", metavar="<threshold>", default=None, type=float, help="NMS threshold"
)
parser.add_argument(
"--texture_atlas",
metavar="<texture_atlas>",
default=None,
help="Texture atlas file (for IUV texture transfer)",
)
parser.add_argument(
"--texture_atlases_map",
metavar="<texture_atlases_map>",
default=None,
help="JSON string of a dict containing texture atlas files for each mesh",
)
parser.add_argument(
"--output",
metavar="<image_file>",
default="outputres.png",
help="File name to save output to",
)
@classmethod
def setup_config(
cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
):
opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
opts.append(str(args.min_score))
if args.nms_thresh is not None:
opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST")
opts.append(str(args.nms_thresh))
cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts)
return cfg
@classmethod
def execute_on_outputs(
cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
):
import cv2
import numpy as np
visualizer = context["visualizer"]
extractor = context["extractor"]
image_fpath = entry["file_name"]
logger.info(f"Processing {image_fpath}")
image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
data = extractor(outputs)
image_vis = visualizer.visualize(image, data)
entry_idx = context["entry_idx"] + 1
out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
out_dir = os.path.dirname(out_fname)
if len(out_dir) > 0 and not os.path.exists(out_dir):
os.makedirs(out_dir)
cv2.imwrite(out_fname, image_vis)
logger.info(f"Output saved to {out_fname}")
context["entry_idx"] += 1
@classmethod
def postexecute(cls: type, context: Dict[str, Any]):
pass
@classmethod
def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
base, ext = os.path.splitext(fname_base)
return base + ".{0:04d}".format(entry_idx) + ext
@classmethod
def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode) -> Dict[str, Any]:
vis_specs = args.visualizations.split(",")
visualizers = []
extractors = []
for vis_spec in vis_specs:
texture_atlas = get_texture_atlas(args.texture_atlas)
texture_atlases_dict = get_texture_atlases(args.texture_atlases_map)
vis = cls.VISUALIZERS[vis_spec](
cfg=cfg,
texture_atlas=texture_atlas,
texture_atlases_dict=texture_atlases_dict,
)
visualizers.append(vis)
extractor = create_extractor(vis)
extractors.append(extractor)
visualizer = CompoundVisualizer(visualizers)
extractor = CompoundExtractor(extractors)
context = {
"extractor": extractor,
"visualizer": visualizer,
"out_fname": args.output,
"entry_idx": 0,
}
return context
def create_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=DOC,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
)
parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
subparsers = parser.add_subparsers(title="Actions")
for _, action in _ACTION_REGISTRY.items():
action.add_parser(subparsers)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
verbosity = args.verbosity if hasattr(args, "verbosity") else None
global logger
logger = setup_logger(name=LOGGER_NAME)
logger.setLevel(verbosity_to_level(verbosity))
args.func(args)
if __name__ == "__main__":
main()
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/apply_net.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
DensePose Training Script.
This script is similar to the training script in detectron2/tools.
It is an example of how a user might use detectron2 for a new project.
"""
from datetime import timedelta
import detectron2.utils.comm as comm
from detectron2.config import get_cfg
from detectron2.engine import DEFAULT_TIMEOUT, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import verify_results
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from densepose import add_densepose_config
from densepose.engine import Trainer
from densepose.modeling.densepose_checkpoint import DensePoseCheckpointer
def setup(args):
cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
# Setup logger for "densepose" module
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose")
return cfg
def main(args):
cfg = setup(args)
# disable strict kwargs checking: allow one to specify path handle
# hints through kwargs, like timeout in DP evaluation
PathManager.set_strict_kwargs_checking(False)
if args.eval_only:
model = Trainer.build_model(cfg)
DensePoseCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
cfg = setup(args)
timeout = (
DEFAULT_TIMEOUT if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE else timedelta(hours=4)
)
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
timeout=timeout,
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/train_net.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# pyre-ignore-all-errors
from detectron2.config import CfgNode as CN
def add_dataset_category_config(cfg: CN):
"""
Add config for additional category-related dataset options
- category whitelisting
- category mapping
"""
_C = cfg
_C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True)
_C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True)
# class to mesh mapping
_C.DATASETS.CLASS_TO_MESH_NAME_MAPPING = CN(new_allowed=True)
def add_evaluation_config(cfg: CN):
_C = cfg
_C.DENSEPOSE_EVALUATION = CN()
# evaluator type, possible values:
# - "iou": evaluator for models that produce iou data
# - "cse": evaluator for models that produce cse data
_C.DENSEPOSE_EVALUATION.TYPE = "iou"
# storage for DensePose results, possible values:
# - "none": no explicit storage, all the results are stored in the
# dictionary with predictions, memory intensive;
# historically the default storage type
# - "ram": RAM storage, uses per-process RAM storage, which is
# reduced to a single process storage on later stages,
# less memory intensive
# - "file": file storage, uses per-process file-based storage,
# the least memory intensive, but may create bottlenecks
# on file system accesses
_C.DENSEPOSE_EVALUATION.STORAGE = "none"
    # minimum threshold for IOU values: the lower its value is,
# the more matches are produced (and the higher the AP score)
_C.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD = 0.5
# Non-distributed inference is slower (at inference time) but can avoid RAM OOM
_C.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE = True
# evaluate mesh alignment based on vertex embeddings, only makes sense in CSE context
_C.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT = False
# meshes to compute mesh alignment for
_C.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES = []
def add_bootstrap_config(cfg: CN):
""" """
_C = cfg
_C.BOOTSTRAP_DATASETS = []
_C.BOOTSTRAP_MODEL = CN()
_C.BOOTSTRAP_MODEL.WEIGHTS = ""
_C.BOOTSTRAP_MODEL.DEVICE = "cuda"
def get_bootstrap_dataset_config() -> CN:
_C = CN()
_C.DATASET = ""
# ratio used to mix data loaders
_C.RATIO = 0.1
# image loader
_C.IMAGE_LOADER = CN(new_allowed=True)
_C.IMAGE_LOADER.TYPE = ""
_C.IMAGE_LOADER.BATCH_SIZE = 4
_C.IMAGE_LOADER.NUM_WORKERS = 4
_C.IMAGE_LOADER.CATEGORIES = []
_C.IMAGE_LOADER.MAX_COUNT_PER_CATEGORY = 1_000_000
_C.IMAGE_LOADER.CATEGORY_TO_CLASS_MAPPING = CN(new_allowed=True)
# inference
_C.INFERENCE = CN()
# batch size for model inputs
_C.INFERENCE.INPUT_BATCH_SIZE = 4
# batch size to group model outputs
_C.INFERENCE.OUTPUT_BATCH_SIZE = 2
# sampled data
_C.DATA_SAMPLER = CN(new_allowed=True)
_C.DATA_SAMPLER.TYPE = ""
_C.DATA_SAMPLER.USE_GROUND_TRUTH_CATEGORIES = False
# filter
_C.FILTER = CN(new_allowed=True)
_C.FILTER.TYPE = ""
return _C
def load_bootstrap_config(cfg: CN):
"""
Bootstrap datasets are given as a list of `dict` that are not automatically
converted into CfgNode. This method processes all bootstrap dataset entries
and ensures that they are in CfgNode format and comply with the specification
"""
if not cfg.BOOTSTRAP_DATASETS:
return
bootstrap_datasets_cfgnodes = []
for dataset_cfg in cfg.BOOTSTRAP_DATASETS:
_C = get_bootstrap_dataset_config().clone()
_C.merge_from_other_cfg(CN(dataset_cfg))
bootstrap_datasets_cfgnodes.append(_C)
cfg.BOOTSTRAP_DATASETS = bootstrap_datasets_cfgnodes
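# Shape sketch (assumption, hypothetical values): a single BOOTSTRAP_DATASETS entry,
# as it might appear in a config before load_bootstrap_config normalizes it:
#   cfg.BOOTSTRAP_DATASETS = [
#       {"DATASET": "my_unlabeled_videos", "RATIO": 0.1,
#        "IMAGE_LOADER": {"TYPE": "some_loader", "BATCH_SIZE": 4, "NUM_WORKERS": 4}},
#   ]
# Only keys defined in get_bootstrap_dataset_config (or allowed by new_allowed=True)
# are meaningful; the names above are placeholders.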
def add_densepose_head_cse_config(cfg: CN):
"""
Add configuration options for Continuous Surface Embeddings (CSE)
"""
_C = cfg
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE = CN()
# Dimensionality D of the embedding space
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE = 16
# Embedder specifications for various mesh IDs
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS = CN(new_allowed=True)
# normalization coefficient for embedding distances
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA = 0.01
# normalization coefficient for geodesic distances
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA = 0.01
# embedding loss weight
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT = 0.6
# embedding loss name, currently the following options are supported:
# - EmbeddingLoss: cross-entropy on vertex labels
# - SoftEmbeddingLoss: cross-entropy on vertex label combined with
# Gaussian penalty on distance between vertices
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME = "EmbeddingLoss"
# optimizer hyperparameters
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR = 1.0
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR = 1.0
# Shape to shape cycle consistency loss parameters:
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
# shape to shape cycle consistency loss weight
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.025
# norm type used for loss computation
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
# normalization term for embedding similarity matrices
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE = 0.05
# maximum number of vertices to include into shape to shape cycle loss
# if negative or zero, all vertices are considered
# if positive, random subset of vertices of given size is considered
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES = 4936
# Pixel to shape cycle consistency loss parameters:
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
# pixel to shape cycle consistency loss weight
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.0001
# norm type used for loss computation
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
# map images to all meshes and back (if false, use only gt meshes from the batch)
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY = False
# Randomly select at most this number of pixels from every instance
# if negative or zero, all vertices are considered
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE = 100
# normalization factor for pixel to pixel distances (higher value = smoother distribution)
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA = 5.0
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX = 0.05
_C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL = 0.05
def add_densepose_head_config(cfg: CN):
"""
Add config for densepose head.
"""
_C = cfg
_C.MODEL.DENSEPOSE_ON = True
_C.MODEL.ROI_DENSEPOSE_HEAD = CN()
_C.MODEL.ROI_DENSEPOSE_HEAD.NAME = ""
_C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8
# Number of parts used for point labels
_C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24
_C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4
_C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512
_C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3
_C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2
_C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112
_C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2"
_C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28
_C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2
_C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2 # 15 or 2
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7
    # Loss weights for annotation masks (14 parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0
    # Loss weights for surface parts (24 parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0
# Loss weights for UV regression.
_C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01
# Coarse segmentation is trained using instance segmentation task data
_C.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS = False
# For Decoder
_C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True
_C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256
_C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256
_C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = ""
_C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4
# For DeepLab head
_C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN()
_C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN"
_C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0
# Predictor class name, must be registered in DENSEPOSE_PREDICTOR_REGISTRY
# Some registered predictors:
# "DensePoseChartPredictor": predicts segmentation and UV coordinates for predefined charts
# "DensePoseChartWithConfidencePredictor": predicts segmentation, UV coordinates
# and associated confidences for predefined charts (default)
# "DensePoseEmbeddingWithConfidencePredictor": predicts segmentation, embeddings
# and associated confidences for CSE
_C.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME = "DensePoseChartWithConfidencePredictor"
# Loss class name, must be registered in DENSEPOSE_LOSS_REGISTRY
# Some registered losses:
# "DensePoseChartLoss": loss for chart-based models that estimate
# segmentation and UV coordinates
# "DensePoseChartWithConfidenceLoss": loss for chart-based models that estimate
# segmentation, UV coordinates and the corresponding confidences (default)
_C.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME = "DensePoseChartWithConfidenceLoss"
# Confidences
# Enable learning UV confidences (variances) along with the actual values
_C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False})
# UV confidence lower bound
_C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01
# Enable learning segmentation confidences (variances) along with the actual values
_C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE = CN({"ENABLED": False})
# Segmentation confidence lower bound
_C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON = 0.01
# Statistical model type for confidence learning, possible values:
# - "iid_iso": statistically independent identically distributed residuals
# with isotropic covariance
# - "indep_aniso": statistically independent residuals with anisotropic
# covariances
_C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso"
# List of angles for rotation in data augmentation during training
_C.INPUT.ROTATION_ANGLES = [0]
_C.TEST.AUG.ROTATION_ANGLES = () # Rotation TTA
add_densepose_head_cse_config(cfg)
def add_hrnet_config(cfg: CN):
"""
Add config for HRNet backbone.
"""
_C = cfg
# For HigherHRNet w32
_C.MODEL.HRNET = CN()
_C.MODEL.HRNET.STEM_INPLANES = 64
_C.MODEL.HRNET.STAGE2 = CN()
_C.MODEL.HRNET.STAGE2.NUM_MODULES = 1
_C.MODEL.HRNET.STAGE2.NUM_BRANCHES = 2
_C.MODEL.HRNET.STAGE2.BLOCK = "BASIC"
_C.MODEL.HRNET.STAGE2.NUM_BLOCKS = [4, 4]
_C.MODEL.HRNET.STAGE2.NUM_CHANNELS = [32, 64]
_C.MODEL.HRNET.STAGE2.FUSE_METHOD = "SUM"
_C.MODEL.HRNET.STAGE3 = CN()
_C.MODEL.HRNET.STAGE3.NUM_MODULES = 4
_C.MODEL.HRNET.STAGE3.NUM_BRANCHES = 3
_C.MODEL.HRNET.STAGE3.BLOCK = "BASIC"
_C.MODEL.HRNET.STAGE3.NUM_BLOCKS = [4, 4, 4]
_C.MODEL.HRNET.STAGE3.NUM_CHANNELS = [32, 64, 128]
_C.MODEL.HRNET.STAGE3.FUSE_METHOD = "SUM"
_C.MODEL.HRNET.STAGE4 = CN()
_C.MODEL.HRNET.STAGE4.NUM_MODULES = 3
_C.MODEL.HRNET.STAGE4.NUM_BRANCHES = 4
_C.MODEL.HRNET.STAGE4.BLOCK = "BASIC"
_C.MODEL.HRNET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
_C.MODEL.HRNET.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
_C.MODEL.HRNET.STAGE4.FUSE_METHOD = "SUM"
_C.MODEL.HRNET.HRFPN = CN()
_C.MODEL.HRNET.HRFPN.OUT_CHANNELS = 256
def add_densepose_config(cfg: CN):
add_densepose_head_config(cfg)
add_hrnet_config(cfg)
add_bootstrap_config(cfg)
add_dataset_category_config(cfg)
add_evaluation_config(cfg)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .data.datasets import builtin # just to register data
from .converters import builtin as builtin_converters # register converters
from .config import (
add_densepose_config,
add_densepose_head_config,
add_hrnet_config,
add_dataset_category_config,
add_bootstrap_config,
load_bootstrap_config,
)
from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
from .evaluation import DensePoseCOCOEvaluator
from .modeling.roi_heads import DensePoseROIHeads
from .modeling.test_time_augmentation import (
DensePoseGeneralizedRCNNWithTTA,
DensePoseDatasetMapperTTA,
)
from .utils.transform import load_from_cfg
from .modeling.hrfpn import build_hrfpn_backbone
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from torch.nn import functional as F
from densepose.data.meshes.catalog import MeshCatalog
from densepose.structures.mesh import load_mesh_symmetry
from densepose.structures.transform_data import DensePoseTransformData
class DensePoseDataRelative(object):
"""
Dense pose relative annotations that can be applied to any bounding box:
x - normalized X coordinates [0, 255] of annotated points
y - normalized Y coordinates [0, 255] of annotated points
i - body part labels 0,...,24 for annotated points
u - body part U coordinates [0, 1] for annotated points
v - body part V coordinates [0, 1] for annotated points
segm - 256x256 segmentation mask with values 0,...,14
To obtain absolute x and y data wrt some bounding box one needs to first
divide the data by 256, multiply by the respective bounding box size
and add bounding box offset:
x_img = x0 + x_norm * w / 256.0
y_img = y0 + y_norm * h / 256.0
Segmentation masks are typically sampled to get image-based masks.
"""
# Key for normalized X coordinates in annotation dict
X_KEY = "dp_x"
# Key for normalized Y coordinates in annotation dict
Y_KEY = "dp_y"
# Key for U part coordinates in annotation dict (used in chart-based annotations)
U_KEY = "dp_U"
# Key for V part coordinates in annotation dict (used in chart-based annotations)
V_KEY = "dp_V"
# Key for I point labels in annotation dict (used in chart-based annotations)
I_KEY = "dp_I"
# Key for segmentation mask in annotation dict
S_KEY = "dp_masks"
# Key for vertex ids (used in continuous surface embeddings annotations)
VERTEX_IDS_KEY = "dp_vertex"
# Key for mesh id (used in continuous surface embeddings annotations)
MESH_NAME_KEY = "ref_model"
# Number of body parts in segmentation masks
N_BODY_PARTS = 14
# Number of parts in point labels
N_PART_LABELS = 24
MASK_SIZE = 256
def __init__(self, annotation, cleanup=False):
self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY])
self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY])
if (
DensePoseDataRelative.I_KEY in annotation
and DensePoseDataRelative.U_KEY in annotation
and DensePoseDataRelative.V_KEY in annotation
):
self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY])
self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY])
self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY])
if (
DensePoseDataRelative.VERTEX_IDS_KEY in annotation
and DensePoseDataRelative.MESH_NAME_KEY in annotation
):
self.vertex_ids = torch.as_tensor(
annotation[DensePoseDataRelative.VERTEX_IDS_KEY], dtype=torch.long
)
self.mesh_id = MeshCatalog.get_mesh_id(annotation[DensePoseDataRelative.MESH_NAME_KEY])
if DensePoseDataRelative.S_KEY in annotation:
self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation)
self.device = torch.device("cpu")
if cleanup:
DensePoseDataRelative.cleanup_annotation(annotation)
def to(self, device):
if self.device == device:
return self
new_data = DensePoseDataRelative.__new__(DensePoseDataRelative)
new_data.x = self.x.to(device)
new_data.y = self.y.to(device)
for attr in ["i", "u", "v", "vertex_ids", "segm"]:
if hasattr(self, attr):
setattr(new_data, attr, getattr(self, attr).to(device))
if hasattr(self, "mesh_id"):
new_data.mesh_id = self.mesh_id
new_data.device = device
return new_data
@staticmethod
def extract_segmentation_mask(annotation):
import pycocotools.mask as mask_utils
# TODO: annotation instance is accepted if it contains either
# DensePose segmentation or instance segmentation. However, here we
# only rely on DensePose segmentation
poly_specs = annotation[DensePoseDataRelative.S_KEY]
if isinstance(poly_specs, torch.Tensor):
# data is already given as mask tensors, no need to decode
return poly_specs
segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32)
if isinstance(poly_specs, dict):
if poly_specs:
mask = mask_utils.decode(poly_specs)
segm[mask > 0] = 1
else:
for i in range(len(poly_specs)):
poly_i = poly_specs[i]
if poly_i:
mask_i = mask_utils.decode(poly_i)
segm[mask_i > 0] = i + 1
return segm
@staticmethod
def validate_annotation(annotation):
for key in [
DensePoseDataRelative.X_KEY,
DensePoseDataRelative.Y_KEY,
]:
if key not in annotation:
return False, "no {key} data in the annotation".format(key=key)
valid_for_iuv_setting = all(
key in annotation
for key in [
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
]
)
valid_for_cse_setting = all(
key in annotation
for key in [
DensePoseDataRelative.VERTEX_IDS_KEY,
DensePoseDataRelative.MESH_NAME_KEY,
]
)
if not valid_for_iuv_setting and not valid_for_cse_setting:
return (
False,
"expected either {} (IUV setting) or {} (CSE setting) annotations".format(
", ".join(
[
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
]
),
", ".join(
[
DensePoseDataRelative.VERTEX_IDS_KEY,
DensePoseDataRelative.MESH_NAME_KEY,
]
),
),
)
return True, None
@staticmethod
def cleanup_annotation(annotation):
for key in [
DensePoseDataRelative.X_KEY,
DensePoseDataRelative.Y_KEY,
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
DensePoseDataRelative.S_KEY,
DensePoseDataRelative.VERTEX_IDS_KEY,
DensePoseDataRelative.MESH_NAME_KEY,
]:
if key in annotation:
del annotation[key]
def apply_transform(self, transforms, densepose_transform_data):
self._transform_pts(transforms, densepose_transform_data)
if hasattr(self, "segm"):
self._transform_segm(transforms, densepose_transform_data)
def _transform_pts(self, transforms, dp_transform_data):
import detectron2.data.transforms as T
# NOTE: This assumes that HorizFlipTransform is the only one that does flip
do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
if do_hflip:
self.x = self.MASK_SIZE - self.x
if hasattr(self, "i"):
self._flip_iuv_semantics(dp_transform_data)
if hasattr(self, "vertex_ids"):
self._flip_vertices()
for t in transforms.transforms:
if isinstance(t, T.RotationTransform):
xy_scale = np.array((t.w, t.h)) / DensePoseDataRelative.MASK_SIZE
xy = t.apply_coords(np.stack((self.x, self.y), axis=1) * xy_scale)
self.x, self.y = torch.tensor(xy / xy_scale, dtype=self.x.dtype).T
def _flip_iuv_semantics(self, dp_transform_data: DensePoseTransformData) -> None:
i_old = self.i.clone()
uv_symmetries = dp_transform_data.uv_symmetries
pt_label_symmetries = dp_transform_data.point_label_symmetries
for i in range(self.N_PART_LABELS):
if i + 1 in i_old:
annot_indices_i = i_old == i + 1
if pt_label_symmetries[i + 1] != i + 1:
self.i[annot_indices_i] = pt_label_symmetries[i + 1]
u_loc = (self.u[annot_indices_i] * 255).long()
v_loc = (self.v[annot_indices_i] * 255).long()
self.u[annot_indices_i] = uv_symmetries["U_transforms"][i][v_loc, u_loc].to(
device=self.u.device
)
self.v[annot_indices_i] = uv_symmetries["V_transforms"][i][v_loc, u_loc].to(
device=self.v.device
)
def _flip_vertices(self):
mesh_info = MeshCatalog[MeshCatalog.get_mesh_name(self.mesh_id)]
mesh_symmetry = (
load_mesh_symmetry(mesh_info.symmetry) if mesh_info.symmetry is not None else None
)
self.vertex_ids = mesh_symmetry["vertex_transforms"][self.vertex_ids]
def _transform_segm(self, transforms, dp_transform_data):
import detectron2.data.transforms as T
# NOTE: This assumes that HorizFlipTransform is the only one that does flip
do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
if do_hflip:
self.segm = torch.flip(self.segm, [1])
self._flip_segm_semantics(dp_transform_data)
for t in transforms.transforms:
if isinstance(t, T.RotationTransform):
self._transform_segm_rotation(t)
def _flip_segm_semantics(self, dp_transform_data):
old_segm = self.segm.clone()
mask_label_symmetries = dp_transform_data.mask_label_symmetries
for i in range(self.N_BODY_PARTS):
if mask_label_symmetries[i + 1] != i + 1:
self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1]
def _transform_segm_rotation(self, rotation):
self.segm = F.interpolate(self.segm[None, None, :], (rotation.h, rotation.w)).numpy()
self.segm = torch.tensor(rotation.apply_segmentation(self.segm[0, 0]))[None, None, :]
self.segm = F.interpolate(self.segm, [DensePoseDataRelative.MASK_SIZE] * 2)[0, 0]
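# Illustrative sketch (hypothetical helper, toy annotation values): map normalized
# DensePose points to absolute image coordinates using the formula from the
# DensePoseDataRelative docstring above.
def _example_points_to_absolute():
    annotation = {
        DensePoseDataRelative.X_KEY: [64.0, 128.0],
        DensePoseDataRelative.Y_KEY: [32.0, 200.0],
        DensePoseDataRelative.I_KEY: [1, 2],
        DensePoseDataRelative.U_KEY: [0.1, 0.9],
        DensePoseDataRelative.V_KEY: [0.5, 0.2],
    }
    data = DensePoseDataRelative(annotation)
    x0, y0, w, h = 10.0, 20.0, 100.0, 50.0  # bounding box in absolute image coordinates
    x_img = x0 + data.x * w / DensePoseDataRelative.MASK_SIZE
    y_img = y0 + data.y * h / DensePoseDataRelative.MASK_SIZE
    return x_img, y_img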
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/data_relative.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from densepose.structures.data_relative import DensePoseDataRelative
class DensePoseList(object):
_TORCH_DEVICE_CPU = torch.device("cpu")
def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU):
assert len(densepose_datas) == len(
boxes_xyxy_abs
), "Attempt to initialize DensePoseList with {} DensePose datas " "and {} boxes".format(
len(densepose_datas), len(boxes_xyxy_abs)
)
self.densepose_datas = []
for densepose_data in densepose_datas:
assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, (
"Attempt to initialize DensePoseList with DensePose datas "
"of type {}, expected DensePoseDataRelative".format(type(densepose_data))
)
densepose_data_ondevice = (
densepose_data.to(device) if densepose_data is not None else None
)
self.densepose_datas.append(densepose_data_ondevice)
self.boxes_xyxy_abs = boxes_xyxy_abs.to(device)
self.image_size_hw = image_size_hw
self.device = device
def to(self, device):
if self.device == device:
return self
return DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device)
def __iter__(self):
return iter(self.densepose_datas)
def __len__(self):
return len(self.densepose_datas)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.densepose_datas))
s += "image_width={}, ".format(self.image_size_hw[1])
s += "image_height={})".format(self.image_size_hw[0])
return s
def __getitem__(self, item):
if isinstance(item, int):
densepose_data_rel = self.densepose_datas[item]
return densepose_data_rel
elif isinstance(item, slice):
densepose_datas_rel = self.densepose_datas[item]
boxes_xyxy_abs = self.boxes_xyxy_abs[item]
return DensePoseList(
densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
)
elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool):
densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0]
boxes_xyxy_abs = self.boxes_xyxy_abs[item]
return DensePoseList(
densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
)
else:
densepose_datas_rel = [self.densepose_datas[i] for i in item]
boxes_xyxy_abs = self.boxes_xyxy_abs[item]
return DensePoseList(
densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
)
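# Illustrative sketch (hypothetical helper, toy boxes): DensePoseList accepts None
# for instances without DensePose annotations and supports int, slice and
# boolean-mask indexing.
def _example_densepose_list():
    from detectron2.structures import Boxes

    boxes = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 20.0, 20.0]]))
    dp_list = DensePoseList([None, None], boxes, image_size_hw=(480, 640))
    keep = torch.tensor([True, False])
    return dp_list[keep]  # DensePoseList with a single (empty) entry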
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/list.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import make_dataclass
from functools import lru_cache
from typing import Any, Optional
import torch
@lru_cache(maxsize=None)
def decorate_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type:
"""
Create a new output class from an existing one by adding new attributes
related to confidence estimation:
- sigma_1 (tensor)
- sigma_2 (tensor)
- kappa_u (tensor)
- kappa_v (tensor)
- fine_segm_confidence (tensor)
- coarse_segm_confidence (tensor)
Details on confidence estimation parameters can be found in:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
The new class inherits the provided `BasePredictorOutput` class,
    its name is composed of the name of the provided class and
"WithConfidences" suffix.
Args:
BasePredictorOutput (type): output type to which confidence data
is to be added, assumed to be a dataclass
Return:
New dataclass derived from the provided one that has attributes
for confidence estimation
"""
PredictorOutput = make_dataclass(
BasePredictorOutput.__name__ + "WithConfidences",
fields=[ # pyre-ignore[6]
("sigma_1", Optional[torch.Tensor], None),
("sigma_2", Optional[torch.Tensor], None),
("kappa_u", Optional[torch.Tensor], None),
("kappa_v", Optional[torch.Tensor], None),
("fine_segm_confidence", Optional[torch.Tensor], None),
("coarse_segm_confidence", Optional[torch.Tensor], None),
],
bases=(BasePredictorOutput,),
)
# add possibility to index PredictorOutput
def slice_if_not_none(data, item):
if data is None:
return None
if isinstance(item, int):
return data[item].unsqueeze(0)
return data[item]
def PredictorOutput_getitem(self, item):
PredictorOutput = type(self)
base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item)
return PredictorOutput(
**base_predictor_output_sliced.__dict__,
coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item),
fine_segm_confidence=slice_if_not_none(self.fine_segm_confidence, item),
sigma_1=slice_if_not_none(self.sigma_1, item),
sigma_2=slice_if_not_none(self.sigma_2, item),
kappa_u=slice_if_not_none(self.kappa_u, item),
kappa_v=slice_if_not_none(self.kappa_v, item),
)
PredictorOutput.__getitem__ = PredictorOutput_getitem
def PredictorOutput_to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
PredictorOutput = type(self)
base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16]
def to_device_if_tensor(var: Any):
if isinstance(var, torch.Tensor):
return var.to(device)
return var
return PredictorOutput(
**base_predictor_output_to.__dict__,
sigma_1=to_device_if_tensor(self.sigma_1),
sigma_2=to_device_if_tensor(self.sigma_2),
kappa_u=to_device_if_tensor(self.kappa_u),
kappa_v=to_device_if_tensor(self.kappa_v),
fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence),
coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
)
PredictorOutput.to = PredictorOutput_to
return PredictorOutput
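# Illustrative sketch (hypothetical helper): derive a confidence-aware output class
# from the chart predictor output; the import is done lazily to keep the example
# local to this function.
def _example_decorated_output():
    from densepose.structures.chart import DensePoseChartPredictorOutput

    OutputWithConf = decorate_predictor_output_class_with_confidences(
        DensePoseChartPredictorOutput
    )
    zeros = torch.zeros(1, 2, 4, 4)
    out = OutputWithConf(coarse_segm=zeros, fine_segm=zeros, u=zeros, v=zeros, sigma_2=zeros)
    return out[0]  # indexing also slices the confidence tensors (or keeps them None)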
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/chart_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Any, Optional, Tuple
import torch
@dataclass
class DensePoseChartResult:
"""
DensePose results for chart-based methods represented by labels and inner
coordinates (U, V) of individual charts. Each chart is a 2D manifold
that has an associated label and is parameterized by two coordinates U and V.
Both U and V take values in [0, 1].
Thus the results are represented by two tensors:
- labels (tensor [H, W] of long): contains estimated label for each pixel of
the detection bounding box of size (H, W)
- uv (tensor [2, H, W] of float): contains estimated U and V coordinates
for each pixel of the detection bounding box of size (H, W)
"""
labels: torch.Tensor
uv: torch.Tensor
def to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
labels = self.labels.to(device)
uv = self.uv.to(device)
return DensePoseChartResult(labels=labels, uv=uv)
@dataclass
class DensePoseChartResultWithConfidences:
"""
We add confidence values to DensePoseChartResult
Thus the results are represented by two tensors:
- labels (tensor [H, W] of long): contains estimated label for each pixel of
the detection bounding box of size (H, W)
- uv (tensor [2, H, W] of float): contains estimated U and V coordinates
for each pixel of the detection bounding box of size (H, W)
Plus one [H, W] tensor of float for each confidence type
"""
labels: torch.Tensor
uv: torch.Tensor
sigma_1: Optional[torch.Tensor] = None
sigma_2: Optional[torch.Tensor] = None
kappa_u: Optional[torch.Tensor] = None
kappa_v: Optional[torch.Tensor] = None
fine_segm_confidence: Optional[torch.Tensor] = None
coarse_segm_confidence: Optional[torch.Tensor] = None
def to(self, device: torch.device):
"""
Transfers all tensors to the given device, except if their value is None
"""
def to_device_if_tensor(var: Any):
if isinstance(var, torch.Tensor):
return var.to(device)
return var
return DensePoseChartResultWithConfidences(
labels=self.labels.to(device),
uv=self.uv.to(device),
sigma_1=to_device_if_tensor(self.sigma_1),
sigma_2=to_device_if_tensor(self.sigma_2),
kappa_u=to_device_if_tensor(self.kappa_u),
kappa_v=to_device_if_tensor(self.kappa_v),
fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence),
coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
)
@dataclass
class DensePoseChartResultQuantized:
"""
DensePose results for chart-based methods represented by labels and quantized
inner coordinates (U, V) of individual charts. Each chart is a 2D manifold
that has an associated label and is parameterized by two coordinates U and V.
Both U and V take values in [0, 1].
Quantized coordinates Uq and Vq have uint8 values which are obtained as:
Uq = U * 255 (hence 0 <= Uq <= 255)
Vq = V * 255 (hence 0 <= Vq <= 255)
Thus the results are represented by one tensor:
- labels_uv_uint8 (tensor [3, H, W] of uint8): contains estimated label
and quantized coordinates Uq and Vq for each pixel of the detection
bounding box of size (H, W)
"""
labels_uv_uint8: torch.Tensor
def to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
labels_uv_uint8 = self.labels_uv_uint8.to(device)
return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8)
@dataclass
class DensePoseChartResultCompressed:
"""
DensePose results for chart-based methods represented by a PNG-encoded string.
The tensor of quantized DensePose results of size [3, H, W] is considered
as an image with 3 color channels. PNG compression is applied and the result
is stored as a Base64-encoded string. The following attributes are defined:
- shape_chw (tuple of 3 int): contains shape of the result tensor
(number of channels, height, width)
- labels_uv_str (str): contains Base64-encoded results tensor of size
[3, H, W] compressed with PNG compression methods
"""
shape_chw: Tuple[int, int, int]
labels_uv_str: str
def quantize_densepose_chart_result(result: DensePoseChartResult) -> DensePoseChartResultQuantized:
"""
Applies quantization to DensePose chart-based result.
Args:
result (DensePoseChartResult): DensePose chart-based result
Return:
Quantized DensePose chart-based result (DensePoseChartResultQuantized)
"""
h, w = result.labels.shape
labels_uv_uint8 = torch.zeros([3, h, w], dtype=torch.uint8, device=result.labels.device)
labels_uv_uint8[0] = result.labels
labels_uv_uint8[1:] = (result.uv * 255).clamp(0, 255).byte()
return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8)
def compress_quantized_densepose_chart_result(
result: DensePoseChartResultQuantized,
) -> DensePoseChartResultCompressed:
"""
Compresses quantized DensePose chart-based result
Args:
result (DensePoseChartResultQuantized): quantized DensePose chart-based result
Return:
Compressed DensePose chart-based result (DensePoseChartResultCompressed)
"""
import base64
import numpy as np
from io import BytesIO
from PIL import Image
labels_uv_uint8_np_chw = result.labels_uv_uint8.cpu().numpy()
labels_uv_uint8_np_hwc = np.moveaxis(labels_uv_uint8_np_chw, 0, -1)
im = Image.fromarray(labels_uv_uint8_np_hwc)
fstream = BytesIO()
im.save(fstream, format="png", optimize=True)
labels_uv_str = base64.encodebytes(fstream.getvalue()).decode()
shape_chw = labels_uv_uint8_np_chw.shape
return DensePoseChartResultCompressed(labels_uv_str=labels_uv_str, shape_chw=shape_chw)
def decompress_compressed_densepose_chart_result(
result: DensePoseChartResultCompressed,
) -> DensePoseChartResultQuantized:
"""
Decompresses DensePose chart-based result encoded into a base64 string
Args:
result (DensePoseChartResultCompressed): compressed DensePose chart result
Return:
Quantized DensePose chart-based result (DensePoseChartResultQuantized)
"""
import base64
import numpy as np
from io import BytesIO
from PIL import Image
fstream = BytesIO(base64.decodebytes(result.labels_uv_str.encode()))
im = Image.open(fstream)
labels_uv_uint8_np_chw = np.moveaxis(np.array(im, dtype=np.uint8), -1, 0)
return DensePoseChartResultQuantized(
labels_uv_uint8=torch.from_numpy(labels_uv_uint8_np_chw.reshape(result.shape_chw))
)
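# Illustrative round-trip sketch (hypothetical helper, toy data; requires Pillow):
# quantize a chart result, compress it to a base64-encoded PNG string and decode
# it back losslessly.
def _example_result_roundtrip():
    h, w = 8, 8
    result = DensePoseChartResult(
        labels=torch.randint(0, 25, (h, w), dtype=torch.int64),
        uv=torch.rand(2, h, w),
    )
    quantized = quantize_densepose_chart_result(result)
    compressed = compress_quantized_densepose_chart_result(quantized)
    restored = decompress_compressed_densepose_chart_result(compressed)
    assert torch.equal(quantized.labels_uv_uint8, restored.labels_uv_uint8)
    return restored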
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/chart_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import make_dataclass
from functools import lru_cache
from typing import Any, Optional
import torch
@lru_cache(maxsize=None)
def decorate_cse_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type:
"""
Create a new output class from an existing one by adding new attributes
related to confidence estimation:
- coarse_segm_confidence (tensor)
Details on confidence estimation parameters can be found in:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
The new class inherits the provided `BasePredictorOutput` class,
    its name is composed of the name of the provided class and
"WithConfidences" suffix.
Args:
BasePredictorOutput (type): output type to which confidence data
is to be added, assumed to be a dataclass
Return:
New dataclass derived from the provided one that has attributes
for confidence estimation
"""
PredictorOutput = make_dataclass(
BasePredictorOutput.__name__ + "WithConfidences",
fields=[ # pyre-ignore[6]
("coarse_segm_confidence", Optional[torch.Tensor], None),
],
bases=(BasePredictorOutput,),
)
# add possibility to index PredictorOutput
def slice_if_not_none(data, item):
if data is None:
return None
if isinstance(item, int):
return data[item].unsqueeze(0)
return data[item]
def PredictorOutput_getitem(self, item):
PredictorOutput = type(self)
base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item)
return PredictorOutput(
**base_predictor_output_sliced.__dict__,
coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item),
)
PredictorOutput.__getitem__ = PredictorOutput_getitem
def PredictorOutput_to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
PredictorOutput = type(self)
base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16]
def to_device_if_tensor(var: Any):
if isinstance(var, torch.Tensor):
return var.to(device)
return var
return PredictorOutput(
**base_predictor_output_to.__dict__,
coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
)
PredictorOutput.to = PredictorOutput_to
return PredictorOutput
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/cse_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .chart import DensePoseChartPredictorOutput
from .chart_confidence import decorate_predictor_output_class_with_confidences
from .cse_confidence import decorate_cse_predictor_output_class_with_confidences
from .chart_result import (
DensePoseChartResult,
DensePoseChartResultWithConfidences,
quantize_densepose_chart_result,
compress_quantized_densepose_chart_result,
decompress_compressed_densepose_chart_result,
)
from .cse import DensePoseEmbeddingPredictorOutput
from .data_relative import DensePoseDataRelative
from .list import DensePoseList
from .mesh import Mesh, create_mesh
from .transform_data import DensePoseTransformData, normalized_coords_transform
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Union
import torch
@dataclass
class DensePoseChartPredictorOutput:
"""
Predictor output that contains segmentation and inner coordinates predictions for predefined
body parts:
* coarse segmentation, a tensor of shape [N, K, Hout, Wout]
* fine segmentation, a tensor of shape [N, C, Hout, Wout]
* U coordinates, a tensor of shape [N, C, Hout, Wout]
* V coordinates, a tensor of shape [N, C, Hout, Wout]
where
- N is the number of instances
- K is the number of coarse segmentation channels (
2 = foreground / background,
15 = one of 14 body parts / background)
- C is the number of fine segmentation channels (
24 fine body parts / background)
- Hout and Wout are height and width of predictions
"""
coarse_segm: torch.Tensor
fine_segm: torch.Tensor
u: torch.Tensor
v: torch.Tensor
def __len__(self):
"""
Number of instances (N) in the output
"""
return self.coarse_segm.size(0)
def __getitem__(
self, item: Union[int, slice, torch.BoolTensor]
) -> "DensePoseChartPredictorOutput":
"""
Get outputs for the selected instance(s)
Args:
item (int or slice or tensor): selected items
"""
if isinstance(item, int):
return DensePoseChartPredictorOutput(
coarse_segm=self.coarse_segm[item].unsqueeze(0),
fine_segm=self.fine_segm[item].unsqueeze(0),
u=self.u[item].unsqueeze(0),
v=self.v[item].unsqueeze(0),
)
else:
return DensePoseChartPredictorOutput(
coarse_segm=self.coarse_segm[item],
fine_segm=self.fine_segm[item],
u=self.u[item],
v=self.v[item],
)
def to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
coarse_segm = self.coarse_segm.to(device)
fine_segm = self.fine_segm.to(device)
u = self.u.to(device)
v = self.v.to(device)
return DensePoseChartPredictorOutput(coarse_segm=coarse_segm, fine_segm=fine_segm, u=u, v=v)
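# Illustrative sketch (hypothetical helper, toy shapes): N=2 instances, K=2 coarse
# channels, C=25 fine channels (24 body parts + background), 4x4 output resolution.
def _example_chart_output():
    N, K, C, H, W = 2, 2, 25, 4, 4
    output = DensePoseChartPredictorOutput(
        coarse_segm=torch.zeros(N, K, H, W),
        fine_segm=torch.zeros(N, C, H, W),
        u=torch.zeros(N, C, H, W),
        v=torch.zeros(N, C, H, W),
    )
    assert len(output) == N
    return output[0]  # single-instance output, leading batch dimension kept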
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/chart.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle
from functools import lru_cache
from typing import Dict, Optional, Tuple
import torch
from detectron2.utils.file_io import PathManager
from densepose.data.meshes.catalog import MeshCatalog, MeshInfo
def _maybe_copy_to_device(
attribute: Optional[torch.Tensor], device: torch.device
) -> Optional[torch.Tensor]:
if attribute is None:
return None
return attribute.to(device)
class Mesh:
def __init__(
self,
vertices: Optional[torch.Tensor] = None,
faces: Optional[torch.Tensor] = None,
geodists: Optional[torch.Tensor] = None,
symmetry: Optional[Dict[str, torch.Tensor]] = None,
texcoords: Optional[torch.Tensor] = None,
mesh_info: Optional[MeshInfo] = None,
device: Optional[torch.device] = None,
):
"""
Args:
vertices (tensor [N, 3] of float32): vertex coordinates in 3D
faces (tensor [M, 3] of long): triangular face represented as 3
vertex indices
geodists (tensor [N, N] of float32): geodesic distances from
vertex `i` to vertex `j` (optional, default: None)
symmetry (dict: str -> tensor): various mesh symmetry data:
- "vertex_transforms": vertex mapping under horizontal flip,
tensor of size [N] of type long; vertex `i` is mapped to
vertex `tensor[i]` (optional, default: None)
texcoords (tensor [N, 2] of float32): texture coordinates, i.e. global
and normalized mesh UVs (optional, default: None)
mesh_info (MeshInfo type): necessary to load the attributes on-the-go,
can be used instead of passing all the variables one by one
device (torch.device): device of the Mesh. If not provided, will use
the device of the vertices
"""
self._vertices = vertices
self._faces = faces
self._geodists = geodists
self._symmetry = symmetry
self._texcoords = texcoords
self.mesh_info = mesh_info
self.device = device
assert self._vertices is not None or self.mesh_info is not None
all_fields = [self._vertices, self._faces, self._geodists, self._texcoords]
if self.device is None:
for field in all_fields:
if field is not None:
self.device = field.device
break
if self.device is None and symmetry is not None:
for key in symmetry:
self.device = symmetry[key].device
break
self.device = torch.device("cpu") if self.device is None else self.device
assert all([var.device == self.device for var in all_fields if var is not None])
if symmetry:
assert all(symmetry[key].device == self.device for key in symmetry)
        if texcoords is not None and vertices is not None:
assert len(vertices) == len(texcoords)
def to(self, device: torch.device):
device_symmetry = self._symmetry
if device_symmetry:
device_symmetry = {key: value.to(device) for key, value in device_symmetry.items()}
return Mesh(
_maybe_copy_to_device(self._vertices, device),
_maybe_copy_to_device(self._faces, device),
_maybe_copy_to_device(self._geodists, device),
device_symmetry,
_maybe_copy_to_device(self._texcoords, device),
self.mesh_info,
device,
)
@property
def vertices(self):
if self._vertices is None and self.mesh_info is not None:
self._vertices = load_mesh_data(self.mesh_info.data, "vertices", self.device)
return self._vertices
@property
def faces(self):
if self._faces is None and self.mesh_info is not None:
self._faces = load_mesh_data(self.mesh_info.data, "faces", self.device)
return self._faces
@property
def geodists(self):
if self._geodists is None and self.mesh_info is not None:
self._geodists = load_mesh_auxiliary_data(self.mesh_info.geodists, self.device)
return self._geodists
@property
def symmetry(self):
if self._symmetry is None and self.mesh_info is not None:
self._symmetry = load_mesh_symmetry(self.mesh_info.symmetry, self.device)
return self._symmetry
@property
def texcoords(self):
if self._texcoords is None and self.mesh_info is not None:
self._texcoords = load_mesh_auxiliary_data(self.mesh_info.texcoords, self.device)
return self._texcoords
    def get_geodists(self):
        if self.geodists is None:
            # cache the computed value on the backing field: `geodists` is a read-only property
            self._geodists = self._compute_geodists()
        return self._geodists
def _compute_geodists(self):
# TODO: compute using Laplace-Beltrami
geodists = None
return geodists
def load_mesh_data(
mesh_fpath: str, field: str, device: Optional[torch.device] = None
) -> Optional[torch.Tensor]:
with PathManager.open(mesh_fpath, "rb") as hFile:
return torch.as_tensor(pickle.load(hFile)[field], dtype=torch.float).to( # pyre-ignore[6]
device
)
return None
def load_mesh_auxiliary_data(
fpath: str, device: Optional[torch.device] = None
) -> Optional[torch.Tensor]:
fpath_local = PathManager.get_local_path(fpath)
with PathManager.open(fpath_local, "rb") as hFile:
return torch.as_tensor(pickle.load(hFile), dtype=torch.float).to(device) # pyre-ignore[6]
return None
@lru_cache()
def load_mesh_symmetry(
symmetry_fpath: str, device: Optional[torch.device] = None
) -> Optional[Dict[str, torch.Tensor]]:
with PathManager.open(symmetry_fpath, "rb") as hFile:
symmetry_loaded = pickle.load(hFile) # pyre-ignore[6]
symmetry = {
"vertex_transforms": torch.as_tensor(
symmetry_loaded["vertex_transforms"], dtype=torch.long
).to(device),
}
return symmetry
return None
@lru_cache()
def create_mesh(mesh_name: str, device: Optional[torch.device] = None):
MeshCatalog[mesh_name].data = './mesh_material/%s_sph.pkl'%(mesh_name)
return Mesh(mesh_info=MeshCatalog[mesh_name], device=device)
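# Illustrative sketch (hypothetical helper, toy geometry): a Mesh can also be built
# from explicit tensors instead of a MeshCatalog entry.
def _example_mesh_from_tensors():
    vertices = torch.tensor(
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32
    )
    faces = torch.tensor([[0, 1, 2]], dtype=torch.long)
    mesh = Mesh(vertices=vertices, faces=faces)
    return mesh.to(torch.device("cpu"))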
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/mesh.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import BinaryIO, Dict, Union
import torch
def normalized_coords_transform(x0, y0, w, h):
"""
Coordinates transform that maps top left corner to (-1, -1) and bottom
right corner to (1, 1). Used for torch.grid_sample to initialize the
grid
"""
def f(p):
return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1)
return f
class DensePoseTransformData(object):
# Horizontal symmetry label transforms used for horizontal flip
MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]
# fmt: off
POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23] # noqa
# fmt: on
def __init__(self, uv_symmetries: Dict[str, torch.Tensor], device: torch.device):
self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES
self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES
self.uv_symmetries = uv_symmetries
self.device = torch.device("cpu")
def to(self, device: torch.device, copy: bool = False) -> "DensePoseTransformData":
"""
Convert transform data to the specified device
Args:
device (torch.device): device to convert the data to
copy (bool): flag that specifies whether to copy or to reference the data
in case the device is the same
Return:
An instance of `DensePoseTransformData` with data stored on the specified device
"""
if self.device == device and not copy:
return self
uv_symmetry_map = {}
for key in self.uv_symmetries:
uv_symmetry_map[key] = self.uv_symmetries[key].to(device=device, copy=copy)
return DensePoseTransformData(uv_symmetry_map, device)
@staticmethod
def load(io: Union[str, BinaryIO]):
"""
Args:
io: (str or binary file-like object): input file to load data from
Returns:
An instance of `DensePoseTransformData` with transforms loaded from the file
"""
import scipy.io
uv_symmetry_map = scipy.io.loadmat(io)
uv_symmetry_map_torch = {}
for key in ["U_transforms", "V_transforms"]:
uv_symmetry_map_torch[key] = []
map_src = uv_symmetry_map[key]
map_dst = uv_symmetry_map_torch[key]
for i in range(map_src.shape[1]):
map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))
uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0)
transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device("cpu"))
return transform_data
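# Illustrative sketch (hypothetical helper, toy box): the transform returned by
# normalized_coords_transform maps box corners to the [-1, 1] grid expected by
# torch.nn.functional.grid_sample.
def _example_normalized_coords():
    f = normalized_coords_transform(x0=10, y0=20, w=100, h=50)
    assert f((10, 20)) == (-1.0, -1.0)  # top-left corner
    assert f((110, 70)) == (1.0, 1.0)   # bottom-right corner
    return f((60, 45))                  # box center -> (0.0, 0.0)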
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/transform_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import Union
import torch
@dataclass
class DensePoseEmbeddingPredictorOutput:
"""
Predictor output that contains embedding and coarse segmentation data:
* embedding: float tensor of size [N, D, H, W], contains estimated embeddings
* coarse_segm: float tensor of size [N, K, H, W]
Here D = MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
K = MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
"""
embedding: torch.Tensor
coarse_segm: torch.Tensor
def __len__(self):
"""
Number of instances (N) in the output
"""
return self.coarse_segm.size(0)
def __getitem__(
self, item: Union[int, slice, torch.BoolTensor]
) -> "DensePoseEmbeddingPredictorOutput":
"""
Get outputs for the selected instance(s)
Args:
item (int or slice or tensor): selected items
"""
if isinstance(item, int):
return DensePoseEmbeddingPredictorOutput(
coarse_segm=self.coarse_segm[item].unsqueeze(0),
embedding=self.embedding[item].unsqueeze(0),
)
else:
return DensePoseEmbeddingPredictorOutput(
coarse_segm=self.coarse_segm[item], embedding=self.embedding[item]
)
def to(self, device: torch.device):
"""
Transfers all tensors to the given device
"""
coarse_segm = self.coarse_segm.to(device)
embedding = self.embedding.to(device)
return DensePoseEmbeddingPredictorOutput(coarse_segm=coarse_segm, embedding=embedding)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/structures/cse.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Tuple
from detectron2.structures import BitMasks, Boxes
from .base import BaseConverter
ImageSizeType = Tuple[int, int]
class ToMaskConverter(BaseConverter):
"""
Converts various DensePose predictor outputs to masks
in bit mask format (see `BitMasks`). Each DensePose predictor output type
    has to register its conversion strategy.
"""
registry = {}
dst_type = BitMasks
@classmethod
def convert(
cls,
densepose_predictor_outputs: Any,
boxes: Boxes,
image_size_hw: ImageSizeType,
*args,
**kwargs
) -> BitMasks:
"""
Convert DensePose predictor outputs to BitMasks using some registered
converter. Does recursive lookup for base classes, so there's no need
for explicit registration for derived classes.
Args:
densepose_predictor_outputs: DensePose predictor output to be
converted to BitMasks
boxes (Boxes): bounding boxes that correspond to the DensePose
predictor outputs
image_size_hw (tuple [int, int]): image height and width
Return:
An instance of `BitMasks`. If no suitable converter was found, raises KeyError
"""
return super(ToMaskConverter, cls).convert(
densepose_predictor_outputs, boxes, image_size_hw, *args, **kwargs
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/to_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict
import torch
from torch.nn import functional as F
from detectron2.structures.boxes import Boxes, BoxMode
from ..structures import (
DensePoseChartPredictorOutput,
DensePoseChartResult,
DensePoseChartResultWithConfidences,
)
from . import resample_fine_and_coarse_segm_to_bbox
from .base import IntTupleBox, make_int_box
def resample_uv_tensors_to_bbox(
u: torch.Tensor,
v: torch.Tensor,
labels: torch.Tensor,
box_xywh_abs: IntTupleBox,
) -> torch.Tensor:
"""
Resamples U and V coordinate estimates for the given bounding box
Args:
u (tensor [1, C, H, W] of float): U coordinates
v (tensor [1, C, H, W] of float): V coordinates
labels (tensor [H, W] of long): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled U and V coordinates - a tensor [2, H, W] of float
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
u_bbox = F.interpolate(u, (h, w), mode="bilinear", align_corners=False)
v_bbox = F.interpolate(v, (h, w), mode="bilinear", align_corners=False)
uv = torch.zeros([2, h, w], dtype=torch.float32, device=u.device)
for part_id in range(1, u_bbox.size(1)):
uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
return uv
def resample_uv_to_bbox(
predictor_output: DensePoseChartPredictorOutput,
labels: torch.Tensor,
box_xywh_abs: IntTupleBox,
) -> torch.Tensor:
"""
Resamples U and V coordinate estimates for the given bounding box
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be resampled
labels (tensor [H, W] of long): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled U and V coordinates - a tensor [2, H, W] of float
"""
return resample_uv_tensors_to_bbox(
predictor_output.u,
predictor_output.v,
labels,
box_xywh_abs,
)
def densepose_chart_predictor_output_to_result(
predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
) -> DensePoseChartResult:
"""
Convert densepose chart predictor outputs to results
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be converted to results, must contain only 1 output
boxes (Boxes): bounding box that corresponds to the predictor output,
must contain only 1 bounding box
Return:
DensePose chart-based result (DensePoseChartResult)
"""
assert len(predictor_output) == 1 and len(boxes) == 1, (
f"Predictor output to result conversion can operate only single outputs"
f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
)
boxes_xyxy_abs = boxes.tensor.clone()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
box_xywh = make_int_box(boxes_xywh_abs[0])
labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
return DensePoseChartResult(labels=labels, uv=uv)
def resample_confidences_to_bbox(
predictor_output: DensePoseChartPredictorOutput,
labels: torch.Tensor,
box_xywh_abs: IntTupleBox,
) -> Dict[str, torch.Tensor]:
"""
Resamples confidences for the given bounding box
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be resampled
labels (tensor [H, W] of long): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled confidences - a dict of [H, W] tensors of float
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
confidence_names = [
"sigma_1",
"sigma_2",
"kappa_u",
"kappa_v",
"fine_segm_confidence",
"coarse_segm_confidence",
]
confidence_results = {key: None for key in confidence_names}
confidence_names = [
key for key in confidence_names if getattr(predictor_output, key) is not None
]
confidence_base = torch.zeros([h, w], dtype=torch.float32, device=predictor_output.u.device)
# assign data from channels that correspond to the labels
for key in confidence_names:
resampled_confidence = F.interpolate(
getattr(predictor_output, key), (h, w), mode="bilinear", align_corners=False
)
result = confidence_base.clone()
for part_id in range(1, predictor_output.u.size(1)):
if resampled_confidence.size(1) != predictor_output.u.size(1):
# confidence is not part-based, don't try to fill it part by part
continue
result[labels == part_id] = resampled_confidence[0, part_id][labels == part_id]
if resampled_confidence.size(1) != predictor_output.u.size(1):
# confidence is not part-based, fill the data with the first channel
# (targeted for segmentation confidences that have only 1 channel)
result = resampled_confidence[0, 0]
confidence_results[key] = result
return confidence_results # pyre-ignore[7]
def densepose_chart_predictor_output_to_result_with_confidences(
predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
) -> DensePoseChartResultWithConfidences:
"""
Convert densepose chart predictor outputs to results
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output with confidences to be converted to results, must contain only 1 output
boxes (Boxes): bounding box that corresponds to the predictor output,
must contain only 1 bounding box
Return:
DensePose chart-based result with confidences (DensePoseChartResultWithConfidences)
"""
assert len(predictor_output) == 1 and len(boxes) == 1, (
f"Predictor output to result conversion can operate only single outputs"
f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
)
boxes_xyxy_abs = boxes.tensor.clone()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
box_xywh = make_int_box(boxes_xywh_abs[0])
labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
confidences = resample_confidences_to_bbox(predictor_output, labels, box_xywh)
return DensePoseChartResultWithConfidences(labels=labels, uv=uv, **confidences)
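# Illustrative sketch (hypothetical helper, toy tensors): resample 8x8 U/V
# predictions with 3 channels (background + 2 parts) to a 16x16 bounding box.
def _example_resample_uv():
    u = torch.rand(1, 3, 8, 8)
    v = torch.rand(1, 3, 8, 8)
    labels = torch.randint(0, 3, (16, 16), dtype=torch.int64)
    uv = resample_uv_tensors_to_bbox(u, v, labels, (0, 0, 16, 16))
    return uv  # tensor of shape [2, 16, 16]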
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/chart_output_to_chart_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
from detectron2.structures import Boxes
from ..structures import DensePoseChartResult, DensePoseChartResultWithConfidences
from .base import BaseConverter
class ToChartResultConverter(BaseConverter):
"""
Converts various DensePose predictor outputs to DensePose results.
    Each DensePose predictor output type has to register its conversion strategy.
"""
registry = {}
dst_type = DensePoseChartResult
@classmethod
def convert(cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs) -> DensePoseChartResult:
"""
Convert DensePose predictor outputs to DensePoseResult using some registered
converter. Does recursive lookup for base classes, so there's no need
for explicit registration for derived classes.
Args:
            predictor_outputs: DensePose predictor output to be
                converted to a chart-based result
boxes (Boxes): bounding boxes that correspond to the DensePose
predictor outputs
Return:
            An instance of DensePoseChartResult. If no suitable converter was found, raises KeyError
"""
return super(ToChartResultConverter, cls).convert(predictor_outputs, boxes, *args, **kwargs)
class ToChartResultConverterWithConfidences(BaseConverter):
"""
Converts various DensePose predictor outputs to DensePose results.
    Each DensePose predictor output type has to register its conversion strategy.
"""
registry = {}
dst_type = DensePoseChartResultWithConfidences
@classmethod
def convert(
cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs
) -> DensePoseChartResultWithConfidences:
"""
Convert DensePose predictor outputs to DensePoseResult with confidences
using some registered converter. Does recursive lookup for base classes,
so there's no need for explicit registration for derived classes.
Args:
            predictor_outputs: DensePose predictor output with confidences
                to be converted to a chart-based result with confidences
boxes (Boxes): bounding boxes that correspond to the DensePose
predictor outputs
Return:
            An instance of DensePoseChartResultWithConfidences. If no suitable converter was found, raises KeyError
"""
return super(ToChartResultConverterWithConfidences, cls).convert(
predictor_outputs, boxes, *args, **kwargs
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/to_chart_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .hflip import HFlipConverter
from .to_mask import ToMaskConverter
from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences
from .segm_to_mask import (
predictor_output_with_fine_and_coarse_segm_to_mask,
predictor_output_with_coarse_segm_to_mask,
resample_fine_and_coarse_segm_to_bbox,
)
from .chart_output_to_chart_result import (
densepose_chart_predictor_output_to_result,
densepose_chart_predictor_output_to_result_with_confidences,
)
from .chart_output_hflip import densepose_chart_predictor_output_hflip
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import fields
import torch
from densepose.structures import DensePoseChartPredictorOutput, DensePoseTransformData
def densepose_chart_predictor_output_hflip(
densepose_predictor_output: DensePoseChartPredictorOutput,
transform_data: DensePoseTransformData,
) -> DensePoseChartPredictorOutput:
"""
Change to take into account a Horizontal flip.
"""
if len(densepose_predictor_output) > 0:
PredictorOutput = type(densepose_predictor_output)
output_dict = {}
for field in fields(densepose_predictor_output):
field_value = getattr(densepose_predictor_output, field.name)
# flip tensors
if isinstance(field_value, torch.Tensor):
setattr(densepose_predictor_output, field.name, torch.flip(field_value, [3]))
densepose_predictor_output = _flip_iuv_semantics_tensor(
densepose_predictor_output, transform_data
)
densepose_predictor_output = _flip_segm_semantics_tensor(
densepose_predictor_output, transform_data
)
for field in fields(densepose_predictor_output):
output_dict[field.name] = getattr(densepose_predictor_output, field.name)
return PredictorOutput(**output_dict)
else:
return densepose_predictor_output
def _flip_iuv_semantics_tensor(
densepose_predictor_output: DensePoseChartPredictorOutput,
dp_transform_data: DensePoseTransformData,
) -> DensePoseChartPredictorOutput:
point_label_symmetries = dp_transform_data.point_label_symmetries
uv_symmetries = dp_transform_data.uv_symmetries
N, C, H, W = densepose_predictor_output.u.shape
u_loc = (densepose_predictor_output.u[:, 1:, :, :].clamp(0, 1) * 255).long()
v_loc = (densepose_predictor_output.v[:, 1:, :, :].clamp(0, 1) * 255).long()
Iindex = torch.arange(C - 1, device=densepose_predictor_output.u.device)[
None, :, None, None
].expand(N, C - 1, H, W)
densepose_predictor_output.u[:, 1:, :, :] = uv_symmetries["U_transforms"][Iindex, v_loc, u_loc]
densepose_predictor_output.v[:, 1:, :, :] = uv_symmetries["V_transforms"][Iindex, v_loc, u_loc]
for el in ["fine_segm", "u", "v"]:
densepose_predictor_output.__dict__[el] = densepose_predictor_output.__dict__[el][
:, point_label_symmetries, :, :
]
return densepose_predictor_output
def _flip_segm_semantics_tensor(
densepose_predictor_output: DensePoseChartPredictorOutput, dp_transform_data
):
if densepose_predictor_output.coarse_segm.shape[1] > 2:
densepose_predictor_output.coarse_segm = densepose_predictor_output.coarse_segm[
:, dp_transform_data.mask_label_symmetries, :, :
]
return densepose_predictor_output
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/chart_output_hflip.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
from .base import BaseConverter
class HFlipConverter(BaseConverter):
"""
    Applies a horizontal flip to various DensePose predictor outputs.
    Each DensePose predictor output type has to register its conversion strategy.
"""
registry = {}
dst_type = None
@classmethod
def convert(cls, predictor_outputs: Any, transform_data: Any, *args, **kwargs):
"""
        Performs a horizontal flip on DensePose predictor outputs.
Does recursive lookup for base classes, so there's no need
for explicit registration for derived classes.
Args:
            predictor_outputs: DensePose predictor output to be flipped horizontally
transform_data: Anything useful for the flip
Return:
An instance of the same type as predictor_outputs
"""
return super(HFlipConverter, cls).convert(
predictor_outputs, transform_data, *args, **kwargs
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/hflip.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import torch
from torch.nn import functional as F
from detectron2.structures import BitMasks, Boxes, BoxMode
from .base import IntTupleBox, make_int_box
from .to_mask import ImageSizeType
def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):
"""
Resample coarse segmentation tensor to the given
bounding box and derive labels for each pixel of the bounding box
Args:
coarse_segm: float tensor of shape [1, K, Hout, Wout]
box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
corner coordinates, width (W) and height (H)
Return:
Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
labels = F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
return labels
def resample_fine_and_coarse_segm_tensors_to_bbox(
fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox
):
"""
Resample fine and coarse segmentation tensors to the given
bounding box and derive labels for each pixel of the bounding box
Args:
fine_segm: float tensor of shape [1, C, Hout, Wout]
coarse_segm: float tensor of shape [1, K, Hout, Wout]
box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
corner coordinates, width (W) and height (H)
Return:
Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
# coarse segmentation
coarse_segm_bbox = F.interpolate(
coarse_segm, (h, w), mode="bilinear", align_corners=False
).argmax(dim=1)
# combined coarse and fine segmentation
labels = (
F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
* (coarse_segm_bbox > 0).long()
)
return labels
def resample_fine_and_coarse_segm_to_bbox(predictor_output: Any, box_xywh_abs: IntTupleBox):
"""
Resample fine and coarse segmentation outputs from a predictor to the given
bounding box and derive labels for each pixel of the bounding box
Args:
predictor_output: DensePose predictor output that contains segmentation
results to be resampled
box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
corner coordinates, width (W) and height (H)
Return:
Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
"""
return resample_fine_and_coarse_segm_tensors_to_bbox(
predictor_output.fine_segm,
predictor_output.coarse_segm,
box_xywh_abs,
)
def predictor_output_with_coarse_segm_to_mask(
predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
) -> BitMasks:
"""
Convert predictor output with coarse and fine segmentation to a mask.
Assumes that predictor output has the following attributes:
- coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
unnormalized scores for N instances; D is the number of coarse
segmentation labels, H and W is the resolution of the estimate
Args:
predictor_output: DensePose predictor output to be converted to mask
boxes (Boxes): bounding boxes that correspond to the DensePose
predictor outputs
image_size_hw (tuple [int, int]): image height Himg and width Wimg
Return:
BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
a mask of the size of the image for each instance
"""
H, W = image_size_hw
boxes_xyxy_abs = boxes.tensor.clone()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
N = len(boxes_xywh_abs)
masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
for i in range(len(boxes_xywh_abs)):
box_xywh = make_int_box(boxes_xywh_abs[i])
box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh)
x, y, w, h = box_xywh
masks[i, y : y + h, x : x + w] = box_mask
return BitMasks(masks)
def predictor_output_with_fine_and_coarse_segm_to_mask(
predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
) -> BitMasks:
"""
Convert predictor output with coarse and fine segmentation to a mask.
Assumes that predictor output has the following attributes:
- coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
unnormalized scores for N instances; D is the number of coarse
segmentation labels, H and W is the resolution of the estimate
- fine_segm (tensor of size [N, C, H, W]): fine segmentation
unnormalized scores for N instances; C is the number of fine
segmentation labels, H and W is the resolution of the estimate
Args:
predictor_output: DensePose predictor output to be converted to mask
boxes (Boxes): bounding boxes that correspond to the DensePose
predictor outputs
image_size_hw (tuple [int, int]): image height Himg and width Wimg
Return:
BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
a mask of the size of the image for each instance
"""
H, W = image_size_hw
boxes_xyxy_abs = boxes.tensor.clone()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
N = len(boxes_xywh_abs)
masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
for i in range(len(boxes_xywh_abs)):
box_xywh = make_int_box(boxes_xywh_abs[i])
labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh)
x, y, w, h = box_xywh
masks[i, y : y + h, x : x + w] = labels_i > 0
return BitMasks(masks)
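# Illustrative sketch (hypothetical helper, toy tensors): fine labels are kept only
# where the coarse foreground channel wins the argmax.
def _example_resample_segm_labels():
    fine_segm = torch.rand(1, 25, 8, 8)
    coarse_segm = torch.rand(1, 2, 8, 8)
    labels = resample_fine_and_coarse_segm_tensors_to_bbox(
        fine_segm, coarse_segm, (0, 0, 16, 16)
    )
    return labels  # long tensor of shape [1, 16, 16]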
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/segm_to_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
from . import (
HFlipConverter,
ToChartResultConverter,
ToChartResultConverterWithConfidences,
ToMaskConverter,
densepose_chart_predictor_output_hflip,
densepose_chart_predictor_output_to_result,
densepose_chart_predictor_output_to_result_with_confidences,
predictor_output_with_coarse_segm_to_mask,
predictor_output_with_fine_and_coarse_segm_to_mask,
)
ToMaskConverter.register(
DensePoseChartPredictorOutput, predictor_output_with_fine_and_coarse_segm_to_mask
)
ToMaskConverter.register(
DensePoseEmbeddingPredictorOutput, predictor_output_with_coarse_segm_to_mask
)
ToChartResultConverter.register(
DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result
)
ToChartResultConverterWithConfidences.register(
DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result_with_confidences
)
HFlipConverter.register(DensePoseChartPredictorOutput, densepose_chart_predictor_output_hflip)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/builtin.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Tuple, Type
import torch
class BaseConverter:
"""
Converter base class to be reused by various converters.
Converter allows one to convert data from various source types to a particular
destination type. Each source type needs to register its converter. The
registration for each source type is valid for all descendants of that type.
"""
@classmethod
def register(cls, from_type: Type, converter: Any = None):
"""
Registers a converter for the specified type.
Can be used as a decorator (if converter is None), or called as a method.
Args:
from_type (type): type to register the converter for;
all instances of this type will use the same converter
converter (callable): converter to be registered for the given
type; if None, this method is assumed to be a decorator for the converter
"""
if converter is not None:
cls._do_register(from_type, converter)
def wrapper(converter: Any) -> Any:
cls._do_register(from_type, converter)
return converter
return wrapper
@classmethod
def _do_register(cls, from_type: Type, converter: Any):
cls.registry[from_type] = converter # pyre-ignore[16]
@classmethod
def _lookup_converter(cls, from_type: Type) -> Any:
"""
Perform recursive lookup for the given type
to find registered converter. If a converter was found for some base
class, it gets registered for this class to save on further lookups.
Args:
from_type: type for which to find a converter
Return:
callable or None - registered converter or None
if no suitable entry was found in the registry
"""
if from_type in cls.registry: # pyre-ignore[16]
return cls.registry[from_type]
for base in from_type.__bases__:
converter = cls._lookup_converter(base)
if converter is not None:
cls._do_register(from_type, converter)
return converter
return None
@classmethod
def convert(cls, instance: Any, *args, **kwargs):
"""
Convert an instance to the destination type using some registered
converter. Does recursive lookup for base classes, so there's no need
for explicit registration for derived classes.
Args:
instance: source instance to convert to the destination type
Return:
An instance of the destination type obtained from the source instance
Raises KeyError, if no suitable converter found
"""
instance_type = type(instance)
converter = cls._lookup_converter(instance_type)
if converter is None:
if cls.dst_type is None: # pyre-ignore[16]
output_type_str = "itself"
else:
output_type_str = cls.dst_type
raise KeyError(f"Could not find converter from {instance_type} to {output_type_str}")
return converter(instance, *args, **kwargs)
IntTupleBox = Tuple[int, int, int, int]
def make_int_box(box: torch.Tensor) -> IntTupleBox:
int_box = [0, 0, 0, 0]
int_box[0], int_box[1], int_box[2], int_box[3] = tuple(box.long().tolist())
return int_box[0], int_box[1], int_box[2], int_box[3]
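# Illustrative sketch of the registration pattern (the `_Dummy` type and the
# `_ToStringConverter` subclass below are hypothetical, for demonstration only).
def _example_converter_registry():
    class _Dummy:
        pass

    class _ToStringConverter(BaseConverter):
        registry = {}
        dst_type = str

    _ToStringConverter.register(_Dummy, lambda instance: "dummy")
    return _ToStringConverter.convert(_Dummy())  # -> "dummy"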
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/converters/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
def verbosity_to_level(verbosity):
if verbosity is not None:
if verbosity == 0:
return logging.WARNING
elif verbosity == 1:
return logging.INFO
elif verbosity >= 2:
return logging.DEBUG
return logging.WARNING
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/utils/logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Dict, Optional, Tuple
class EntrySelector(object):
"""
Base class for entry selectors
"""
@staticmethod
def from_string(spec: str) -> "EntrySelector":
if spec == "*":
return AllEntrySelector()
return FieldEntrySelector(spec)
class AllEntrySelector(EntrySelector):
"""
Selector that accepts all entries
"""
SPECIFIER = "*"
def __call__(self, entry):
return True
class FieldEntrySelector(EntrySelector):
"""
Selector that accepts only entries that match provided field
specifier(s). Only a limited set of specifiers is supported for now:
<specifiers>::=<specifier>[<comma><specifiers>]
<specifier>::=<field_name>[<type_delim><type>]<equal><value_or_range>
<field_name> is a valid identifier
<type> ::= "int" | "str"
<equal> ::= "="
<comma> ::= ","
<type_delim> ::= ":"
<value_or_range> ::= <value> | <range>
<range> ::= <value><range_delim><value>
<range_delim> ::= "-"
<value> is a string without spaces and special symbols
(e.g. <comma>, <equal>, <type_delim>, <range_delim>)
"""
_SPEC_DELIM = ","
_TYPE_DELIM = ":"
_RANGE_DELIM = "-"
_EQUAL = "="
_ERROR_PREFIX = "Invalid field selector specifier"
class _FieldEntryValuePredicate(object):
"""
Predicate that checks strict equality for the specified entry field
"""
def __init__(self, name: str, typespec: Optional[str], value: str):
import builtins
self.name = name
self.type = getattr(builtins, typespec) if typespec is not None else str
self.value = value
def __call__(self, entry):
return entry[self.name] == self.type(self.value)
class _FieldEntryRangePredicate(object):
"""
Predicate that checks whether an entry field falls into the specified range
"""
def __init__(self, name: str, typespec: Optional[str], vmin: str, vmax: str):
import builtins
self.name = name
self.type = getattr(builtins, typespec) if typespec is not None else str
self.vmin = vmin
self.vmax = vmax
def __call__(self, entry):
return (entry[self.name] >= self.type(self.vmin)) and (
entry[self.name] <= self.type(self.vmax)
)
def __init__(self, spec: str):
self._predicates = self._parse_specifier_into_predicates(spec)
def __call__(self, entry: Dict[str, Any]):
for predicate in self._predicates:
if not predicate(entry):
return False
return True
def _parse_specifier_into_predicates(self, spec: str):
predicates = []
specs = spec.split(self._SPEC_DELIM)
for subspec in specs:
eq_idx = subspec.find(self._EQUAL)
if eq_idx > 0:
field_name_with_type = subspec[:eq_idx]
field_name, field_type = self._parse_field_name_type(field_name_with_type)
field_value_or_range = subspec[eq_idx + 1 :]
if self._is_range_spec(field_value_or_range):
vmin, vmax = self._get_range_spec(field_value_or_range)
predicate = FieldEntrySelector._FieldEntryRangePredicate(
field_name, field_type, vmin, vmax
)
else:
predicate = FieldEntrySelector._FieldEntryValuePredicate(
field_name, field_type, field_value_or_range
)
predicates.append(predicate)
elif eq_idx == 0:
self._parse_error(f'"{subspec}", field name is empty!')
else:
self._parse_error(f'"{subspec}", should have format ' "<field>=<value_or_range>!")
return predicates
def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]:
type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
if type_delim_idx > 0:
field_name = field_name_with_type[:type_delim_idx]
field_type = field_name_with_type[type_delim_idx + 1 :]
elif type_delim_idx == 0:
self._parse_error(f'"{field_name_with_type}", field name is empty!')
else:
field_name = field_name_with_type
field_type = None
return field_name, field_type
def _is_range_spec(self, field_value_or_range):
delim_idx = field_value_or_range.find(self._RANGE_DELIM)
return delim_idx > 0
def _get_range_spec(self, field_value_or_range):
if self._is_range_spec(field_value_or_range):
delim_idx = field_value_or_range.find(self._RANGE_DELIM)
vmin = field_value_or_range[:delim_idx]
vmax = field_value_or_range[delim_idx + 1 :]
return vmin, vmax
else:
self._parse_error('"field_value_or_range", range of values expected!')
def _parse_error(self, msg):
raise ValueError(f"{self._ERROR_PREFIX}: {msg}")
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/utils/dbhelper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.data import MetadataCatalog
from detectron2.utils.file_io import PathManager
from densepose import DensePoseTransformData
def load_for_dataset(dataset_name):
path = MetadataCatalog.get(dataset_name).densepose_transform_src
densepose_transform_data_fpath = PathManager.get_local_path(path)
return DensePoseTransformData.load(densepose_transform_data_fpath)
def load_from_cfg(cfg):
return load_for_dataset(cfg.DATASETS.TEST[0])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/utils/transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Optional
from torch import nn
from detectron2.config import CfgNode
from .cse.embedder import Embedder
from .filter import DensePoseDataFilter
def build_densepose_predictor(cfg: CfgNode, input_channels: int):
"""
Create an instance of DensePose predictor based on configuration options.
Args:
cfg (CfgNode): configuration options
input_channels (int): input tensor size along the channel dimension
Return:
An instance of DensePose predictor
"""
from .predictors import DENSEPOSE_PREDICTOR_REGISTRY
predictor_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME
return DENSEPOSE_PREDICTOR_REGISTRY.get(predictor_name)(cfg, input_channels)
def build_densepose_data_filter(cfg: CfgNode):
"""
Build DensePose data filter which selects data for training
Args:
cfg (CfgNode): configuration options
Return:
Callable: list(Tensor), list(Instances) -> list(Tensor), list(Instances)
An instance of DensePose filter, which takes feature tensors and proposals
as an input and returns filtered features and proposals
"""
dp_filter = DensePoseDataFilter(cfg)
return dp_filter
def build_densepose_head(cfg: CfgNode, input_channels: int):
"""
Build DensePose head based on configurations options
Args:
cfg (CfgNode): configuration options
input_channels (int): input tensor size along the channel dimension
Return:
An instance of DensePose head
"""
from .roi_heads.registry import ROI_DENSEPOSE_HEAD_REGISTRY
head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME
return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
def build_densepose_losses(cfg: CfgNode):
"""
    Build DensePose loss based on configuration options
Args:
cfg (CfgNode): configuration options
Return:
An instance of DensePose loss
"""
from .losses import DENSEPOSE_LOSS_REGISTRY
loss_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME
return DENSEPOSE_LOSS_REGISTRY.get(loss_name)(cfg)
def build_densepose_embedder(cfg: CfgNode) -> Optional[nn.Module]:
"""
Build embedder used to embed mesh vertices into an embedding space.
Embedder contains sub-embedders, one for each mesh ID.
Args:
        cfg (CfgNode): configuration options
Return:
Embedding module
"""
if cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS:
return Embedder(cfg)
return None
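

# A sketch of how the builders above are typically wired together, assuming a
# standard DensePose setup: a predictor registers itself under a name, the
# config selects it via MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME, and
# build_densepose_predictor looks it up. `ToyPredictor` is hypothetical.
if __name__ == "__main__":
    from densepose import add_densepose_config
    from densepose.modeling.predictors import DENSEPOSE_PREDICTOR_REGISTRY
    from detectron2.config import get_cfg

    @DENSEPOSE_PREDICTOR_REGISTRY.register()
    class ToyPredictor(nn.Module):
        def __init__(self, cfg: CfgNode, input_channels: int):
            super().__init__()
            self.proj = nn.Conv2d(input_channels, 1, kernel_size=1)

        def forward(self, features):
            return self.proj(features)

    cfg = get_cfg()
    add_densepose_config(cfg)
    cfg.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME = "ToyPredictor"
    predictor = build_densepose_predictor(cfg, input_channels=256)
    print(type(predictor).__name__)  # ToyPredictor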
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/build.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (leoxiaobin@gmail.com)
# Modified by Bowen Cheng (bcheng9@illinois.edu)
# Adapted from https://github.com/HRNet/Higher-HRNet-Human-Pose-Estimation/blob/master/lib/models/pose_higher_hrnet.py # noqa
# ------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import logging
import torch.nn as nn
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import BACKBONE_REGISTRY
from detectron2.modeling.backbone.backbone import Backbone
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
__all__ = ["build_pose_hrnet_backbone", "PoseHigherResolutionNet"]
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
"""HighResolutionModule
Building block of the PoseHigherResolutionNet (see lower)
arXiv: https://arxiv.org/abs/1908.10357
Args:
        num_branches (int): number of branches of the module
blocks (str): type of block of the module
num_blocks (int): number of blocks of the module
num_inchannels (int): number of input channels of the module
num_channels (list): number of channels of each branch
multi_scale_output (bool): only used by the last module of PoseHigherResolutionNet
"""
def __init__(
self,
num_branches,
blocks,
num_blocks,
num_inchannels,
num_channels,
multi_scale_output=True,
):
super(HighResolutionModule, self).__init__()
self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels)
self.num_inchannels = num_inchannels
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(True)
def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(num_branches, len(num_blocks))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = "NUM_BRANCHES({}) <> NUM_CHANNELS({})".format(
num_branches, len(num_channels)
)
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format(
num_branches, len(num_inchannels)
)
logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
downsample = None
if (
stride != 1
or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion
):
downsample = nn.Sequential(
nn.Conv2d(
self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(
block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)
)
self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion
for _ in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False),
nn.BatchNorm2d(num_inchannels[i]),
nn.Upsample(scale_factor=2 ** (j - i), mode="nearest"),
)
)
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(
nn.Sequential(
nn.Conv2d(
num_inchannels[j],
num_outchannels_conv3x3,
3,
2,
1,
bias=False,
),
nn.BatchNorm2d(num_outchannels_conv3x3),
)
)
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(
nn.Sequential(
nn.Conv2d(
num_inchannels[j],
num_outchannels_conv3x3,
3,
2,
1,
bias=False,
),
nn.BatchNorm2d(num_outchannels_conv3x3),
nn.ReLU(True),
)
)
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
else:
z = self.fuse_layers[i][j](x[j])[:, :, : y.shape[2], : y.shape[3]]
y = y + z
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {"BASIC": BasicBlock, "BOTTLENECK": Bottleneck}
class PoseHigherResolutionNet(Backbone):
"""PoseHigherResolutionNet
Composed of several HighResolutionModule tied together with ConvNets
Adapted from the GitHub version to fit with HRFPN and the Detectron2 infrastructure
arXiv: https://arxiv.org/abs/1908.10357
"""
def __init__(self, cfg, **kwargs):
self.inplanes = cfg.MODEL.HRNET.STEM_INPLANES
super(PoseHigherResolutionNet, self).__init__()
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(Bottleneck, 64, 4)
self.stage2_cfg = cfg.MODEL.HRNET.STAGE2
num_channels = self.stage2_cfg.NUM_CHANNELS
block = blocks_dict[self.stage2_cfg.BLOCK]
num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([256], num_channels)
self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = cfg.MODEL.HRNET.STAGE3
num_channels = self.stage3_cfg.NUM_CHANNELS
block = blocks_dict[self.stage3_cfg.BLOCK]
num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = cfg.MODEL.HRNET.STAGE4
num_channels = self.stage4_cfg.NUM_CHANNELS
block = blocks_dict[self.stage4_cfg.BLOCK]
num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True
)
self._out_features = []
self._out_feature_channels = {}
self._out_feature_strides = {}
for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES):
self._out_features.append("p%d" % (i + 1))
self._out_feature_channels.update(
{self._out_features[-1]: cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS[i]}
)
self._out_feature_strides.update({self._out_features[-1]: 1})
def _get_deconv_cfg(self, deconv_kernel):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(f"Unsupported deconv kernel size: {deconv_kernel}")
        return deconv_kernel, padding, output_padding
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
nn.Conv2d(
num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False,
),
nn.BatchNorm2d(num_channels_cur_layer[i]),
nn.ReLU(inplace=True),
)
)
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = (
num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels
)
conv3x3s.append(
nn.Sequential(
nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False),
nn.BatchNorm2d(outchannels),
nn.ReLU(inplace=True),
)
)
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
num_modules = layer_config["NUM_MODULES"]
num_branches = layer_config["NUM_BRANCHES"]
num_blocks = layer_config["NUM_BLOCKS"]
num_channels = layer_config["NUM_CHANNELS"]
block = blocks_dict[layer_config["BLOCK"]]
modules = []
for i in range(num_modules):
            # multi_scale_output is only used by the last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(
num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
reset_multi_scale_output,
)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg.NUM_BRANCHES):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg.NUM_BRANCHES):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg.NUM_BRANCHES):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
assert len(self._out_features) == len(y_list)
return dict(zip(self._out_features, y_list)) # final_outputs
@BACKBONE_REGISTRY.register()
def build_pose_hrnet_backbone(cfg, input_shape: ShapeSpec):
model = PoseHigherResolutionNet(cfg)
return model
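

# A runnable sketch that instantiates the backbone above with a minimal,
# made-up HRNet configuration. Only the config keys read by the constructor
# are filled in (STEM_INPLANES and the STAGE2-4 nodes); real configs use more
# modules, blocks and channels, so the numbers below are illustrative only.
if __name__ == "__main__":
    import torch
    from detectron2.config import CfgNode as CN

    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.HRNET = CN()
    cfg.MODEL.HRNET.STEM_INPLANES = 64
    cfg.MODEL.HRNET.STAGE2 = CN(
        {"NUM_MODULES": 1, "NUM_BRANCHES": 2, "BLOCK": "BASIC",
         "NUM_BLOCKS": [1, 1], "NUM_CHANNELS": [16, 32]}
    )
    cfg.MODEL.HRNET.STAGE3 = CN(
        {"NUM_MODULES": 1, "NUM_BRANCHES": 3, "BLOCK": "BASIC",
         "NUM_BLOCKS": [1, 1, 1], "NUM_CHANNELS": [16, 32, 64]}
    )
    cfg.MODEL.HRNET.STAGE4 = CN(
        {"NUM_MODULES": 1, "NUM_BRANCHES": 4, "BLOCK": "BASIC",
         "NUM_BLOCKS": [1, 1, 1, 1], "NUM_CHANNELS": [16, 32, 64, 128]}
    )

    model = build_pose_hrnet_backbone(cfg, input_shape=None)
    features = model(torch.randn(1, 3, 128, 128))
    # p1..p4 hold four resolutions, from 1/4 of the input down to 1/32.
    print({name: tuple(f.shape) for name, f in features.items()})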
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/hrnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import numpy as np
import torch
from fvcore.transforms import HFlipTransform, TransformList
from torch.nn import functional as F
from detectron2.data.transforms import RandomRotation, RotationTransform, apply_transform_gens
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA
from ..converters import HFlipConverter
class DensePoseDatasetMapperTTA(DatasetMapperTTA):
def __init__(self, cfg):
super().__init__(cfg=cfg)
self.angles = cfg.TEST.AUG.ROTATION_ANGLES
def __call__(self, dataset_dict):
ret = super().__call__(dataset_dict=dataset_dict)
numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
for angle in self.angles:
rotate = RandomRotation(angle=angle, expand=True)
new_numpy_image, tfms = apply_transform_gens([rotate], np.copy(numpy_image))
torch_image = torch.from_numpy(np.ascontiguousarray(new_numpy_image.transpose(2, 0, 1)))
dic = copy.deepcopy(dataset_dict)
# In DatasetMapperTTA, there is a pre_tfm transform (resize or no-op) that is
# added at the beginning of each TransformList. That's '.transforms[0]'.
dic["transforms"] = TransformList(
[ret[-1]["transforms"].transforms[0]] + tfms.transforms
)
dic["image"] = torch_image
ret.append(dic)
return ret
class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
"""
Args:
cfg (CfgNode):
model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
transform_data (DensePoseTransformData): contains symmetry label
transforms used for horizontal flip
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
self._transform_data = transform_data.to(model.device)
super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)
# the implementation follows closely the one from detectron2/modeling
def _inference_one_image(self, input):
"""
Args:
input (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (input["height"], input["width"])
# For some reason, resize with uint8 slightly increases box AP but decreases densepose AP
input["image"] = input["image"].to(torch.uint8)
augmented_inputs, tfms = self._get_augmented_inputs(input)
# Detect boxes from all augmented versions
with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
# temporarily disable roi heads
all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
# Use the detected boxes to obtain new fields
augmented_instances = self._rescale_detected_boxes(
augmented_inputs, merged_instances, tfms
)
# run forward on the detected boxes
outputs = self._batch_inference(augmented_inputs, augmented_instances)
# Delete now useless variables to avoid being out of memory
del augmented_inputs, augmented_instances
# average the predictions
if self.cfg.MODEL.MASK_ON:
merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
if self.cfg.MODEL.DENSEPOSE_ON:
merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms)
# postprocess
merged_instances = detector_postprocess(merged_instances, *orig_shape)
return {"instances": merged_instances}
else:
return {"instances": merged_instances}
def _get_augmented_boxes(self, augmented_inputs, tfms):
# Heavily based on detectron2/modeling/test_time_augmentation.py
# Only difference is that RotationTransform is excluded from bbox computation
# 1: forward with all augmented images
outputs = self._batch_inference(augmented_inputs)
# 2: union the results
all_boxes = []
all_scores = []
all_classes = []
for output, tfm in zip(outputs, tfms):
# Need to inverse the transforms on boxes, to obtain results on original image
if not any(isinstance(t, RotationTransform) for t in tfm.transforms):
# Some transforms can't compute bbox correctly
pred_boxes = output.pred_boxes.tensor
original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))
all_scores.extend(output.scores)
all_classes.extend(output.pred_classes)
all_boxes = torch.cat(all_boxes, dim=0)
return all_boxes, all_scores, all_classes
def _reduce_pred_densepose(self, outputs, tfms):
# Should apply inverse transforms on densepose preds.
# We assume only rotation, resize & flip are used. pred_masks is a scale-invariant
# representation, so we handle the other ones specially
for idx, (output, tfm) in enumerate(zip(outputs, tfms)):
for t in tfm.transforms:
for attr in ["coarse_segm", "fine_segm", "u", "v"]:
setattr(
output.pred_densepose,
attr,
_inverse_rotation(
getattr(output.pred_densepose, attr), output.pred_boxes.tensor, t
),
)
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
output.pred_densepose = HFlipConverter.convert(
output.pred_densepose, self._transform_data
)
self._incremental_avg_dp(outputs[0].pred_densepose, output.pred_densepose, idx)
return outputs[0].pred_densepose
# incrementally computed average: u_(n + 1) = u_n + (x_(n+1) - u_n) / (n + 1).
def _incremental_avg_dp(self, avg, new_el, idx):
for attr in ["coarse_segm", "fine_segm", "u", "v"]:
setattr(avg, attr, (getattr(avg, attr) * idx + getattr(new_el, attr)) / (idx + 1))
if idx:
# Deletion of the > 0 index intermediary values to prevent GPU OOM
setattr(new_el, attr, None)
return avg
def _inverse_rotation(densepose_attrs, boxes, transform):
# resample outputs to image size and rotate back the densepose preds
# on the rotated images to the space of the original image
if len(boxes) == 0 or not isinstance(transform, RotationTransform):
return densepose_attrs
boxes = boxes.int().cpu().numpy()
wh_boxes = boxes[:, 2:] - boxes[:, :2] # bboxes in the rotated space
inv_boxes = rotate_box_inverse(transform, boxes).astype(int) # bboxes in original image
wh_diff = (inv_boxes[:, 2:] - inv_boxes[:, :2] - wh_boxes) // 2 # diff between new/old bboxes
rotation_matrix = torch.tensor([transform.rm_image]).to(device=densepose_attrs.device).float()
rotation_matrix[:, :, -1] = 0
# To apply grid_sample for rotation, we need to have enough space to fit the original and
# rotated bboxes. l_bds and r_bds are the left/right bounds that will be used to
# crop the difference once the rotation is done
l_bds = np.maximum(0, -wh_diff)
for i in range(len(densepose_attrs)):
if min(wh_boxes[i]) <= 0:
continue
densepose_attr = densepose_attrs[[i]].clone()
# 1. Interpolate densepose attribute to size of the rotated bbox
densepose_attr = F.interpolate(densepose_attr, wh_boxes[i].tolist()[::-1], mode="bilinear")
# 2. Pad the interpolated attribute so it has room for the original + rotated bbox
densepose_attr = F.pad(densepose_attr, tuple(np.repeat(np.maximum(0, wh_diff[i]), 2)))
# 3. Compute rotation grid and transform
grid = F.affine_grid(rotation_matrix, size=densepose_attr.shape)
densepose_attr = F.grid_sample(densepose_attr, grid)
# 4. Compute right bounds and crop the densepose_attr to the size of the original bbox
r_bds = densepose_attr.shape[2:][::-1] - l_bds[i]
densepose_attr = densepose_attr[:, :, l_bds[i][1] : r_bds[1], l_bds[i][0] : r_bds[0]]
if min(densepose_attr.shape) > 0:
# Interpolate back to the original size of the densepose attribute
densepose_attr = F.interpolate(
densepose_attr, densepose_attrs.shape[-2:], mode="bilinear"
)
# Adding a very small probability to the background class to fill padded zones
densepose_attr[:, 0] += 1e-10
densepose_attrs[i] = densepose_attr
return densepose_attrs
def rotate_box_inverse(rot_tfm, rotated_box):
"""
rotated_box is a N * 4 array of [x0, y0, x1, y1] boxes
When a bbox is rotated, it gets bigger, because we need to surround the tilted bbox
So when a bbox is rotated then inverse-rotated, it is much bigger than the original
This function aims to invert the rotation on the box, but also resize it to its original size
"""
    # 1. Compute the inverse rotation of the rotated bboxes (bigger than the originals)
invrot_box = rot_tfm.inverse().apply_box(rotated_box)
h, w = rotated_box[:, 3] - rotated_box[:, 1], rotated_box[:, 2] - rotated_box[:, 0]
ih, iw = invrot_box[:, 3] - invrot_box[:, 1], invrot_box[:, 2] - invrot_box[:, 0]
assert 2 * rot_tfm.abs_sin ** 2 != 1, "45 degrees angle can't be inverted"
# 2. Inverse the corresponding computation in the rotation transform
# to get the original height/width of the rotated boxes
orig_h = (h * rot_tfm.abs_cos - w * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin ** 2)
orig_w = (w * rot_tfm.abs_cos - h * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin ** 2)
# 3. Resize the inverse-rotated bboxes to their original size
invrot_box[:, 0] += (iw - orig_w) / 2
invrot_box[:, 1] += (ih - orig_h) / 2
invrot_box[:, 2] -= (iw - orig_w) / 2
invrot_box[:, 3] -= (ih - orig_h) / 2
return invrot_box
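

# A small numerical sketch of rotate_box_inverse: rotate a box with an
# expanding RotationTransform, then invert the rotation and shrink the box
# back. The image size, angle and box below are made-up values; the recovered
# box should be close to the original one (up to floating point error).
if __name__ == "__main__":
    rot = RotationTransform(h=480, w=640, angle=20, expand=True)
    original_box = np.array([[100.0, 120.0, 300.0, 260.0]])
    rotated_box = rot.apply_box(original_box)  # enclosing box of the tilted box
    recovered_box = rotate_box_inverse(rot, rotated_box)
    print(np.round(recovered_box, 1))  # approximately the original box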
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/test_time_augmentation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from enum import Enum
from detectron2.config import CfgNode
class DensePoseUVConfidenceType(Enum):
"""
Statistical model type for confidence learning, possible values:
- "iid_iso": statistically independent identically distributed residuals
        with isotropic covariance
- "indep_aniso": statistically independent residuals with anisotropic
covariances
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
# fmt: off
IID_ISO = "iid_iso"
INDEP_ANISO = "indep_aniso"
# fmt: on
@dataclass
class DensePoseUVConfidenceConfig:
"""
Configuration options for confidence on UV data
"""
enabled: bool = False
# lower bound on UV confidences
epsilon: float = 0.01
type: DensePoseUVConfidenceType = DensePoseUVConfidenceType.IID_ISO
@dataclass
class DensePoseSegmConfidenceConfig:
"""
Configuration options for confidence on segmentation
"""
enabled: bool = False
# lower bound on confidence values
epsilon: float = 0.01
@dataclass
class DensePoseConfidenceModelConfig:
"""
Configuration options for confidence models
"""
# confidence for U and V values
uv_confidence: DensePoseUVConfidenceConfig
# segmentation confidence
segm_confidence: DensePoseSegmConfidenceConfig
@staticmethod
def from_cfg(cfg: CfgNode) -> "DensePoseConfidenceModelConfig":
return DensePoseConfidenceModelConfig(
uv_confidence=DensePoseUVConfidenceConfig(
enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED,
epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON,
type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE),
),
segm_confidence=DensePoseSegmConfidenceConfig(
enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.ENABLED,
epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON,
),
)
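

# A minimal sketch of building the confidence configuration directly from the
# dataclasses above (values are illustrative); `from_cfg` does the same thing
# starting from a full detectron2 config.
if __name__ == "__main__":
    conf_cfg = DensePoseConfidenceModelConfig(
        uv_confidence=DensePoseUVConfidenceConfig(
            enabled=True, epsilon=0.01, type=DensePoseUVConfidenceType.INDEP_ANISO
        ),
        segm_confidence=DensePoseSegmConfidenceConfig(enabled=False),
    )
    print(conf_cfg.uv_confidence.type.value)  # "indep_aniso"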
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import OrderedDict
from detectron2.checkpoint import DetectionCheckpointer
def _rename_HRNet_weights(weights):
# We detect and rename HRNet weights for DensePose. 1956 and 1716 are values that are
# common to all HRNet pretrained weights, and should be enough to accurately identify them
if (
len(weights["model"].keys()) == 1956
and len([k for k in weights["model"].keys() if k.startswith("stage")]) == 1716
):
hrnet_weights = OrderedDict()
for k in weights["model"].keys():
hrnet_weights["backbone.bottom_up." + str(k)] = weights["model"][k]
return {"model": hrnet_weights}
else:
return weights
class DensePoseCheckpointer(DetectionCheckpointer):
"""
Same as :class:`DetectionCheckpointer`, but is able to handle HRNet weights
"""
def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
super().__init__(model, save_dir, save_to_disk=save_to_disk, **checkpointables)
def _load_file(self, filename: str) -> object:
"""
Adding hrnet support
"""
weights = super()._load_file(filename)
return _rename_HRNet_weights(weights)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/densepose_checkpoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MIT License
Copyright (c) 2019 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import BACKBONE_REGISTRY
from detectron2.modeling.backbone.backbone import Backbone
from .hrnet import build_pose_hrnet_backbone
class HRFPN(Backbone):
"""HRFPN (High Resolution Feature Pyramids)
Transforms outputs of HRNet backbone so they are suitable for the ROI_heads
arXiv: https://arxiv.org/abs/1904.04514
Adapted from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/hrfpn.py
Args:
        bottom_up (Backbone): HRNet backbone whose output feature maps are fused
in_features (list): names of the input features (output of HRNet)
in_channels (list): number of channels for each branch
out_channels (int): output channels of feature pyramids
n_out_features (int): number of output stages
pooling (str): pooling for generating feature pyramids (from {MAX, AVG})
share_conv (bool): Have one conv per output, or share one with all the outputs
"""
def __init__(
self,
bottom_up,
in_features,
n_out_features,
in_channels,
out_channels,
pooling="AVG",
share_conv=False,
):
super(HRFPN, self).__init__()
assert isinstance(in_channels, list)
self.bottom_up = bottom_up
self.in_features = in_features
self.n_out_features = n_out_features
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.share_conv = share_conv
if self.share_conv:
self.fpn_conv = nn.Conv2d(
in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1
)
else:
self.fpn_conv = nn.ModuleList()
for _ in range(self.n_out_features):
self.fpn_conv.append(
nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
)
)
# Custom change: Replaces a simple bilinear interpolation
self.interp_conv = nn.ModuleList()
for i in range(len(self.in_features)):
self.interp_conv.append(
nn.Sequential(
nn.ConvTranspose2d(
in_channels=in_channels[i],
out_channels=in_channels[i],
kernel_size=4,
stride=2 ** i,
padding=0,
output_padding=0,
bias=False,
),
nn.BatchNorm2d(in_channels[i], momentum=0.1),
nn.ReLU(inplace=True),
)
)
# Custom change: Replaces a couple (reduction conv + pooling) by one conv
self.reduction_pooling_conv = nn.ModuleList()
for i in range(self.n_out_features):
self.reduction_pooling_conv.append(
nn.Sequential(
nn.Conv2d(sum(in_channels), out_channels, kernel_size=2 ** i, stride=2 ** i),
nn.BatchNorm2d(out_channels, momentum=0.1),
nn.ReLU(inplace=True),
)
)
if pooling == "MAX":
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
self._out_features = []
self._out_feature_channels = {}
self._out_feature_strides = {}
for i in range(self.n_out_features):
self._out_features.append("p%d" % (i + 1))
self._out_feature_channels.update({self._out_features[-1]: self.out_channels})
self._out_feature_strides.update({self._out_features[-1]: 2 ** (i + 2)})
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, a=1)
                if m.bias is not None:  # some convolutions above are created with bias=False
                    nn.init.constant_(m.bias, 0)
def forward(self, inputs):
bottom_up_features = self.bottom_up(inputs)
assert len(bottom_up_features) == len(self.in_features)
inputs = [bottom_up_features[f] for f in self.in_features]
outs = []
for i in range(len(inputs)):
outs.append(self.interp_conv[i](inputs[i]))
shape_2 = min(o.shape[2] for o in outs)
shape_3 = min(o.shape[3] for o in outs)
out = torch.cat([o[:, :, :shape_2, :shape_3] for o in outs], dim=1)
outs = []
for i in range(self.n_out_features):
outs.append(self.reduction_pooling_conv[i](out))
for i in range(len(outs)): # Make shapes consistent
outs[-1 - i] = outs[-1 - i][
:, :, : outs[-1].shape[2] * 2 ** i, : outs[-1].shape[3] * 2 ** i
]
outputs = []
for i in range(len(outs)):
if self.share_conv:
outputs.append(self.fpn_conv(outs[i]))
else:
outputs.append(self.fpn_conv[i](outs[i]))
assert len(self._out_features) == len(outputs)
return dict(zip(self._out_features, outputs))
@BACKBONE_REGISTRY.register()
def build_hrfpn_backbone(cfg, input_shape: ShapeSpec):
in_channels = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS
in_features = ["p%d" % (i + 1) for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES)]
n_out_features = len(cfg.MODEL.ROI_HEADS.IN_FEATURES)
out_channels = cfg.MODEL.HRNET.HRFPN.OUT_CHANNELS
hrnet = build_pose_hrnet_backbone(cfg, input_shape)
hrfpn = HRFPN(
hrnet,
in_features,
n_out_features,
in_channels,
out_channels,
pooling="AVG",
share_conv=False,
)
return hrfpn
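

# A shape-level sketch of HRFPN with a toy stand-in for the HRNet backbone.
# `_ToyHRNet` and all sizes below are made up: it returns two feature maps so
# the fusion (upsample, concat, reduce, per-level conv) can be traced cheaply.
if __name__ == "__main__":

    class _ToyHRNet(nn.Module):
        def forward(self, x):
            n = x.shape[0]
            return {
                "p1": torch.randn(n, 32, 64, 64),
                "p2": torch.randn(n, 64, 32, 32),
            }

    hrfpn = HRFPN(
        bottom_up=_ToyHRNet(),
        in_features=["p1", "p2"],
        n_out_features=2,
        in_channels=[32, 64],
        out_channels=128,
        pooling="AVG",
        share_conv=False,
    )
    outputs = hrfpn(torch.randn(1, 3, 256, 256))
    print({name: tuple(f.shape) for name, f in outputs.items()})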
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/hrfpn.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
from .filter import DensePoseDataFilter
from .inference import densepose_inference
from .utils import initialize_module_params
from .build import (
build_densepose_data_filter,
build_densepose_embedder,
build_densepose_head,
build_densepose_losses,
build_densepose_predictor,
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
def initialize_module_params(module: nn.Module):
for name, param in module.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
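

# A tiny usage sketch: initialize a small (hypothetical) head with the helper
# above; convolution biases become zero and weights get Kaiming-normal init.
if __name__ == "__main__":
    head = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 2, 1))
    initialize_module_params(head)
    print(head[0].bias.abs().sum())  # zero after initialization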
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import fields
from typing import Any, List
import torch
from detectron2.structures import Instances
def densepose_inference(densepose_predictor_output: Any, detections: List[Instances]):
"""
Splits DensePose predictor outputs into chunks, each chunk corresponds to
detections on one image. Predictor output chunks are stored in `pred_densepose`
attribute of the corresponding `Instances` object.
Args:
densepose_predictor_output: a dataclass instance (can be of different types,
depending on predictor used for inference). Each field can be `None`
(if the corresponding output was not inferred) or a tensor of size
[N, ...], where N = N_1 + N_2 + .. + N_k is a total number of
detections on all images, N_1 is the number of detections on image 1,
N_2 is the number of detections on image 2, etc.
detections: a list of objects of type `Instance`, k-th object corresponds
to detections on k-th image.
"""
k = 0
for detection_i in detections:
if densepose_predictor_output is None:
# don't add `pred_densepose` attribute
continue
n_i = len(detection_i)
PredictorOutput = type(densepose_predictor_output)
output_i_dict = {}
# we assume here that `densepose_predictor_output` is a dataclass object
for field in fields(densepose_predictor_output):
field_value = getattr(densepose_predictor_output, field.name)
# slice tensors
if isinstance(field_value, torch.Tensor):
output_i_dict[field.name] = field_value[k : k + n_i]
# leave others as is
else:
output_i_dict[field.name] = field_value
detection_i.pred_densepose = PredictorOutput(**output_i_dict)
k += n_i
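

# A minimal sketch of how the splitting above behaves. `_ToyPredictorOutput`
# is a hypothetical dataclass standing in for a real predictor output; the
# batch tensor of 5 detections is split into chunks of 3 and 2, matching the
# per-image `Instances`.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class _ToyPredictorOutput:
        coarse_segm: torch.Tensor

        def __len__(self):
            return self.coarse_segm.size(0)

    output = _ToyPredictorOutput(coarse_segm=torch.randn(5, 2, 8, 8))
    det1 = Instances((256, 256))
    det1.scores = torch.rand(3)  # 3 detections on image 1
    det2 = Instances((256, 256))
    det2.scores = torch.rand(2)  # 2 detections on image 2
    densepose_inference(output, [det1, det2])
    print(det1.pred_densepose.coarse_segm.shape)  # torch.Size([3, 2, 8, 8])
    print(det2.pred_densepose.coarse_segm.shape)  # torch.Size([2, 2, 8, 8])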
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/inference.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List
import torch
from detectron2.config import CfgNode
from detectron2.structures import Instances
from detectron2.structures.boxes import matched_boxlist_iou
class DensePoseDataFilter(object):
def __init__(self, cfg: CfgNode):
self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD
self.keep_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
@torch.no_grad() # pyre-ignore[56]
def __call__(self, features: List[torch.Tensor], proposals_with_targets: List[Instances]):
"""
Filters proposals with targets to keep only the ones relevant for
DensePose training
Args:
features (list[Tensor]): input data as a list of features,
each feature is a tensor. Axis 0 represents the number of
images `N` in the input data; axes 1-3 are channels,
height, and width, which may vary between features
(e.g., if a feature pyramid is used).
proposals_with_targets (list[Instances]): length `N` list of
`Instances`. The i-th `Instances` contains instances
(proposals, GT) for the i-th input image,
Returns:
list[Tensor]: filtered features
list[Instances]: filtered proposals
"""
proposals_filtered = []
# TODO: the commented out code was supposed to correctly deal with situations
# where no valid DensePose GT is available for certain images. The corresponding
# image features were sliced and proposals were filtered. This led to performance
# deterioration, both in terms of runtime and in terms of evaluation results.
#
# feature_mask = torch.ones(
# len(proposals_with_targets),
# dtype=torch.bool,
# device=features[0].device if len(features) > 0 else torch.device("cpu"),
# )
for i, proposals_per_image in enumerate(proposals_with_targets):
if not proposals_per_image.has("gt_densepose") and (
not proposals_per_image.has("gt_masks") or not self.keep_masks
):
# feature_mask[i] = 0
continue
gt_boxes = proposals_per_image.gt_boxes
est_boxes = proposals_per_image.proposal_boxes
# apply match threshold for densepose head
iou = matched_boxlist_iou(gt_boxes, est_boxes)
iou_select = iou > self.iou_threshold
proposals_per_image = proposals_per_image[iou_select] # pyre-ignore[6]
N_gt_boxes = len(proposals_per_image.gt_boxes)
assert N_gt_boxes == len(proposals_per_image.proposal_boxes), (
f"The number of GT boxes {N_gt_boxes} is different from the "
f"number of proposal boxes {len(proposals_per_image.proposal_boxes)}"
)
# filter out any target without suitable annotation
if self.keep_masks:
gt_masks = (
proposals_per_image.gt_masks
if hasattr(proposals_per_image, "gt_masks")
else [None] * N_gt_boxes
)
else:
gt_masks = [None] * N_gt_boxes
gt_densepose = (
proposals_per_image.gt_densepose
if hasattr(proposals_per_image, "gt_densepose")
else [None] * N_gt_boxes
)
assert len(gt_masks) == N_gt_boxes
assert len(gt_densepose) == N_gt_boxes
selected_indices = [
i
for i, (dp_target, mask_target) in enumerate(zip(gt_densepose, gt_masks))
if (dp_target is not None) or (mask_target is not None)
]
# if not len(selected_indices):
# feature_mask[i] = 0
# continue
if len(selected_indices) != N_gt_boxes:
proposals_per_image = proposals_per_image[selected_indices] # pyre-ignore[6]
assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes)
proposals_filtered.append(proposals_per_image)
# features_filtered = [feature[feature_mask] for feature in features]
# return features_filtered, proposals_filtered
return features, proposals_filtered
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/filter.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import Any, Iterable, List, Optional
import torch
from torch.nn import functional as F
from detectron2.structures import Instances
@dataclass
class DataForMaskLoss:
"""
Contains mask GT and estimated data for proposals from multiple images:
"""
# tensor of size (K, H, W) containing GT labels
masks_gt: Optional[torch.Tensor] = None
# tensor of size (K, C, H, W) containing estimated scores
masks_est: Optional[torch.Tensor] = None
def extract_data_for_mask_loss_from_matches(
proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor
) -> DataForMaskLoss:
"""
Extract data for mask loss from instances that contain matched GT and
estimated bounding boxes.
Args:
proposals_targets: Iterable[Instances]
matched GT and estimated results, each item in the iterable
corresponds to data in 1 image
estimated_segm: tensor(K, C, S, S) of float - raw unnormalized
segmentation scores, here S is the size to which GT masks are
to be resized
Return:
masks_est: tensor(K, C, S, S) of float - class scores
masks_gt: tensor(K, S, S) of int64 - labels
"""
data = DataForMaskLoss()
masks_gt = []
offset = 0
assert estimated_segm.shape[2] == estimated_segm.shape[3], (
f"Expected estimated segmentation to have a square shape, "
f"but the actual shape is {estimated_segm.shape[2:]}"
)
mask_size = estimated_segm.shape[2]
num_proposals = sum(inst.proposal_boxes.tensor.size(0) for inst in proposals_targets)
num_estimated = estimated_segm.shape[0]
assert (
num_proposals == num_estimated
), "The number of proposals {} must be equal to the number of estimates {}".format(
num_proposals, num_estimated
)
for proposals_targets_per_image in proposals_targets:
n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)
if not n_i:
continue
gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize(
proposals_targets_per_image.proposal_boxes.tensor, mask_size
).to(device=estimated_segm.device)
masks_gt.append(gt_masks_per_image)
offset += n_i
if masks_gt:
data.masks_est = estimated_segm
data.masks_gt = torch.cat(masks_gt, dim=0)
return data
class MaskLoss:
"""
Mask loss as cross-entropy for raw unnormalized scores given ground truth labels.
Mask ground truth labels are defined for the whole image and not only the
bounding box of interest. They are stored as objects that are assumed to implement
the `crop_and_resize` interface (e.g. BitMasks, PolygonMasks).
"""
def __call__(
self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any
) -> torch.Tensor:
"""
Computes segmentation loss as cross-entropy for raw unnormalized
scores given ground truth labels.
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
with estimated values; assumed to have the following attribute:
* coarse_segm (tensor of shape [N, D, S, S]): coarse segmentation estimates
as raw unnormalized scores
where N is the number of detections, S is the estimate size ( = width = height)
and D is the number of coarse segmentation channels.
Return:
Cross entropy for raw unnormalized scores for coarse segmentation given
ground truth labels from masks
"""
if not len(proposals_with_gt):
return self.fake_value(densepose_predictor_outputs)
# densepose outputs are computed for all images and all bounding boxes;
# i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
# the outputs will have size(0) == 3+1+2+1 == 7
with torch.no_grad():
mask_loss_data = extract_data_for_mask_loss_from_matches(
proposals_with_gt, densepose_predictor_outputs.coarse_segm
)
if (mask_loss_data.masks_gt is None) or (mask_loss_data.masks_est is None):
return self.fake_value(densepose_predictor_outputs)
return F.cross_entropy(
mask_loss_data.masks_est, mask_loss_data.masks_gt.long() # pyre-ignore[16]
)
def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
"""
Fake segmentation loss used when no suitable ground truth data
was found in a batch. The loss has a value 0 and is primarily used to
construct the computation graph, so that `DistributedDataParallel`
has similar graphs on all GPUs and can perform reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have `coarse_segm`
attribute
Return:
Zero value loss with proper computation graph
"""
return densepose_predictor_outputs.coarse_segm.sum() * 0
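

# A minimal sketch of MaskLoss on toy data: one proposal box, an all-zero
# bitmask as ground truth, and a random 2-channel coarse segmentation estimate
# (wrapped in a SimpleNamespace since only `.coarse_segm` is accessed). All
# shapes and values below are illustrative.
if __name__ == "__main__":
    from types import SimpleNamespace

    from detectron2.structures import BitMasks, Boxes

    inst = Instances((64, 64))
    inst.proposal_boxes = Boxes(torch.tensor([[0.0, 0.0, 32.0, 32.0]]))
    inst.gt_masks = BitMasks(torch.zeros(1, 64, 64, dtype=torch.bool))
    outputs = SimpleNamespace(coarse_segm=torch.randn(1, 2, 28, 28, requires_grad=True))
    loss = MaskLoss()(proposals_with_gt=[inst], densepose_predictor_outputs=outputs)
    print(loss)  # scalar cross-entropy with a gradient path to coarse_segm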
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import Any, Optional
import torch
from detectron2.structures import BoxMode, Instances
from .utils import AnnotationsAccumulator
@dataclass
class PackedCseAnnotations:
x_gt: torch.Tensor
y_gt: torch.Tensor
coarse_segm_gt: Optional[torch.Tensor]
vertex_mesh_ids_gt: torch.Tensor
vertex_ids_gt: torch.Tensor
bbox_xywh_gt: torch.Tensor
bbox_xywh_est: torch.Tensor
point_bbox_with_dp_indices: torch.Tensor
point_bbox_indices: torch.Tensor
bbox_indices: torch.Tensor
class CseAnnotationsAccumulator(AnnotationsAccumulator):
"""
Accumulates annotations by batches that correspond to objects detected on
individual images. Can pack them together into single tensors.
"""
def __init__(self):
self.x_gt = []
self.y_gt = []
self.s_gt = []
self.vertex_mesh_ids_gt = []
self.vertex_ids_gt = []
self.bbox_xywh_gt = []
self.bbox_xywh_est = []
self.point_bbox_with_dp_indices = []
self.point_bbox_indices = []
self.bbox_indices = []
self.nxt_bbox_with_dp_index = 0
self.nxt_bbox_index = 0
def accumulate(self, instances_one_image: Instances):
"""
Accumulate instances data for one image
Args:
instances_one_image (Instances): instances data to accumulate
"""
boxes_xywh_est = BoxMode.convert(
instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
boxes_xywh_gt = BoxMode.convert(
instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
n_matches = len(boxes_xywh_gt)
assert n_matches == len(
boxes_xywh_est
), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes"
if not n_matches:
# no detection - GT matches
return
if (
not hasattr(instances_one_image, "gt_densepose")
or instances_one_image.gt_densepose is None
):
# no densepose GT for the detections, just increase the bbox index
self.nxt_bbox_index += n_matches
return
for box_xywh_est, box_xywh_gt, dp_gt in zip(
boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose
):
if (dp_gt is not None) and (len(dp_gt.x) > 0):
self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt) # pyre-ignore[6]
self.nxt_bbox_index += 1
def _do_accumulate(self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: Any):
"""
Accumulate instances data for one image, given that the data is not empty
Args:
box_xywh_gt (tensor): GT bounding box
box_xywh_est (tensor): estimated bounding box
dp_gt: GT densepose data with the following attributes:
- x: normalized X coordinates
- y: normalized Y coordinates
- segm: tensor of size [S, S] with coarse segmentation
            - vertex_ids: tensor of annotated vertex IDs
            - mesh_id: ID of the mesh the vertices belong to
"""
self.x_gt.append(dp_gt.x)
self.y_gt.append(dp_gt.y)
if hasattr(dp_gt, "segm"):
self.s_gt.append(dp_gt.segm.unsqueeze(0))
self.vertex_ids_gt.append(dp_gt.vertex_ids)
self.vertex_mesh_ids_gt.append(torch.full_like(dp_gt.vertex_ids, dp_gt.mesh_id))
self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4))
self.bbox_xywh_est.append(box_xywh_est.view(-1, 4))
self.point_bbox_with_dp_indices.append(
torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_with_dp_index)
)
self.point_bbox_indices.append(torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_index))
self.bbox_indices.append(self.nxt_bbox_index)
self.nxt_bbox_with_dp_index += 1
def pack(self) -> Optional[PackedCseAnnotations]:
"""
Pack data into tensors
"""
if not len(self.x_gt):
# TODO:
# returning proper empty annotations would require
# creating empty tensors of appropriate shape and
# type on an appropriate device;
# we return None so far to indicate empty annotations
return None
return PackedCseAnnotations(
x_gt=torch.cat(self.x_gt, 0),
y_gt=torch.cat(self.y_gt, 0),
vertex_mesh_ids_gt=torch.cat(self.vertex_mesh_ids_gt, 0),
vertex_ids_gt=torch.cat(self.vertex_ids_gt, 0),
# ignore segmentation annotations, if not all the instances contain those
coarse_segm_gt=torch.cat(self.s_gt, 0)
if len(self.s_gt) == len(self.bbox_xywh_gt)
else None,
bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0),
bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0),
point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0),
point_bbox_indices=torch.cat(self.point_bbox_indices, 0),
bbox_indices=torch.as_tensor(
self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device
),
)
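

# A minimal sketch of the accumulator on toy data. The ground-truth object is
# a SimpleNamespace with the attributes `_do_accumulate` reads (x, y,
# vertex_ids, mesh_id); all coordinates, IDs and boxes below are made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    from detectron2.structures import Boxes

    inst = Instances((256, 256))
    inst.proposal_boxes = Boxes(torch.tensor([[10.0, 10.0, 110.0, 110.0]]))
    inst.gt_boxes = Boxes(torch.tensor([[12.0, 8.0, 108.0, 112.0]]))
    inst.gt_densepose = [
        SimpleNamespace(
            x=torch.tensor([10.0, 50.0, 200.0]),
            y=torch.tensor([20.0, 60.0, 180.0]),
            vertex_ids=torch.tensor([5, 17, 42]),
            mesh_id=0,
        )
    ]
    accumulator = CseAnnotationsAccumulator()
    accumulator.accumulate(inst)
    packed = accumulator.pack()
    # coarse_segm_gt is None here because no `segm` attribute was provided
    print(packed.vertex_ids_gt, packed.bbox_xywh_gt, packed.bbox_indices)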
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/embed_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.data.meshes.catalog import MeshCatalog
from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
from densepose.structures.mesh import create_mesh
from .embed_utils import PackedCseAnnotations
from .utils import BilinearInterpolationHelper
class SoftEmbeddingLoss:
"""
Computes losses for estimated embeddings given annotated vertices.
Instances in a minibatch that correspond to the same mesh are grouped
together. For each group, loss is computed as cross-entropy for
unnormalized scores given ground truth mesh vertex ids.
Scores are based on:
1) squared distances between estimated vertex embeddings
and mesh vertex embeddings;
2) geodesic distances between vertices of a mesh
"""
def __init__(self, cfg: CfgNode):
"""
Initialize embedding loss from config
"""
self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA
self.geodist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA
def __call__(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: PackedCseAnnotations,
interpolator: BilinearInterpolationHelper,
embedder: nn.Module,
) -> Dict[int, torch.Tensor]:
"""
Produces losses for estimated embeddings given annotated vertices.
Embeddings for all the vertices of a mesh are computed by the embedder.
Embeddings for observed pixels are estimated by a predictor.
Losses are computed as cross-entropy for unnormalized scores given
        ground truth vertex IDs. Scores are based on:
1) squared distances between estimated vertex embeddings
and mesh vertex embeddings;
2) geodesic distances between vertices of a mesh
Args:
proposals_with_gt (list of Instances): detections with associated
ground truth data; each item corresponds to instances detected
on 1 image; the number of items corresponds to the number of
images in a batch
densepose_predictor_outputs: an object of a dataclass that contains predictor
outputs with estimated values; assumed to have the following attributes:
* embedding - embedding estimates, tensor of shape [N, D, S, S], where
N = number of instances (= sum N_i, where N_i is the number of
instances on image i)
D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
S = output size (width and height)
packed_annotations (PackedCseAnnotations): contains various data useful
for loss computation, each data is packed into a single tensor
interpolator (BilinearInterpolationHelper): bilinear interpolation helper
embedder (nn.Module): module that computes vertex embeddings for different meshes
Return:
dict(int -> tensor): losses for different mesh IDs
"""
losses = {}
for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique(): # pyre-ignore[16]
mesh_id = mesh_id_tensor.item()
mesh_name = MeshCatalog.get_mesh_name(mesh_id)
# valid points are those that fall into estimated bbox
# and correspond to the current mesh
j_valid = interpolator.j_valid * ( # pyre-ignore[16]
packed_annotations.vertex_mesh_ids_gt == mesh_id
)
if not torch.any(j_valid):
continue
# extract estimated embeddings for valid points
# -> tensor [J, D]
vertex_embeddings_i = normalize_embeddings(
interpolator.extract_at_points(
densepose_predictor_outputs.embedding,
slice_fine_segm=slice(None),
w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
)[j_valid, :]
)
# extract vertex ids for valid points
# -> tensor [J]
vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]
# embeddings for all mesh vertices
# -> tensor [K, D]
mesh_vertex_embeddings = embedder(mesh_name)
# softmax values of geodesic distances for GT mesh vertices
# -> tensor [J, K]
mesh = create_mesh(mesh_name, mesh_vertex_embeddings.device)
geodist_softmax_values = F.softmax(
mesh.geodists[vertex_indices_i] / (-self.geodist_gauss_sigma), dim=1
)
# logsoftmax values for valid points
# -> tensor [J, K]
embdist_logsoftmax_values = F.log_softmax(
squared_euclidean_distance_matrix(vertex_embeddings_i, mesh_vertex_embeddings)
/ (-self.embdist_gauss_sigma),
dim=1,
)
losses[mesh_name] = (-geodist_softmax_values * embdist_logsoftmax_values).sum(1).mean()
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self,
# torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module,
# torch.Tensor]` is not a function.
for mesh_name in embedder.mesh_names:
if mesh_name not in losses:
losses[mesh_name] = self.fake_value(
densepose_predictor_outputs, embedder, mesh_name
)
return losses
def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):
losses = {}
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self,
# torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module,
# torch.Tensor]` is not a function.
for mesh_name in embedder.mesh_names:
losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)
return losses
def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):
return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0
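# A minimal numerical sketch (toy tensors and made-up sigma values, not part of
# the training pipeline above) of the per-point soft embedding loss: soft targets
# come from a softmax over negative geodesic distances to the GT vertex, predictions
# from a log-softmax over negative squared embedding distances, and the loss is
# their cross-entropy, as in `SoftEmbeddingLoss.__call__`.
def _soft_embed_loss_sketch():
    torch.manual_seed(0)
    num_points, num_vertices, dim = 4, 6, 8
    geodist_sigma, embdist_sigma = 0.5, 0.01
    # toy symmetric "geodesic" distance matrix between mesh vertices
    geodists = torch.rand(num_vertices, num_vertices)
    geodists = 0.5 * (geodists + geodists.T)
    geodists.fill_diagonal_(0)
    # toy normalized embeddings for sampled pixels and mesh vertices
    pixel_embs = F.normalize(torch.randn(num_points, dim), dim=1)
    vertex_embs = F.normalize(torch.randn(num_vertices, dim), dim=1)
    vertex_ids_gt = torch.randint(num_vertices, (num_points,))
    # soft targets: softmax over negative geodesic distances to the GT vertex
    targets = F.softmax(geodists[vertex_ids_gt] / (-geodist_sigma), dim=1)
    # predictions: log-softmax over negative squared embedding distances
    sq_dists = torch.cdist(pixel_embs, vertex_embs) ** 2
    log_probs = F.log_softmax(sq_dists / (-embdist_sigma), dim=1)
    return (-targets * log_probs).sum(1).mean()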
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/soft_embed.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from .utils import resample_data
class SegmentationLoss:
"""
Segmentation loss as cross-entropy for raw unnormalized scores given ground truth
labels. Segmentation ground truth labels are defined for the bounding box of
interest at some fixed resolution [S, S], where
S = MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE.
"""
def __init__(self, cfg: CfgNode):
"""
Initialize segmentation loss from configuration options
Args:
cfg (CfgNode): configuration options
"""
self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
def __call__(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
) -> torch.Tensor:
"""
Compute segmentation loss as cross-entropy on aligned segmentation
ground truth and estimated scores.
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
with estimated values; assumed to have the following attributes:
* coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
packed_annotations: packed annotations for efficient loss computation;
the following attributes are used:
- coarse_segm_gt
- bbox_xywh_gt
- bbox_xywh_est
"""
if packed_annotations.coarse_segm_gt is None:
return self.fake_value(densepose_predictor_outputs)
coarse_segm_est = densepose_predictor_outputs.coarse_segm[packed_annotations.bbox_indices]
with torch.no_grad():
coarse_segm_gt = resample_data(
packed_annotations.coarse_segm_gt.unsqueeze(1),
packed_annotations.bbox_xywh_gt,
packed_annotations.bbox_xywh_est,
self.heatmap_size,
self.heatmap_size,
mode="nearest",
padding_mode="zeros",
).squeeze(1)
if self.n_segm_chan == 2:
coarse_segm_gt = coarse_segm_gt > 0
return F.cross_entropy(coarse_segm_est, coarse_segm_gt.long())
def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
"""
Fake segmentation loss used when no suitable ground truth data
was found in a batch. The loss has a value 0 and is primarily used to
construct the computation graph, so that `DistributedDataParallel`
has similar graphs on all GPUs and can perform reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have `coarse_segm`
attribute
Return:
Zero value loss with proper computation graph
"""
return densepose_predictor_outputs.coarse_segm.sum() * 0
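# Illustrative sketch (assumed toy shapes, not part of the module API): with
# NUM_COARSE_SEGM_CHANNELS == 2 the resampled GT labels are binarized to
# foreground/background before the cross-entropy in `SegmentationLoss.__call__`
# is applied.
def _segm_loss_sketch():
    torch.manual_seed(0)
    n, s = 2, 4  # instances, heatmap size
    coarse_segm_est = torch.randn(n, 2, s, s)  # raw scores, 2 channels
    coarse_segm_gt = torch.randint(0, 15, (n, s, s)).float()  # e.g. part labels
    coarse_segm_gt = coarse_segm_gt > 0  # binarization used when n_segm_chan == 2
    return F.cross_entropy(coarse_segm_est, coarse_segm_gt.long())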
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/segm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.data.meshes.catalog import MeshCatalog
from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
from .embed_utils import PackedCseAnnotations
from .utils import BilinearInterpolationHelper
class EmbeddingLoss:
"""
Computes losses for estimated embeddings given annotated vertices.
Instances in a minibatch that correspond to the same mesh are grouped
together. For each group, loss is computed as cross-entropy for
unnormalized scores given ground truth mesh vertex ids.
Scores are based on squared distances between estimated vertex embeddings
and mesh vertex embeddings.
"""
def __init__(self, cfg: CfgNode):
"""
Initialize embedding loss from config
"""
self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA
def __call__(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: PackedCseAnnotations,
interpolator: BilinearInterpolationHelper,
embedder: nn.Module,
    ) -> Dict[str, torch.Tensor]:
"""
Produces losses for estimated embeddings given annotated vertices.
Embeddings for all the vertices of a mesh are computed by the embedder.
Embeddings for observed pixels are estimated by a predictor.
Losses are computed as cross-entropy for squared distances between
observed vertex embeddings and all mesh vertex embeddings given
ground truth vertex IDs.
Args:
proposals_with_gt (list of Instances): detections with associated
ground truth data; each item corresponds to instances detected
on 1 image; the number of items corresponds to the number of
images in a batch
densepose_predictor_outputs: an object of a dataclass that contains predictor
outputs with estimated values; assumed to have the following attributes:
* embedding - embedding estimates, tensor of shape [N, D, S, S], where
N = number of instances (= sum N_i, where N_i is the number of
instances on image i)
D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
S = output size (width and height)
packed_annotations (PackedCseAnnotations): contains various data useful
for loss computation, each data is packed into a single tensor
interpolator (BilinearInterpolationHelper): bilinear interpolation helper
embedder (nn.Module): module that computes vertex embeddings for different meshes
Return:
            dict(str -> tensor): losses for different mesh names
"""
losses = {}
for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique(): # pyre-ignore[16]
mesh_id = mesh_id_tensor.item()
mesh_name = MeshCatalog.get_mesh_name(mesh_id)
# valid points are those that fall into estimated bbox
# and correspond to the current mesh
j_valid = interpolator.j_valid * ( # pyre-ignore[16]
packed_annotations.vertex_mesh_ids_gt == mesh_id
)
if not torch.any(j_valid):
continue
# extract estimated embeddings for valid points
# -> tensor [J, D]
vertex_embeddings_i = normalize_embeddings(
interpolator.extract_at_points(
densepose_predictor_outputs.embedding,
slice_fine_segm=slice(None),
w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
)[j_valid, :]
)
# extract vertex ids for valid points
# -> tensor [J]
vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]
# embeddings for all mesh vertices
# -> tensor [K, D]
mesh_vertex_embeddings = embedder(mesh_name)
# unnormalized scores for valid points
# -> tensor [J, K]
scores = squared_euclidean_distance_matrix(
vertex_embeddings_i, mesh_vertex_embeddings
) / (-self.embdist_gauss_sigma)
losses[mesh_name] = F.cross_entropy(scores, vertex_indices_i, ignore_index=-1)
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self,
# torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module,
# torch.Tensor]` is not a function.
for mesh_name in embedder.mesh_names:
if mesh_name not in losses:
losses[mesh_name] = self.fake_value(
densepose_predictor_outputs, embedder, mesh_name
)
return losses
def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):
losses = {}
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self,
# torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module,
# torch.Tensor]` is not a function.
for mesh_name in embedder.mesh_names:
losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)
return losses
def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):
return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0
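# Illustrative sketch (toy tensors, not part of the module API): the hard
# embedding loss above is a standard cross-entropy where the per-vertex score
# of a pixel is its negative squared embedding distance divided by sigma,
# as computed in `EmbeddingLoss.__call__` (ignore_index handling is omitted).
def _embed_loss_sketch():
    torch.manual_seed(0)
    num_points, num_vertices, dim, sigma = 4, 6, 8, 0.01
    pixel_embs = F.normalize(torch.randn(num_points, dim), dim=1)
    vertex_embs = F.normalize(torch.randn(num_vertices, dim), dim=1)
    vertex_ids_gt = torch.randint(num_vertices, (num_points,))
    scores = torch.cdist(pixel_embs, vertex_embs) ** 2 / (-sigma)
    return F.cross_entropy(scores, vertex_ids_gt)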
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/embed.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List
import torch
from detectron2.config import CfgNode
from detectron2.structures import Instances
from .mask import MaskLoss
from .segm import SegmentationLoss
class MaskOrSegmentationLoss:
"""
Mask or segmentation loss as cross-entropy for raw unnormalized scores
given ground truth labels. Ground truth labels are either defined by coarse
segmentation annotation, or by mask annotation, depending on the config
value MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
"""
def __init__(self, cfg: CfgNode):
"""
Initialize segmentation loss from configuration options
Args:
cfg (CfgNode): configuration options
"""
self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
if self.segm_trained_by_masks:
self.mask_loss = MaskLoss()
self.segm_loss = SegmentationLoss(cfg)
def __call__(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
) -> torch.Tensor:
"""
Compute segmentation loss as cross-entropy between aligned unnormalized
score estimates and ground truth; with ground truth given
either by masks, or by coarse segmentation annotations.
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
with estimated values; assumed to have the following attributes:
* coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
packed_annotations: packed annotations for efficient loss computation
Return:
tensor: loss value as cross-entropy for raw unnormalized scores
given ground truth labels
"""
if self.segm_trained_by_masks:
return self.mask_loss(proposals_with_gt, densepose_predictor_outputs)
return self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations)
def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
"""
Fake segmentation loss used when no suitable ground truth data
was found in a batch. The loss has a value 0 and is primarily used to
construct the computation graph, so that `DistributedDataParallel`
has similar graphs on all GPUs and can perform reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have `coarse_segm`
attribute
Return:
Zero value loss with proper computation graph
"""
return densepose_predictor_outputs.coarse_segm.sum() * 0
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/mask_or_segm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.utils.registry import Registry
DENSEPOSE_LOSS_REGISTRY = Registry("DENSEPOSE_LOSS")
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/registry.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from densepose.structures.mesh import create_mesh
from .utils import sample_random_indices
class ShapeToShapeCycleLoss(nn.Module):
"""
Cycle Loss for Shapes.
Inspired by:
"Mapping in a Cycle: Sinkhorn Regularized Unsupervised Learning for Point Cloud Shapes".
"""
def __init__(self, cfg: CfgNode):
super().__init__()
self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())
self.all_shape_pairs = [
(x, y) for i, x in enumerate(self.shape_names) for y in self.shape_names[i + 1 :]
]
random.shuffle(self.all_shape_pairs)
self.cur_pos = 0
self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P
self.temperature = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE
self.max_num_vertices = (
cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES
)
def _sample_random_pair(self) -> Tuple[str, str]:
"""
Produce a random pair of different mesh names
Return:
tuple(str, str): a pair of different mesh names
"""
if self.cur_pos >= len(self.all_shape_pairs):
random.shuffle(self.all_shape_pairs)
self.cur_pos = 0
shape_pair = self.all_shape_pairs[self.cur_pos]
self.cur_pos += 1
return shape_pair
def forward(self, embedder: nn.Module):
"""
Do a forward pass with a random pair (src, dst) pair of shapes
Args:
embedder (nn.Module): module that computes vertex embeddings for different meshes
"""
src_mesh_name, dst_mesh_name = self._sample_random_pair()
return self._forward_one_pair(embedder, src_mesh_name, dst_mesh_name)
def fake_value(self, embedder: nn.Module):
losses = []
for mesh_name in embedder.mesh_names: # pyre-ignore[29]
losses.append(embedder(mesh_name).sum() * 0)
return torch.mean(torch.stack(losses))
def _get_embeddings_and_geodists_for_mesh(
self, embedder: nn.Module, mesh_name: str
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        Produces embeddings and geodesic distance tensors for a given mesh. May subsample
        the mesh, if it contains too many vertices (controlled by the
        MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES option).
Args:
embedder (nn.Module): module that computes embeddings for mesh vertices
mesh_name (str): mesh name
Return:
embeddings (torch.Tensor of size [N, D]): embeddings for selected mesh
vertices (N = number of selected vertices, D = embedding space dim)
geodists (torch.Tensor of size [N, N]): geodesic distances for the selected
mesh vertices (N = number of selected vertices)
"""
embeddings = embedder(mesh_name)
indices = sample_random_indices(
embeddings.shape[0], self.max_num_vertices, embeddings.device
)
mesh = create_mesh(mesh_name, embeddings.device)
geodists = mesh.geodists
if indices is not None:
embeddings = embeddings[indices]
geodists = geodists[torch.meshgrid(indices, indices)]
return embeddings, geodists
def _forward_one_pair(
self, embedder: nn.Module, mesh_name_1: str, mesh_name_2: str
) -> torch.Tensor:
"""
Do a forward pass with a selected pair of meshes
Args:
embedder (nn.Module): module that computes vertex embeddings for different meshes
mesh_name_1 (str): first mesh name
mesh_name_2 (str): second mesh name
Return:
Tensor containing the loss value
"""
embeddings_1, geodists_1 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_1)
embeddings_2, geodists_2 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_2)
sim_matrix_12 = embeddings_1.mm(embeddings_2.T) # pyre-ignore[16]
c_12 = F.softmax(sim_matrix_12 / self.temperature, dim=1)
c_21 = F.softmax(sim_matrix_12.T / self.temperature, dim=1)
c_11 = c_12.mm(c_21)
c_22 = c_21.mm(c_12)
loss_cycle_11 = torch.norm(geodists_1 * c_11, p=self.norm_p)
loss_cycle_22 = torch.norm(geodists_2 * c_22, p=self.norm_p)
return loss_cycle_11 + loss_cycle_22
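# Minimal numerical sketch (toy meshes and temperature, not part of the module
# API) of the cycle term computed in `_forward_one_pair` above: soft
# correspondence maps are chained src -> dst -> src, and the loss penalizes the
# geodesic distance travelled by each vertex under that round trip. Only the
# src-side term is shown; the real loss adds the symmetric dst-side term.
def _shape2shape_cycle_sketch():
    torch.manual_seed(0)
    n1, n2, dim, temperature, norm_p = 5, 7, 8, 0.05, 2
    emb_1 = F.normalize(torch.randn(n1, dim), dim=1)
    emb_2 = F.normalize(torch.randn(n2, dim), dim=1)
    # toy symmetric geodesic distances on the source mesh
    geo_1 = torch.rand(n1, n1)
    geo_1 = 0.5 * (geo_1 + geo_1.T)
    geo_1.fill_diagonal_(0)
    sim_12 = emb_1.mm(emb_2.T)
    c_12 = F.softmax(sim_12 / temperature, dim=1)  # src -> dst soft map
    c_21 = F.softmax(sim_12.T / temperature, dim=1)  # dst -> src soft map
    c_11 = c_12.mm(c_21)  # round trip src -> dst -> src
    return torch.norm(geo_1 * c_11, p=norm_p)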
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/cycle_shape2shape.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.data.meshes.catalog import MeshCatalog
from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
from .embed_utils import PackedCseAnnotations
from .mask import extract_data_for_mask_loss_from_matches
def _create_pixel_dist_matrix(grid_size: int) -> torch.Tensor:
rows = torch.arange(grid_size)
cols = torch.arange(grid_size)
# at index `i` contains [row, col], where
# row = i // grid_size
# col = i % grid_size
pix_coords = (
torch.stack(torch.meshgrid(rows, cols), -1).reshape((grid_size * grid_size, 2)).float()
)
return squared_euclidean_distance_matrix(pix_coords, pix_coords)
def _sample_fg_pixels_randperm(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor:
fg_mask_flattened = fg_mask.reshape((-1,))
num_pixels = int(fg_mask_flattened.sum().item())
fg_pixel_indices = fg_mask_flattened.nonzero(as_tuple=True)[0] # pyre-ignore[16]
if (sample_size <= 0) or (num_pixels <= sample_size):
return fg_pixel_indices
sample_indices = torch.randperm(num_pixels, device=fg_mask.device)[:sample_size]
return fg_pixel_indices[sample_indices]
def _sample_fg_pixels_multinomial(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor:
fg_mask_flattened = fg_mask.reshape((-1,))
num_pixels = int(fg_mask_flattened.sum().item())
if (sample_size <= 0) or (num_pixels <= sample_size):
return fg_mask_flattened.nonzero(as_tuple=True)[0] # pyre-ignore[16]
return fg_mask_flattened.float().multinomial(sample_size, replacement=False) # pyre-ignore[16]
class PixToShapeCycleLoss(nn.Module):
"""
Cycle loss for pixel-vertex correspondence
"""
def __init__(self, cfg: CfgNode):
super().__init__()
self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())
self.embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P
self.use_all_meshes_not_gt_only = (
cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY
)
self.num_pixels_to_sample = (
cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE
)
self.pix_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA
self.temperature_pix_to_vertex = (
cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX
)
self.temperature_vertex_to_pix = (
cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL
)
self.pixel_dists = _create_pixel_dist_matrix(cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE)
def forward(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: PackedCseAnnotations,
embedder: nn.Module,
):
"""
Args:
proposals_with_gt (list of Instances): detections with associated
ground truth data; each item corresponds to instances detected
on 1 image; the number of items corresponds to the number of
images in a batch
densepose_predictor_outputs: an object of a dataclass that contains predictor
outputs with estimated values; assumed to have the following attributes:
* embedding - embedding estimates, tensor of shape [N, D, S, S], where
N = number of instances (= sum N_i, where N_i is the number of
instances on image i)
D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
S = output size (width and height)
packed_annotations (PackedCseAnnotations): contains various data useful
for loss computation, each data is packed into a single tensor
embedder (nn.Module): module that computes vertex embeddings for different meshes
"""
pix_embeds = densepose_predictor_outputs.embedding
if self.pixel_dists.device != pix_embeds.device:
# should normally be done only once
self.pixel_dists = self.pixel_dists.to(device=pix_embeds.device)
with torch.no_grad():
mask_loss_data = extract_data_for_mask_loss_from_matches(
proposals_with_gt, densepose_predictor_outputs.coarse_segm
)
# GT masks - tensor of shape [N, S, S] of int64
masks_gt = mask_loss_data.masks_gt.long() # pyre-ignore[16]
assert len(pix_embeds) == len(masks_gt), (
f"Number of instances with embeddings {len(pix_embeds)} != "
f"number of instances with GT masks {len(masks_gt)}"
)
losses = []
mesh_names = (
self.shape_names
if self.use_all_meshes_not_gt_only
else [
MeshCatalog.get_mesh_name(mesh_id.item())
for mesh_id in packed_annotations.vertex_mesh_ids_gt.unique() # pyre-ignore[16]
]
)
for pixel_embeddings, mask_gt in zip(pix_embeds, masks_gt):
# pixel_embeddings [D, S, S]
# mask_gt [S, S]
for mesh_name in mesh_names:
mesh_vertex_embeddings = embedder(mesh_name)
# pixel indices [M]
pixel_indices_flattened = _sample_fg_pixels_randperm(
mask_gt, self.num_pixels_to_sample
)
# pixel distances [M, M]
pixel_dists = self.pixel_dists.to(pixel_embeddings.device)[
torch.meshgrid(pixel_indices_flattened, pixel_indices_flattened)
]
# pixel embeddings [M, D]
pixel_embeddings_sampled = normalize_embeddings(
pixel_embeddings.reshape((self.embed_size, -1))[:, pixel_indices_flattened].T
)
# pixel-vertex similarity [M, K]
sim_matrix = pixel_embeddings_sampled.mm( # pyre-ignore[16]
mesh_vertex_embeddings.T
)
c_pix_vertex = F.softmax(sim_matrix / self.temperature_pix_to_vertex, dim=1)
c_vertex_pix = F.softmax(sim_matrix.T / self.temperature_vertex_to_pix, dim=1)
c_cycle = c_pix_vertex.mm(c_vertex_pix)
loss_cycle = torch.norm(pixel_dists * c_cycle, p=self.norm_p)
losses.append(loss_cycle)
if len(losses) == 0:
return pix_embeds.sum() * 0
return torch.stack(losses, dim=0).mean()
def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module):
losses = [
embedder(mesh_name).sum() * 0 for mesh_name in embedder.mesh_names # pyre-ignore[29]
]
losses.append(densepose_predictor_outputs.embedding.sum() * 0)
return torch.mean(torch.stack(losses))
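# Small usage sketch (toy grid and mask, not part of the module API) for the
# helpers above: build the pixel-to-pixel squared distance matrix for a tiny
# grid and sample foreground pixel indices from a toy mask, as done per
# instance inside `PixToShapeCycleLoss.forward`.
def _pix2shape_helpers_sketch():
    grid_size, sample_size = 4, 3
    pixel_dists = _create_pixel_dist_matrix(grid_size)  # [16, 16]
    fg_mask = torch.zeros(grid_size, grid_size, dtype=torch.bool)
    fg_mask[1:3, 1:3] = True  # toy foreground square
    fg_indices = _sample_fg_pixels_randperm(fg_mask, sample_size)  # [3]
    # pairwise squared pixel distances restricted to the sampled pixels
    sampled_dists = pixel_dists[torch.meshgrid(fg_indices, fg_indices)]
    return sampled_dists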
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/cycle_pix2shape.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .chart import DensePoseChartLoss
from .chart_with_confidences import DensePoseChartWithConfidenceLoss
from .cse import DensePoseCseLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
__all__ = [
"DensePoseChartLoss",
"DensePoseChartWithConfidenceLoss",
"DensePoseCseLoss",
"DENSEPOSE_LOSS_REGISTRY",
]
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, List
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from .mask_or_segm import MaskOrSegmentationLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
from .utils import (
BilinearInterpolationHelper,
ChartBasedAnnotationsAccumulator,
LossDict,
extract_packed_annotations_from_matches,
)
@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseChartLoss:
"""
DensePose loss for chart-based training. A mesh is split into charts,
each chart is given a label (I) and parametrized by 2 coordinates referred to
as U and V. Ground truth consists of a number of points annotated with
I, U and V values and coarse segmentation S defined for all pixels of the
object bounding box. In some cases (see `COARSE_SEGM_TRAINED_BY_MASKS`),
semantic segmentation annotations can be used as ground truth inputs as well.
Estimated values are tensors:
* U coordinates, tensor of shape [N, C, S, S]
* V coordinates, tensor of shape [N, C, S, S]
* fine segmentation estimates, tensor of shape [N, C, S, S] with raw unnormalized
scores for each fine segmentation label at each location
* coarse segmentation estimates, tensor of shape [N, D, S, S] with raw unnormalized
scores for each coarse segmentation label at each location
where N is the number of detections, C is the number of fine segmentation
labels, S is the estimate size ( = width = height) and D is the number of
coarse segmentation channels.
The losses are:
* regression (smooth L1) loss for U and V coordinates
* cross entropy loss for fine (I) and coarse (S) segmentations
Each loss has an associated weight
"""
def __init__(self, cfg: CfgNode):
"""
Initialize chart-based loss from configuration options
Args:
cfg (CfgNode): configuration options
"""
# fmt: off
self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS
self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS
self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS
self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
# fmt: on
self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
self.segm_loss = MaskOrSegmentationLoss(cfg)
def __call__(
self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any, **kwargs
) -> LossDict:
"""
Produce chart-based DensePose losses
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
with estimated values; assumed to have the following attributes:
* coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
where N is the number of detections, C is the number of fine segmentation
labels, S is the estimate size ( = width = height) and D is the number of
coarse segmentation channels.
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_U`: smooth L1 loss for U coordinate estimates
* `loss_densepose_V`: smooth L1 loss for V coordinate estimates
* `loss_densepose_I`: cross entropy for raw unnormalized scores for fine
segmentation estimates given ground truth labels;
* `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse
segmentation estimates given ground truth labels;
"""
# densepose outputs are computed for all images and all bounding boxes;
# i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
# the outputs will have size(0) == 3+1+2+1 == 7
if not len(proposals_with_gt):
return self.produce_fake_densepose_losses(densepose_predictor_outputs)
accumulator = ChartBasedAnnotationsAccumulator()
packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator)
# NOTE: we need to keep the same computation graph on all the GPUs to
# perform reduction properly. Hence even if we have no data on one
# of the GPUs, we still need to generate the computation graph.
# Add fake (zero) loss in the form Tensor.sum() * 0
if packed_annotations is None:
return self.produce_fake_densepose_losses(densepose_predictor_outputs)
h, w = densepose_predictor_outputs.u.shape[2:]
interpolator = BilinearInterpolationHelper.from_matches(
packed_annotations,
(h, w),
)
j_valid_fg = interpolator.j_valid * ( # pyre-ignore[16]
packed_annotations.fine_segm_labels_gt > 0
)
if not torch.any(j_valid_fg):
return self.produce_fake_densepose_losses(densepose_predictor_outputs)
losses_uv = self.produce_densepose_losses_uv(
proposals_with_gt,
densepose_predictor_outputs,
packed_annotations,
interpolator,
j_valid_fg, # pyre-ignore[6]
)
losses_segm = self.produce_densepose_losses_segm(
proposals_with_gt,
densepose_predictor_outputs,
packed_annotations,
interpolator,
j_valid_fg, # pyre-ignore[6]
)
return {**losses_uv, **losses_segm}
def produce_fake_densepose_losses(self, densepose_predictor_outputs: Any) -> LossDict:
"""
Fake losses for fine segmentation and U/V coordinates. These are used when
no suitable ground truth data was found in a batch. The loss has a value 0
and is primarily used to construct the computation graph, so that
`DistributedDataParallel` has similar graphs on all GPUs and can perform
reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_U`: has value 0
* `loss_densepose_V`: has value 0
* `loss_densepose_I`: has value 0
* `loss_densepose_S`: has value 0
"""
losses_uv = self.produce_fake_densepose_losses_uv(densepose_predictor_outputs)
losses_segm = self.produce_fake_densepose_losses_segm(densepose_predictor_outputs)
return {**losses_uv, **losses_segm}
def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
"""
Fake losses for U/V coordinates. These are used when no suitable ground
truth data was found in a batch. The loss has a value 0
and is primarily used to construct the computation graph, so that
`DistributedDataParallel` has similar graphs on all GPUs and can perform
reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_U`: has value 0
* `loss_densepose_V`: has value 0
"""
return {
"loss_densepose_U": densepose_predictor_outputs.u.sum() * 0,
"loss_densepose_V": densepose_predictor_outputs.v.sum() * 0,
}
def produce_fake_densepose_losses_segm(self, densepose_predictor_outputs: Any) -> LossDict:
"""
Fake losses for fine / coarse segmentation. These are used when
no suitable ground truth data was found in a batch. The loss has a value 0
and is primarily used to construct the computation graph, so that
`DistributedDataParallel` has similar graphs on all GPUs and can perform
reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_I`: has value 0
* `loss_densepose_S`: has value 0, added only if `segm_trained_by_masks` is False
"""
losses = {
"loss_densepose_I": densepose_predictor_outputs.fine_segm.sum() * 0,
"loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs),
}
return losses
def produce_densepose_losses_uv(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
interpolator: BilinearInterpolationHelper,
j_valid_fg: torch.Tensor,
) -> LossDict:
"""
Compute losses for U/V coordinates: smooth L1 loss between
estimated coordinates and the ground truth.
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_U`: smooth L1 loss for U coordinate estimates
* `loss_densepose_V`: smooth L1 loss for V coordinate estimates
"""
u_gt = packed_annotations.u_gt[j_valid_fg]
u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
v_gt = packed_annotations.v_gt[j_valid_fg]
v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
return {
"loss_densepose_U": F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points,
"loss_densepose_V": F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points,
}
def produce_densepose_losses_segm(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
interpolator: BilinearInterpolationHelper,
j_valid_fg: torch.Tensor,
) -> LossDict:
"""
Losses for fine / coarse segmentation: cross-entropy
for segmentation unnormalized scores given ground truth labels at
annotated points for fine segmentation and dense mask annotations
for coarse segmentation.
Args:
proposals_with_gt (list of Instances): detections with associated ground truth data
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_I`: cross entropy for raw unnormalized scores for fine
segmentation estimates given ground truth labels
* `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse
segmentation estimates given ground truth labels;
may be included if coarse segmentation is only trained
using DensePose ground truth; if additional supervision through
instance segmentation data is performed (`segm_trained_by_masks` is True),
this loss is handled by `produce_mask_losses` instead
"""
fine_segm_gt = packed_annotations.fine_segm_labels_gt[
interpolator.j_valid # pyre-ignore[16]
]
fine_segm_est = interpolator.extract_at_points(
densepose_predictor_outputs.fine_segm,
slice_fine_segm=slice(None),
w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
)[interpolator.j_valid, :]
return {
"loss_densepose_I": F.cross_entropy(fine_segm_est, fine_segm_gt.long()) * self.w_part,
"loss_densepose_S": self.segm_loss(
proposals_with_gt, densepose_predictor_outputs, packed_annotations
)
* self.w_segm,
}
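# Toy sketch (made-up shapes and weights, not part of the module API; the real
# weights come from the config) of how the four chart-based loss terms above
# combine for a handful of annotated points: smooth L1 on U/V plus
# cross-entropy on fine and coarse segmentation.
def _chart_losses_sketch():
    torch.manual_seed(0)
    k, c, n, d, s = 10, 25, 2, 2, 4  # points, fine labels, instances, coarse chans, size
    w_points, w_part, w_segm = 0.01, 1.0, 1.0
    u_est, u_gt = torch.rand(k), torch.rand(k)
    v_est, v_gt = torch.rand(k), torch.rand(k)
    fine_segm_est, fine_segm_gt = torch.randn(k, c), torch.randint(c, (k,))
    coarse_segm_est, coarse_segm_gt = torch.randn(n, d, s, s), torch.randint(d, (n, s, s))
    return {
        "loss_densepose_U": F.smooth_l1_loss(u_est, u_gt, reduction="sum") * w_points,
        "loss_densepose_V": F.smooth_l1_loss(v_est, v_gt, reduction="sum") * w_points,
        "loss_densepose_I": F.cross_entropy(fine_segm_est, fine_segm_gt) * w_part,
        "loss_densepose_S": F.cross_entropy(coarse_segm_est, coarse_segm_gt) * w_segm,
    }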
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/chart.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import Any, List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from .. import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
from .chart import DensePoseChartLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
from .utils import BilinearInterpolationHelper, LossDict
@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseChartWithConfidenceLoss(DensePoseChartLoss):
""" """
def __init__(self, cfg: CfgNode):
super().__init__(cfg)
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
"""
        Overrides fake losses for U/V coordinates to
include computation graphs for additional confidence parameters.
These are used when no suitable ground truth data was found in a batch.
The loss has a value 0 and is primarily used to construct the computation graph,
so that `DistributedDataParallel` has similar graphs on all GPUs and can
perform reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
             * `loss_densepose_UV`: has value 0, if UV confidence estimation is
               enabled; otherwise `loss_densepose_U` and `loss_densepose_V` are
               returned instead, each with value 0
"""
conf_type = self.confidence_model_cfg.uv_confidence.type
if self.confidence_model_cfg.uv_confidence.enabled:
loss_uv = (
densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum()
) * 0
if conf_type == DensePoseUVConfidenceType.IID_ISO:
loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0
elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
loss_uv += (
densepose_predictor_outputs.sigma_2.sum()
+ densepose_predictor_outputs.kappa_u.sum()
+ densepose_predictor_outputs.kappa_v.sum()
) * 0
return {"loss_densepose_UV": loss_uv}
else:
return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs)
def produce_densepose_losses_uv(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
interpolator: BilinearInterpolationHelper,
j_valid_fg: torch.Tensor,
) -> LossDict:
conf_type = self.confidence_model_cfg.uv_confidence.type
if self.confidence_model_cfg.uv_confidence.enabled:
u_gt = packed_annotations.u_gt[j_valid_fg]
u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
v_gt = packed_annotations.v_gt[j_valid_fg]
v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[
j_valid_fg
]
if conf_type == DensePoseUVConfidenceType.IID_ISO:
return {
"loss_densepose_UV": (
self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt)
* self.w_points
)
}
elif conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]:
kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[
j_valid_fg
]
kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[
j_valid_fg
]
return {
"loss_densepose_UV": (
self.uv_loss_with_confidences(
u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt
)
* self.w_points
)
}
return super().produce_densepose_losses_uv(
proposals_with_gt,
densepose_predictor_outputs,
packed_annotations,
interpolator,
j_valid_fg,
)
class IIDIsotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
$Sigma_i = sigma_i^2 I$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IIDIsotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
# use sigma_lower_bound to avoid degenerate solution for variance
# (sigma -> 0)
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|delta_i\|^2
delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
# the total loss from the formula above:
loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2)
return loss.sum()
class IndepAnisotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IndepAnisotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
kappa_u_est: torch.Tensor,
kappa_v_est: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|r_i\|^2
r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2
delta_u = u - target_u
delta_v = v - target_v
# compute \|delta_i\|^2
delta_sqnorm = delta_u ** 2 + delta_v ** 2
delta_u_r_u = delta_u * kappa_u_est
delta_v_r_v = delta_v * kappa_v_est
# compute the scalar product <delta_i, r_i>
delta_r = delta_u_r_u + delta_v_r_v
# compute squared scalar product <delta_i, r_i>^2
delta_r_sqnorm = delta_r ** 2
denom2 = sigma2 * (sigma2 + r_sqnorm2)
loss = 0.5 * (
self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2
)
return loss.sum() # pyre-ignore[16]
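# Numerical sketch (toy values, not part of the module API): evaluate the IID
# isotropic loss above on a single point and compare it with the closed-form
# expression 0.5 * (log(2*pi) + 2*log(sigma^2) + |delta|^2 / sigma^2), where
# sigma^2 = softplus(sigma_raw) + sigma_lower_bound.
def _iid_iso_uv_loss_sketch():
    loss_fn = IIDIsotropicGaussianUVLoss(sigma_lower_bound=0.01)
    u, v = torch.tensor([0.3]), torch.tensor([0.7])
    target_u, target_v = torch.tensor([0.4]), torch.tensor([0.5])
    sigma_u = torch.tensor([0.2])  # raw (pre-softplus) confidence output
    loss = loss_fn(u, v, sigma_u, target_u, target_v)
    sigma2 = F.softplus(sigma_u) + 0.01
    delta2 = (u - target_u) ** 2 + (v - target_v) ** 2
    expected = 0.5 * (math.log(2 * math.pi) + 2 * torch.log(sigma2) + delta2 / sigma2)
    assert torch.allclose(loss, expected.sum())
    return loss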
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch.nn import functional as F
from detectron2.structures import BoxMode, Instances
from densepose import DensePoseDataRelative
LossDict = Dict[str, torch.Tensor]
def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z):
"""
Computes utility values for linear interpolation at points v.
The points are given as normalized offsets in the source interval
(v0_src, v0_src + size_src), more precisely:
v = v0_src + v_norm * size_src / 256.0
The computed utilities include lower points v_lo, upper points v_hi,
interpolation weights v_w and flags j_valid indicating whether the
points falls into the destination interval (v0_dst, v0_dst + size_dst).
Args:
v_norm (:obj: `torch.Tensor`): tensor of size N containing
normalized point offsets
v0_src (:obj: `torch.Tensor`): tensor of size N containing
left bounds of source intervals for normalized points
size_src (:obj: `torch.Tensor`): tensor of size N containing
source interval sizes for normalized points
v0_dst (:obj: `torch.Tensor`): tensor of size N containing
left bounds of destination intervals
size_dst (:obj: `torch.Tensor`): tensor of size N containing
destination interval sizes
size_z (int): interval size for data to be interpolated
Returns:
v_lo (:obj: `torch.Tensor`): int tensor of size N containing
indices of lower values used for interpolation, all values are
integers from [0, size_z - 1]
v_hi (:obj: `torch.Tensor`): int tensor of size N containing
indices of upper values used for interpolation, all values are
integers from [0, size_z - 1]
v_w (:obj: `torch.Tensor`): float tensor of size N containing
interpolation weights
j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing
            0 for points outside the destination interval
            (v0_dst, v0_dst + size_dst) and 1 otherwise
"""
v = v0_src + v_norm * size_src / 256.0
j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst)
v_grid = (v - v0_dst) * size_z / size_dst
v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1)
v_hi = (v_lo + 1).clamp(max=size_z - 1)
v_grid = torch.min(v_hi.float(), v_grid)
v_w = v_grid - v_lo.float()
return v_lo, v_hi, v_w, j_valid
class BilinearInterpolationHelper:
"""
Args:
packed_annotations: object that contains packed annotations
j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing
0 for points to be discarded and 1 for points to be selected
y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values
in z_est for each point
y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values
in z_est for each point
x_lo (:obj: `torch.Tensor`): int tensor of indices of left values
in z_est for each point
x_hi (:obj: `torch.Tensor`): int tensor of indices of right values
in z_est for each point
w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M;
contains upper-left value weight for each point
w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M;
contains upper-right value weight for each point
w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M;
contains lower-left value weight for each point
w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M;
contains lower-right value weight for each point
"""
def __init__(
self,
packed_annotations: Any,
j_valid: torch.Tensor,
y_lo: torch.Tensor,
y_hi: torch.Tensor,
x_lo: torch.Tensor,
x_hi: torch.Tensor,
w_ylo_xlo: torch.Tensor,
w_ylo_xhi: torch.Tensor,
w_yhi_xlo: torch.Tensor,
w_yhi_xhi: torch.Tensor,
):
for k, v in locals().items():
if k != "self":
setattr(self, k, v)
@staticmethod
def from_matches(
packed_annotations: Any, densepose_outputs_size_hw: Tuple[int, int]
) -> "BilinearInterpolationHelper":
"""
Args:
packed_annotations: annotations packed into tensors, the following
attributes are required:
- bbox_xywh_gt
- bbox_xywh_est
- x_gt
- y_gt
- point_bbox_with_dp_indices
- point_bbox_indices
densepose_outputs_size_hw (tuple [int, int]): resolution of
DensePose predictor outputs (H, W)
Return:
An instance of `BilinearInterpolationHelper` used to perform
interpolation for the given annotation points and output resolution
"""
zh, zw = densepose_outputs_size_hw
x0_gt, y0_gt, w_gt, h_gt = packed_annotations.bbox_xywh_gt[
packed_annotations.point_bbox_with_dp_indices
].unbind(dim=1)
x0_est, y0_est, w_est, h_est = packed_annotations.bbox_xywh_est[
packed_annotations.point_bbox_with_dp_indices
].unbind(dim=1)
x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities(
packed_annotations.x_gt, x0_gt, w_gt, x0_est, w_est, zw
)
y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities(
packed_annotations.y_gt, y0_gt, h_gt, y0_est, h_est, zh
)
j_valid = jx_valid * jy_valid
w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w)
w_ylo_xhi = x_w * (1.0 - y_w)
w_yhi_xlo = (1.0 - x_w) * y_w
w_yhi_xhi = x_w * y_w
return BilinearInterpolationHelper(
packed_annotations,
j_valid,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo, # pyre-ignore[6]
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
def extract_at_points(
self,
z_est,
slice_fine_segm=None,
w_ylo_xlo=None,
w_ylo_xhi=None,
w_yhi_xlo=None,
w_yhi_xhi=None,
):
"""
        Extract estimated values z_est at the annotated points using bilinear
        interpolation over top-left (y_lo, x_lo),
top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right
(y_hi, x_hi) values in z_est with corresponding weights:
w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi.
Use slice_fine_segm to slice dim=1 in z_est
"""
slice_fine_segm = (
self.packed_annotations.fine_segm_labels_gt
if slice_fine_segm is None
else slice_fine_segm
)
w_ylo_xlo = self.w_ylo_xlo if w_ylo_xlo is None else w_ylo_xlo
w_ylo_xhi = self.w_ylo_xhi if w_ylo_xhi is None else w_ylo_xhi
w_yhi_xlo = self.w_yhi_xlo if w_yhi_xlo is None else w_yhi_xlo
w_yhi_xhi = self.w_yhi_xhi if w_yhi_xhi is None else w_yhi_xhi
index_bbox = self.packed_annotations.point_bbox_indices
z_est_sampled = (
z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_lo] * w_ylo_xlo
+ z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_hi] * w_ylo_xhi
+ z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_lo] * w_yhi_xlo
+ z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_hi] * w_yhi_xhi
)
return z_est_sampled
def resample_data(
z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode="nearest", padding_mode="zeros"
):
"""
Args:
z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be
resampled
bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing
source bounding boxes in format XYWH
bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing
destination bounding boxes in format XYWH
        wout (int): width of the output
        hout (int): height of the output
        mode (str): interpolation mode passed to `F.grid_sample`
        padding_mode (str): padding mode passed to `F.grid_sample`
    Return:
        zresampled (:obj: `torch.Tensor`): tensor of size (N, C, hout, wout)
            with values of z resampled from the source boxes onto a regular
            grid over the destination boxes
"""
n = bbox_xywh_src.size(0)
assert n == bbox_xywh_dst.size(0), (
"The number of "
"source ROIs for resampling ({}) should be equal to the number "
"of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0))
)
x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1)
x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1)
x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1
y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1
x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1
y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1
grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout
grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / hout
grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout)
grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout)
dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout)
dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout)
x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout)
y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout)
grid_x = grid_w_expanded * dx_expanded + x0_expanded
grid_y = grid_h_expanded * dy_expanded + y0_expanded
grid = torch.stack((grid_x, grid_y), dim=3)
# resample Z from (N, C, H, W) into (N, C, Hout, Wout)
zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
return zresampled
class AnnotationsAccumulator(ABC):
"""
Abstract class for an accumulator for annotations that can produce
dense annotations packed into tensors.
"""
@abstractmethod
def accumulate(self, instances_one_image: Instances):
"""
Accumulate instances data for one image
Args:
instances_one_image (Instances): instances data to accumulate
"""
pass
@abstractmethod
def pack(self) -> Any:
"""
Pack data into tensors
"""
pass
@dataclass
class PackedChartBasedAnnotations:
"""
Packed annotations for chart-based model training. The following attributes
are defined:
- fine_segm_labels_gt (tensor [K] of `int64`): GT fine segmentation point labels
- x_gt (tensor [K] of `float32`): GT normalized X point coordinates
- y_gt (tensor [K] of `float32`): GT normalized Y point coordinates
- u_gt (tensor [K] of `float32`): GT point U values
- v_gt (tensor [K] of `float32`): GT point V values
- coarse_segm_gt (tensor [N, S, S] of `float32`): GT segmentation for bounding boxes
- bbox_xywh_gt (tensor [N, 4] of `float32`): selected GT bounding boxes in
XYWH format
- bbox_xywh_est (tensor [N, 4] of `float32`): selected matching estimated
bounding boxes in XYWH format
- point_bbox_with_dp_indices (tensor [K] of `int64`): indices of bounding boxes
with DensePose annotations that correspond to the point data
- point_bbox_indices (tensor [K] of `int64`): indices of bounding boxes
(not necessarily the selected ones with DensePose data) that correspond
to the point data
- bbox_indices (tensor [N] of `int64`): global indices of selected bounding
boxes with DensePose annotations; these indices could be used to access
features that are computed for all bounding boxes, not only the ones with
DensePose annotations.
Here K is the total number of points and N is the total number of instances
with DensePose annotations.
"""
fine_segm_labels_gt: torch.Tensor
x_gt: torch.Tensor
y_gt: torch.Tensor
u_gt: torch.Tensor
v_gt: torch.Tensor
coarse_segm_gt: Optional[torch.Tensor]
bbox_xywh_gt: torch.Tensor
bbox_xywh_est: torch.Tensor
point_bbox_with_dp_indices: torch.Tensor
point_bbox_indices: torch.Tensor
bbox_indices: torch.Tensor
class ChartBasedAnnotationsAccumulator(AnnotationsAccumulator):
"""
Accumulates annotations by batches that correspond to objects detected on
individual images. Can pack them together into single tensors.
"""
def __init__(self):
self.i_gt = []
self.x_gt = []
self.y_gt = []
self.u_gt = []
self.v_gt = []
self.s_gt = []
self.bbox_xywh_gt = []
self.bbox_xywh_est = []
self.point_bbox_with_dp_indices = []
self.point_bbox_indices = []
self.bbox_indices = []
self.nxt_bbox_with_dp_index = 0
self.nxt_bbox_index = 0
def accumulate(self, instances_one_image: Instances):
"""
Accumulate instances data for one image
Args:
instances_one_image (Instances): instances data to accumulate
"""
boxes_xywh_est = BoxMode.convert(
instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
boxes_xywh_gt = BoxMode.convert(
instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
n_matches = len(boxes_xywh_gt)
assert n_matches == len(
boxes_xywh_est
), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes"
if not n_matches:
# no detection - GT matches
return
if (
not hasattr(instances_one_image, "gt_densepose")
or instances_one_image.gt_densepose is None
):
# no densepose GT for the detections, just increase the bbox index
self.nxt_bbox_index += n_matches
return
for box_xywh_est, box_xywh_gt, dp_gt in zip(
boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose
):
if (dp_gt is not None) and (len(dp_gt.x) > 0):
self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt) # pyre-ignore[6]
self.nxt_bbox_index += 1
def _do_accumulate(
self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: DensePoseDataRelative
):
"""
Accumulate instances data for one image, given that the data is not empty
Args:
box_xywh_gt (tensor): GT bounding box
box_xywh_est (tensor): estimated bounding box
dp_gt (DensePoseDataRelative): GT densepose data
"""
self.i_gt.append(dp_gt.i)
self.x_gt.append(dp_gt.x)
self.y_gt.append(dp_gt.y)
self.u_gt.append(dp_gt.u)
self.v_gt.append(dp_gt.v)
if hasattr(dp_gt, "segm"):
self.s_gt.append(dp_gt.segm.unsqueeze(0))
self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4))
self.bbox_xywh_est.append(box_xywh_est.view(-1, 4))
self.point_bbox_with_dp_indices.append(
torch.full_like(dp_gt.i, self.nxt_bbox_with_dp_index)
)
self.point_bbox_indices.append(torch.full_like(dp_gt.i, self.nxt_bbox_index))
self.bbox_indices.append(self.nxt_bbox_index)
self.nxt_bbox_with_dp_index += 1
def pack(self) -> Optional[PackedChartBasedAnnotations]:
"""
Pack data into tensors
"""
if not len(self.i_gt):
# TODO:
# returning proper empty annotations would require
# creating empty tensors of appropriate shape and
# type on an appropriate device;
# we return None so far to indicate empty annotations
return None
return PackedChartBasedAnnotations(
fine_segm_labels_gt=torch.cat(self.i_gt, 0).long(),
x_gt=torch.cat(self.x_gt, 0),
y_gt=torch.cat(self.y_gt, 0),
u_gt=torch.cat(self.u_gt, 0),
v_gt=torch.cat(self.v_gt, 0),
# ignore segmentation annotations, if not all the instances contain those
coarse_segm_gt=torch.cat(self.s_gt, 0)
if len(self.s_gt) == len(self.bbox_xywh_gt)
else None,
bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0),
bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0),
point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0).long(),
point_bbox_indices=torch.cat(self.point_bbox_indices, 0).long(),
bbox_indices=torch.as_tensor(
self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device
).long(),
)
def extract_packed_annotations_from_matches(
proposals_with_targets: List[Instances], accumulator: AnnotationsAccumulator
) -> Any:
for proposals_targets_per_image in proposals_with_targets:
accumulator.accumulate(proposals_targets_per_image)
return accumulator.pack()
def sample_random_indices(
n_indices: int, n_samples: int, device: Optional[torch.device] = None
) -> Optional[torch.Tensor]:
"""
Samples `n_samples` random indices from range `[0..n_indices - 1]`.
    If `n_samples` is non-positive or `n_indices` does not exceed `n_samples`,
    returns `None`, meaning that all indices are selected.
Args:
n_indices (int): total number of indices
n_samples (int): number of indices to sample
device (torch.device): the desired device of returned tensor
Return:
Tensor of selected vertex indices, or `None`, if all vertices are selected
"""
if (n_samples <= 0) or (n_indices <= n_samples):
return None
indices = torch.randperm(n_indices, device=device)[:n_samples]
return indices
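# A minimal sketch of how `sample_random_indices` behaves; the sizes below are arbitrary
# and the `_demo_*` helper is purely illustrative.
def _demo_sample_random_indices():
    import torch

    # Requesting more samples than there are indices selects everything (returns None).
    assert sample_random_indices(n_indices=5, n_samples=10) is None
    # Otherwise a prefix of a random permutation of the requested length is returned.
    idx = sample_random_indices(n_indices=100, n_samples=10, device=torch.device("cpu"))
    assert idx is not None and idx.shape == (10,)
    return idx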
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List
from torch import nn
from detectron2.config import CfgNode
from detectron2.structures import Instances
from .cycle_pix2shape import PixToShapeCycleLoss
from .cycle_shape2shape import ShapeToShapeCycleLoss
from .embed import EmbeddingLoss
from .embed_utils import CseAnnotationsAccumulator
from .mask_or_segm import MaskOrSegmentationLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
from .soft_embed import SoftEmbeddingLoss
from .utils import BilinearInterpolationHelper, LossDict, extract_packed_annotations_from_matches
@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseCseLoss:
""" """
_EMBED_LOSS_REGISTRY = {
EmbeddingLoss.__name__: EmbeddingLoss,
SoftEmbeddingLoss.__name__: SoftEmbeddingLoss,
}
def __init__(self, cfg: CfgNode):
"""
Initialize CSE loss from configuration options
Args:
cfg (CfgNode): configuration options
"""
self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS
self.w_embed = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT
self.segm_loss = MaskOrSegmentationLoss(cfg)
self.embed_loss = DensePoseCseLoss.create_embed_loss(cfg)
self.do_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.ENABLED
if self.do_shape2shape:
self.w_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT
self.shape2shape_loss = ShapeToShapeCycleLoss(cfg)
self.do_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.ENABLED
if self.do_pix2shape:
self.w_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT
self.pix2shape_loss = PixToShapeCycleLoss(cfg)
@classmethod
def create_embed_loss(cls, cfg: CfgNode):
# registry not used here, since embedding losses are currently local
# and are not used anywhere else
return cls._EMBED_LOSS_REGISTRY[cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME](cfg)
def __call__(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
embedder: nn.Module,
) -> LossDict:
if not len(proposals_with_gt):
return self.produce_fake_losses(densepose_predictor_outputs, embedder)
accumulator = CseAnnotationsAccumulator()
packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator)
if packed_annotations is None:
return self.produce_fake_losses(densepose_predictor_outputs, embedder)
h, w = densepose_predictor_outputs.embedding.shape[2:]
interpolator = BilinearInterpolationHelper.from_matches(
packed_annotations,
(h, w),
)
meshid_to_embed_losses = self.embed_loss(
proposals_with_gt,
densepose_predictor_outputs,
packed_annotations,
interpolator,
embedder,
)
embed_loss_dict = {
f"loss_densepose_E{meshid}": self.w_embed * meshid_to_embed_losses[meshid]
for meshid in meshid_to_embed_losses
}
all_loss_dict = {
"loss_densepose_S": self.w_segm
* self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations),
**embed_loss_dict,
}
if self.do_shape2shape:
all_loss_dict["loss_shape2shape"] = self.w_shape2shape * self.shape2shape_loss(embedder)
if self.do_pix2shape:
all_loss_dict["loss_pix2shape"] = self.w_pix2shape * self.pix2shape_loss(
proposals_with_gt, densepose_predictor_outputs, packed_annotations, embedder
)
return all_loss_dict
def produce_fake_losses(
self, densepose_predictor_outputs: Any, embedder: nn.Module
) -> LossDict:
meshname_to_embed_losses = self.embed_loss.fake_values(
densepose_predictor_outputs, embedder=embedder
)
embed_loss_dict = {
f"loss_densepose_E{mesh_name}": meshname_to_embed_losses[mesh_name]
for mesh_name in meshname_to_embed_losses
}
all_loss_dict = {
"loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs),
**embed_loss_dict,
}
if self.do_shape2shape:
all_loss_dict["loss_shape2shape"] = self.shape2shape_loss.fake_value(embedder)
if self.do_pix2shape:
all_loss_dict["loss_pix2shape"] = self.pix2shape_loss.fake_value(
densepose_predictor_outputs, embedder
)
return all_loss_dict
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/losses/cse.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.layers import ConvTranspose2d
from ...structures import decorate_predictor_output_class_with_confidences
from ..confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
from ..utils import initialize_module_params
class DensePoseChartConfidencePredictorMixin:
"""
Predictor contains the last layers of a DensePose model that take DensePose head
outputs as an input and produce model outputs. Confidence predictor mixin is used
to generate confidences for segmentation and UV tensors estimated by some
base predictor. Several assumptions need to hold for the base predictor:
1) the `forward` method must return SIUV tuple as the first result (
S = coarse segmentation, I = fine segmentation, U and V are intrinsic
chart coordinates)
2) `interp2d` method must be defined to perform bilinear interpolation;
the same method is typically used for SIUV and confidences
Confidence predictor mixin provides confidence estimates, as described in:
N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences
from Noisy Labels, NeurIPS 2019
A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize confidence predictor using configuration options.
Args:
cfg (CfgNode): configuration options
input_channels (int): number of input channels
"""
# we rely on base predictor to call nn.Module.__init__
super().__init__(cfg, input_channels) # pyre-ignore[19]
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
self._initialize_confidence_estimation_layers(cfg, input_channels)
self._registry = {}
initialize_module_params(self) # pyre-ignore[6]
def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int):
"""
Initialize confidence estimation layers based on configuration options
Args:
cfg (CfgNode): configuration options
dim_in (int): number of input channels
"""
dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
if self.confidence_model_cfg.uv_confidence.enabled:
if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
self.sigma_2_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
elif (
self.confidence_model_cfg.uv_confidence.type
== DensePoseUVConfidenceType.INDEP_ANISO
):
self.sigma_2_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.kappa_u_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.kappa_v_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
else:
                raise ValueError(
                    f"Unknown UV confidence type: "
                    f"{self.confidence_model_cfg.uv_confidence.type}"
                )
if self.confidence_model_cfg.segm_confidence.enabled:
self.fine_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
def forward(self, head_outputs: torch.Tensor):
"""
Perform forward operation on head outputs used as inputs for the predictor.
Calls forward method from the base predictor and uses its outputs to compute
confidences.
Args:
head_outputs (Tensor): head outputs used as predictor inputs
Return:
An instance of outputs with confidences,
see `decorate_predictor_output_class_with_confidences`
"""
# assuming base class returns SIUV estimates in its first result
base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16]
# create output instance by extending base predictor outputs:
output = self._create_output_instance(base_predictor_outputs)
if self.confidence_model_cfg.uv_confidence.enabled:
if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
# assuming base class defines interp2d method for bilinear interpolation
output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs)) # pyre-ignore[16]
elif (
self.confidence_model_cfg.uv_confidence.type
== DensePoseUVConfidenceType.INDEP_ANISO
):
# assuming base class defines interp2d method for bilinear interpolation
output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs))
output.kappa_u = self.interp2d(self.kappa_u_lowres(head_outputs)) # pyre-ignore[16]
output.kappa_v = self.interp2d(self.kappa_v_lowres(head_outputs)) # pyre-ignore[16]
else:
                raise ValueError(
                    f"Unknown UV confidence type: "
                    f"{self.confidence_model_cfg.uv_confidence.type}"
                )
if self.confidence_model_cfg.segm_confidence.enabled:
# base predictor outputs are assumed to have `fine_segm` and `coarse_segm` attributes
# base predictor is assumed to define `interp2d` method for bilinear interpolation
output.fine_segm_confidence = (
F.softplus(
self.interp2d(self.fine_segm_confidence_lowres(head_outputs)) # pyre-ignore[16]
)
+ self.confidence_model_cfg.segm_confidence.epsilon
)
output.fine_segm = base_predictor_outputs.fine_segm * torch.repeat_interleave(
output.fine_segm_confidence, base_predictor_outputs.fine_segm.shape[1], dim=1
)
output.coarse_segm_confidence = (
F.softplus(
self.interp2d(
self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16]
)
)
+ self.confidence_model_cfg.segm_confidence.epsilon
)
output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave(
output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1
)
return output
def _create_output_instance(self, base_predictor_outputs: Any):
"""
Create an instance of predictor outputs by copying the outputs from the
base predictor and initializing confidence
Args:
base_predictor_outputs: an instance of base predictor outputs
(the outputs type is assumed to be a dataclass)
Return:
An instance of outputs with confidences
"""
PredictorOutput = decorate_predictor_output_class_with_confidences(
type(base_predictor_outputs) # pyre-ignore[6]
)
# base_predictor_outputs is assumed to be a dataclass
# reassign all the fields from base_predictor_outputs (no deep copy!), add new fields
output = PredictorOutput(
**base_predictor_outputs.__dict__,
coarse_segm_confidence=None,
fine_segm_confidence=None,
sigma_1=None,
sigma_2=None,
kappa_u=None,
kappa_v=None,
)
return output
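# A minimal sketch of the cooperative-inheritance pattern this mixin relies on: the mixin
# is listed before a base predictor so that `super().__init__` / `super().forward` resolve
# to the base predictor. The `_Toy*` classes below are hypothetical stand-ins.
def _demo_confidence_mixin_mro():
    class _ToyBasePredictor:
        def __init__(self, cfg, input_channels):
            self.input_channels = input_channels

        def forward(self, head_outputs):
            return {"base": head_outputs}

    class _ToyConfidenceMixin:
        def __init__(self, cfg, input_channels):
            super().__init__(cfg, input_channels)

        def forward(self, head_outputs):
            outputs = super().forward(head_outputs)
            outputs["confidence"] = "added by the mixin"
            return outputs

    class _ToyPredictorWithConfidence(_ToyConfidenceMixin, _ToyBasePredictor):
        pass

    predictor = _ToyPredictorWithConfidence(cfg=None, input_channels=8)
    return predictor.forward("head outputs")  # -> {'base': 'head outputs', 'confidence': ...}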
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/chart_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.utils.registry import Registry
DENSEPOSE_PREDICTOR_REGISTRY = Registry("DENSEPOSE_PREDICTOR")
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/registry.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.layers import ConvTranspose2d
from densepose.modeling.confidence import DensePoseConfidenceModelConfig
from densepose.modeling.utils import initialize_module_params
from densepose.structures import decorate_cse_predictor_output_class_with_confidences
class DensePoseEmbeddingConfidencePredictorMixin:
"""
Predictor contains the last layers of a DensePose model that take DensePose head
outputs as an input and produce model outputs. Confidence predictor mixin is used
to generate confidences for coarse segmentation estimated by some
base predictor. Several assumptions need to hold for the base predictor:
    1) the `forward` method must return CSE predictor outputs, i.e. an instance
       with `embedding` and `coarse_segm` fields (see `DensePoseEmbeddingPredictorOutput`)
2) `interp2d` method must be defined to perform bilinear interpolation;
the same method is typically used for masks and confidences
Confidence predictor mixin provides confidence estimates, as described in:
N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences
from Noisy Labels, NeurIPS 2019
A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize confidence predictor using configuration options.
Args:
cfg (CfgNode): configuration options
input_channels (int): number of input channels
"""
# we rely on base predictor to call nn.Module.__init__
super().__init__(cfg, input_channels) # pyre-ignore[19]
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
self._initialize_confidence_estimation_layers(cfg, input_channels)
self._registry = {}
initialize_module_params(self) # pyre-ignore[6]
def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int):
"""
Initialize confidence estimation layers based on configuration options
Args:
cfg (CfgNode): configuration options
dim_in (int): number of input channels
"""
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
if self.confidence_model_cfg.segm_confidence.enabled:
self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
def forward(self, head_outputs: torch.Tensor):
"""
Perform forward operation on head outputs used as inputs for the predictor.
Calls forward method from the base predictor and uses its outputs to compute
confidences.
Args:
head_outputs (Tensor): head outputs used as predictor inputs
Return:
An instance of outputs with confidences,
see `decorate_cse_predictor_output_class_with_confidences`
"""
        # assuming base class returns CSE predictor outputs (embedding and coarse segmentation)
base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16]
# create output instance by extending base predictor outputs:
output = self._create_output_instance(base_predictor_outputs)
if self.confidence_model_cfg.segm_confidence.enabled:
# base predictor outputs are assumed to have `coarse_segm` attribute
# base predictor is assumed to define `interp2d` method for bilinear interpolation
output.coarse_segm_confidence = (
F.softplus(
self.interp2d( # pyre-ignore[16]
self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16]
)
)
+ self.confidence_model_cfg.segm_confidence.epsilon
)
output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave(
output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1
)
return output
def _create_output_instance(self, base_predictor_outputs: Any):
"""
Create an instance of predictor outputs by copying the outputs from the
base predictor and initializing confidence
Args:
base_predictor_outputs: an instance of base predictor outputs
(the outputs type is assumed to be a dataclass)
Return:
An instance of outputs with confidences
"""
PredictorOutput = decorate_cse_predictor_output_class_with_confidences(
type(base_predictor_outputs) # pyre-ignore[6]
)
# base_predictor_outputs is assumed to be a dataclass
# reassign all the fields from base_predictor_outputs (no deep copy!), add new fields
output = PredictorOutput(
**base_predictor_outputs.__dict__,
coarse_segm_confidence=None,
)
return output
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/cse_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from . import DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor
from .registry import DENSEPOSE_PREDICTOR_REGISTRY
@DENSEPOSE_PREDICTOR_REGISTRY.register()
class DensePoseChartWithConfidencePredictor(
DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor
):
"""
Predictor that combines chart and chart confidence estimation
"""
pass
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/chart_with_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .chart import DensePoseChartPredictor
from .chart_confidence import DensePoseChartConfidencePredictorMixin
from .chart_with_confidence import DensePoseChartWithConfidencePredictor
from .cse import DensePoseEmbeddingPredictor
from .cse_confidence import DensePoseEmbeddingConfidencePredictorMixin
from .cse_with_confidence import DensePoseEmbeddingWithConfidencePredictor
from .registry import DENSEPOSE_PREDICTOR_REGISTRY
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from detectron2.config import CfgNode
from detectron2.layers import ConvTranspose2d, interpolate
from ...structures import DensePoseChartPredictorOutput
from ..utils import initialize_module_params
from .registry import DENSEPOSE_PREDICTOR_REGISTRY
@DENSEPOSE_PREDICTOR_REGISTRY.register()
class DensePoseChartPredictor(nn.Module):
"""
Predictor (last layers of a DensePose model) that takes DensePose head outputs as an input
and produces 4 tensors which represent DensePose results for predefined body parts
(patches / charts):
* coarse segmentation, a tensor of shape [N, K, Hout, Wout]
* fine segmentation, a tensor of shape [N, C, Hout, Wout]
* U coordinates, a tensor of shape [N, C, Hout, Wout]
* V coordinates, a tensor of shape [N, C, Hout, Wout]
where
- N is the number of instances
- K is the number of coarse segmentation channels (
2 = foreground / background,
15 = one of 14 body parts / background)
- C is the number of fine segmentation channels (
24 fine body parts / background)
- Hout and Wout are height and width of predictions
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize predictor using configuration options
Args:
cfg (CfgNode): configuration options
input_channels (int): input tensor size along the channel dimension
"""
super().__init__()
dim_in = input_channels
n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
# coarse segmentation
self.ann_index_lowres = ConvTranspose2d(
dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
# fine segmentation
self.index_uv_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
# U
self.u_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
# V
self.v_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE
initialize_module_params(self)
def interp2d(self, tensor_nchw: torch.Tensor):
"""
Bilinear interpolation method to be used for upscaling
Args:
tensor_nchw (tensor): tensor of shape (N, C, H, W)
Return:
tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed
by applying the scale factor to H and W
"""
return interpolate(
tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False
)
def forward(self, head_outputs: torch.Tensor):
"""
Perform forward step on DensePose head outputs
Args:
head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W]
Return:
An instance of DensePoseChartPredictorOutput
"""
return DensePoseChartPredictorOutput(
coarse_segm=self.interp2d(self.ann_index_lowres(head_outputs)),
fine_segm=self.interp2d(self.index_uv_lowres(head_outputs)),
u=self.interp2d(self.u_lowres(head_outputs)),
v=self.interp2d(self.v_lowres(head_outputs)),
)
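# A minimal shape sketch of what `interp2d` does: with UP_SCALE = 2, an input of shape
# [N, C, H, W] becomes [N, C, 2H, 2W]. It uses plain torch.nn.functional.interpolate,
# which detectron2's `interpolate` is expected to mirror for non-empty inputs; the sizes
# below are arbitrary.
def _demo_interp2d_shapes():
    import torch
    import torch.nn.functional as F

    x = torch.zeros(1, 25, 28, 28)  # e.g. 24 fine body parts + background at 28x28
    y = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
    assert y.shape == (1, 25, 56, 56)
    return y.shape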
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/chart.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from . import DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor
from .registry import DENSEPOSE_PREDICTOR_REGISTRY
@DENSEPOSE_PREDICTOR_REGISTRY.register()
class DensePoseEmbeddingWithConfidencePredictor(
DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor
):
"""
Predictor that combines CSE and CSE confidence estimation
"""
pass
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/cse_with_confidence.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from detectron2.config import CfgNode
from detectron2.layers import ConvTranspose2d, interpolate
from ...structures import DensePoseEmbeddingPredictorOutput
from ..utils import initialize_module_params
from .registry import DENSEPOSE_PREDICTOR_REGISTRY
@DENSEPOSE_PREDICTOR_REGISTRY.register()
class DensePoseEmbeddingPredictor(nn.Module):
"""
Last layers of a DensePose model that take DensePose head outputs as an input
and produce model outputs for continuous surface embeddings (CSE).
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize predictor using configuration options
Args:
cfg (CfgNode): configuration options
input_channels (int): input tensor size along the channel dimension
"""
super().__init__()
dim_in = input_channels
n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
# coarse segmentation
self.coarse_segm_lowres = ConvTranspose2d(
dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
# embedding
self.embed_lowres = ConvTranspose2d(
dim_in, embed_size, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE
initialize_module_params(self)
def interp2d(self, tensor_nchw: torch.Tensor):
"""
Bilinear interpolation method to be used for upscaling
Args:
tensor_nchw (tensor): tensor of shape (N, C, H, W)
Return:
tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed
by applying the scale factor to H and W
"""
return interpolate(
tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False
)
def forward(self, head_outputs):
"""
Perform forward step on DensePose head outputs
Args:
head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W]
"""
embed_lowres = self.embed_lowres(head_outputs)
coarse_segm_lowres = self.coarse_segm_lowres(head_outputs)
embed = self.interp2d(embed_lowres)
coarse_segm = self.interp2d(coarse_segm_lowres)
return DensePoseEmbeddingPredictorOutput(embedding=embed, coarse_segm=coarse_segm)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/predictors/cse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .vertex_direct_embedder import VertexDirectEmbedder
from .vertex_feature_embedder import VertexFeatureEmbedder
from .embedder import Embedder
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/cse/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor:
"""
Get squared Euclidean Distance Matrix
Computes pairwise squared Euclidean distances between points
Args:
pts1: Tensor [M x D], M is the number of points, D is feature dimensionality
pts2: Tensor [N x D], N is the number of points, D is feature dimensionality
Return:
Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n)
it contains || pts1[m] - pts2[n] ||^2
"""
edm = torch.mm(-2 * pts1, pts2.t())
edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t()
return edm.contiguous()
def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(
embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon # pyre-ignore[6]
)
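# A minimal numerical sketch of the identity used above,
# ||p - q||^2 = ||p||^2 - 2 <p, q> + ||q||^2, and of the unit norms produced by
# `normalize_embeddings`; the tensor sizes are arbitrary.
def _demo_embedding_utils():
    import torch

    pts1 = torch.randn(4, 16)
    pts2 = torch.randn(6, 16)
    edm = squared_euclidean_distance_matrix(pts1, pts2)
    brute_force = ((pts1[:, None, :] - pts2[None, :, :]) ** 2).sum(-1)
    assert torch.allclose(edm, brute_force, atol=1e-4)

    normed = normalize_embeddings(pts1)
    assert torch.allclose(normed.norm(dim=1), torch.ones(4), atol=1e-5)
    return edm.shape  # -> (4, 6)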
def get_closest_vertices_mask_from_ES(
E: torch.Tensor,
S: torch.Tensor,
h: int,
w: int,
mesh_vertex_embeddings: torch.Tensor,
device: torch.device,
):
"""
Interpolate Embeddings and Segmentations to the size of a given bounding box,
and compute closest vertices and the segmentation mask
Args:
E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the
default-sized box
S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the
default-sized box
h (int): height of the target bounding box
w (int): width of the target bounding box
mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh
N is the number of vertices in the mesh, D is feature dimensionality
device (torch.device): device to move the tensors to
Return:
Closest Vertices (tensor [h, w]), int, for every point of the resulting box
Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box
"""
embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device)
coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device)
mask = coarse_segm_resized.argmax(0) > 0
closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device)
all_embeddings = embedding_resized[:, mask].t()
size_chunk = 10_000 # Chunking to avoid possible OOM
edm = []
if len(all_embeddings) == 0:
return closest_vertices, mask
for chunk in range((len(all_embeddings) - 1) // size_chunk + 1):
chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)]
edm.append(
torch.argmin(
squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1
)
)
closest_vertices[mask] = torch.cat(edm)
return closest_vertices, mask
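# A minimal call sketch with random tensors; the embedding dimension (16), box size (24)
# and mesh size (100) are arbitrary. With a coarse segmentation whose foreground channel
# dominates everywhere, the mask is all True and every pixel is assigned a nearest vertex.
def _demo_closest_vertices():
    import torch

    device = torch.device("cpu")
    E = torch.randn(1, 16, 8, 8)
    S = torch.zeros(1, 2, 8, 8)
    S[:, 1] = 1.0  # channel 1 (foreground) wins the argmax at every pixel
    mesh_vertex_embeddings = torch.randn(100, 16)
    closest, mask = get_closest_vertices_mask_from_ES(
        E, S, 24, 24, mesh_vertex_embeddings, device
    )
    assert closest.shape == (24, 24) and bool(mask.all())
    return closest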
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/cse/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import pickle
from enum import Enum
from typing import Optional
import torch
from torch import nn
from detectron2.config import CfgNode
from detectron2.utils.file_io import PathManager
from .vertex_direct_embedder import VertexDirectEmbedder
from .vertex_feature_embedder import VertexFeatureEmbedder
class EmbedderType(Enum):
"""
Embedder type which defines how vertices are mapped into the embedding space:
- "vertex_direct": direct vertex embedding
- "vertex_feature": embedding vertex features
"""
VERTEX_DIRECT = "vertex_direct"
VERTEX_FEATURE = "vertex_feature"
def create_embedder(embedder_spec: CfgNode, embedder_dim: int) -> nn.Module:
"""
Create an embedder based on the provided configuration
Args:
embedder_spec (CfgNode): embedder configuration
embedder_dim (int): embedding space dimensionality
Return:
An embedder instance for the specified configuration
Raises ValueError, in case of unexpected embedder type
"""
embedder_type = EmbedderType(embedder_spec.TYPE)
if embedder_type == EmbedderType.VERTEX_DIRECT:
embedder = VertexDirectEmbedder(
num_vertices=embedder_spec.NUM_VERTICES,
embed_dim=embedder_dim,
)
if embedder_spec.INIT_FILE != "":
embedder.load(embedder_spec.INIT_FILE)
elif embedder_type == EmbedderType.VERTEX_FEATURE:
embedder = VertexFeatureEmbedder(
num_vertices=embedder_spec.NUM_VERTICES,
feature_dim=embedder_spec.FEATURE_DIM,
embed_dim=embedder_dim,
train_features=embedder_spec.FEATURES_TRAINABLE,
)
if embedder_spec.INIT_FILE != "":
embedder.load(embedder_spec.INIT_FILE)
else:
raise ValueError(f"Unexpected embedder type {embedder_type}")
if not embedder_spec.IS_TRAINABLE:
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
embedder.requires_grad_(False)
return embedder
class Embedder(nn.Module):
"""
Embedder module that serves as a container for embedders to use with different
meshes. Extends Module to automatically save / load state dict.
"""
DEFAULT_MODEL_CHECKPOINT_PREFIX = "roi_heads.embedder."
def __init__(self, cfg: CfgNode):
"""
Initialize mesh embedders. An embedder for mesh `i` is stored in a submodule
"embedder_{i}".
Args:
cfg (CfgNode): configuration options
"""
super(Embedder, self).__init__()
self.mesh_names = set()
embedder_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
logger = logging.getLogger(__name__)
for mesh_name, embedder_spec in cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.items():
logger.info(f"Adding embedder embedder_{mesh_name} with spec {embedder_spec}")
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module(f"embedder_{mesh_name}", create_embedder(embedder_spec, embedder_dim))
self.mesh_names.add(mesh_name)
if cfg.MODEL.WEIGHTS != "":
self.load_from_model_checkpoint(cfg.MODEL.WEIGHTS)
def load_from_model_checkpoint(self, fpath: str, prefix: Optional[str] = None):
if prefix is None:
prefix = Embedder.DEFAULT_MODEL_CHECKPOINT_PREFIX
state_dict = None
if fpath.endswith(".pkl"):
with PathManager.open(fpath, "rb") as hFile:
state_dict = pickle.load(hFile, encoding="latin1") # pyre-ignore[6]
else:
with PathManager.open(fpath, "rb") as hFile:
state_dict = torch.load(hFile, map_location=torch.device("cpu"))
if state_dict is not None and "model" in state_dict:
state_dict_local = {}
for key in state_dict["model"]:
if key.startswith(prefix):
v_key = state_dict["model"][key]
if isinstance(v_key, np.ndarray):
v_key = torch.from_numpy(v_key)
state_dict_local[key[len(prefix) :]] = v_key
# non-strict loading to finetune on different meshes
self.load_state_dict(state_dict_local, strict=False) # pyre-ignore[28]
def forward(self, mesh_name: str) -> torch.Tensor:
"""
Produce vertex embeddings for the specific mesh; vertex embeddings are
a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Args:
mesh_name (str): name of a mesh for which to obtain vertex embeddings
Return:
Vertex embeddings, a tensor of shape [N, D]
"""
return getattr(self, f"embedder_{mesh_name}")()
def has_embeddings(self, mesh_name: str) -> bool:
return hasattr(self, f"embedder_{mesh_name}")
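# A minimal sketch of the `embedder_{mesh_name}` submodule naming used above, bypassing
# the CfgNode-driven constructor; the mesh name "toy_mesh" and the container class are
# purely illustrative.
def _demo_embedder_lookup():
    import torch
    from torch import nn

    class _ToyEmbedderContainer(nn.Module):
        def __init__(self):
            super().__init__()
            self.add_module(
                "embedder_toy_mesh", VertexDirectEmbedder(num_vertices=10, embed_dim=4)
            )

        def forward(self, mesh_name: str) -> torch.Tensor:
            return getattr(self, f"embedder_{mesh_name}")()

    container = _ToyEmbedderContainer()
    embeddings = container("toy_mesh")
    assert embeddings.shape == (10, 4)
    return embeddings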
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/cse/embedder.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle
import torch
from torch import nn
from detectron2.utils.file_io import PathManager
from .utils import normalize_embeddings
class VertexDirectEmbedder(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: int, embed_dim: int):
"""
        Initialize embedder, set embeddings to zero
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedder, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
        Reset embeddings to zero values
"""
self.embeddings.zero_()
def forward(self) -> torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad() # pyre-ignore[56]
def load(self, fpath: str):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, "rb") as hFile:
data = pickle.load(hFile) # pyre-ignore[6]
for name in ["embeddings"]:
if name in data:
getattr(self, name).copy_(
torch.tensor(data[name]).float().to(device=getattr(self, name).device)
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/cse/vertex_direct_embedder.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle
import torch
from torch import nn
from detectron2.utils.file_io import PathManager
from .utils import normalize_embeddings
class VertexFeatureEmbedder(nn.Module):
"""
Class responsible for embedding vertex features. Mapping from
feature space to the embedding space is a tensor of size [K, D], where
K = number of dimensions in the feature space
D = number of dimensions in the embedding space
Vertex features is a tensor of size [N, K], where
N = number of vertices
K = number of dimensions in the feature space
Vertex embeddings are computed as F * E = tensor of size [N, D]
"""
def __init__(
self, num_vertices: int, feature_dim: int, embed_dim: int, train_features: bool = False
):
"""
        Initialize embedder, set features and embeddings to zero
Args:
num_vertices (int): number of vertices to embed
feature_dim (int): number of dimensions in the feature space
embed_dim (int): number of dimensions in the embedding space
train_features (bool): determines whether vertex features should
be trained (default: False)
"""
super(VertexFeatureEmbedder, self).__init__()
if train_features:
self.features = nn.Parameter(torch.Tensor(num_vertices, feature_dim))
else:
self.register_buffer("features", torch.Tensor(num_vertices, feature_dim))
self.embeddings = nn.Parameter(torch.Tensor(feature_dim, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
self.features.zero_()
self.embeddings.zero_()
def forward(self) -> torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(torch.mm(self.features, self.embeddings))
@torch.no_grad() # pyre-ignore[56]
def load(self, fpath: str):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, "rb") as hFile:
data = pickle.load(hFile) # pyre-ignore[6]
for name in ["features", "embeddings"]:
if name in data:
getattr(self, name).copy_(
torch.tensor(data[name]).float().to(device=getattr(self, name).device)
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/cse/vertex_feature_embedder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Dict, List, Optional
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import select_foreground_proposals
from detectron2.structures import ImageList, Instances
from .. import (
build_densepose_data_filter,
build_densepose_embedder,
build_densepose_head,
build_densepose_losses,
build_densepose_predictor,
densepose_inference,
)
class Decoder(nn.Module):
"""
A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper
(https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from
all levels of the FPN into single output.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features):
super(Decoder, self).__init__()
# fmt: off
self.in_features = in_features
feature_strides = {k: v.stride for k, v in input_shape.items()}
feature_channels = {k: v.channels for k, v in input_shape.items()}
num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES
conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS
self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE
norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM
# fmt: on
self.scale_heads = []
for in_feature in self.in_features:
head_ops = []
head_length = max(
1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride))
)
for k in range(head_length):
conv = Conv2d(
feature_channels[in_feature] if k == 0 else conv_dims,
conv_dims,
kernel_size=3,
stride=1,
padding=1,
bias=not norm,
norm=get_norm(norm, conv_dims),
activation=F.relu,
)
weight_init.c2_msra_fill(conv)
head_ops.append(conv)
if feature_strides[in_feature] != self.common_stride:
head_ops.append(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
)
self.scale_heads.append(nn.Sequential(*head_ops))
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module(in_feature, self.scale_heads[-1])
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
weight_init.c2_msra_fill(self.predictor)
def forward(self, features: List[torch.Tensor]):
for i, _ in enumerate(self.in_features):
if i == 0:
x = self.scale_heads[i](features[i])
else:
x = x + self.scale_heads[i](features[i])
x = self.predictor(x)
return x
@ROI_HEADS_REGISTRY.register()
class DensePoseROIHeads(StandardROIHeads):
"""
A Standard ROIHeads which contains an addition of DensePose head.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
self._init_densepose_head(cfg, input_shape)
def _init_densepose_head(self, cfg, input_shape):
# fmt: off
self.densepose_on = cfg.MODEL.DENSEPOSE_ON
if not self.densepose_on:
return
self.densepose_data_filter = build_densepose_data_filter(cfg)
dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION
dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO
dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE
self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON
# fmt: on
if self.use_decoder:
dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,)
else:
dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
in_channels = [input_shape[f].channels for f in self.in_features][0]
if self.use_decoder:
self.decoder = Decoder(cfg, input_shape, self.in_features)
self.densepose_pooler = ROIPooler(
output_size=dp_pooler_resolution,
scales=dp_pooler_scales,
sampling_ratio=dp_pooler_sampling_ratio,
pooler_type=dp_pooler_type,
)
self.densepose_head = build_densepose_head(cfg, in_channels)
self.densepose_predictor = build_densepose_predictor(
cfg, self.densepose_head.n_out_channels
)
self.densepose_losses = build_densepose_losses(cfg)
self.embedder = build_densepose_embedder(cfg)
def _forward_densepose(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
"""
Forward logic of the densepose prediction branch.
Args:
features (dict[str, Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
instances (list[Instances]): length `N` list of `Instances`. The i-th
`Instances` contains instances for the i-th input image,
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "densepose" and return it.
"""
if not self.densepose_on:
return {} if self.training else instances
features_list = [features[f] for f in self.in_features]
if self.training:
proposals, _ = select_foreground_proposals(instances, self.num_classes)
features_list, proposals = self.densepose_data_filter(features_list, proposals)
if len(proposals) > 0:
proposal_boxes = [x.proposal_boxes for x in proposals]
if self.use_decoder:
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a
# function.
features_list = [self.decoder(features_list)]
features_dp = self.densepose_pooler(features_list, proposal_boxes)
densepose_head_outputs = self.densepose_head(features_dp)
densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
densepose_loss_dict = self.densepose_losses(
proposals, densepose_predictor_outputs, embedder=self.embedder
)
return densepose_loss_dict
else:
pred_boxes = [x.pred_boxes for x in instances]
if self.use_decoder:
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
features_list = [self.decoder(features_list)]
features_dp = self.densepose_pooler(features_list, pred_boxes)
if len(features_dp) > 0:
densepose_head_outputs = self.densepose_head(features_dp)
densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
else:
densepose_predictor_outputs = None
densepose_inference(densepose_predictor_outputs, instances)
return instances
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
):
instances, losses = super().forward(images, features, proposals, targets)
del targets, images
if self.training:
losses.update(self._forward_densepose(features, instances))
return instances, losses
def forward_with_given_boxes(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
):
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
This is useful for downstream tasks where a box is known, but need to obtain
other attributes (outputs of other heads).
Test-time augmentation also uses this.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (list[Instances]):
the same `Instances` objects, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
instances = super().forward_with_given_boxes(features, instances)
instances = self._forward_densepose(features, instances)
return instances
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/roi_heads/roi_head.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.layers import Conv2d
from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseDeepLabHead(nn.Module):
"""
DensePose head using DeepLabV3 model from
"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>.
"""
def __init__(self, cfg: CfgNode, input_channels: int):
super(DensePoseDeepLabHead, self).__init__()
# fmt: off
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON
# fmt: on
pad_size = kernel_size // 2
n_channels = input_channels
self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels) # 6, 12, 56
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module("ASPP", self.ASPP)
if self.use_nonlocal:
self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True)
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module("NLBlock", self.NLBlock)
# weight_init.c2_msra_fill(self.ASPP)
for i in range(self.n_stacked_convs):
norm_module = nn.GroupNorm(32, hidden_dim) if norm == "GN" else None
layer = Conv2d(
n_channels,
hidden_dim,
kernel_size,
stride=1,
padding=pad_size,
bias=not norm,
norm=norm_module,
)
weight_init.c2_msra_fill(layer)
n_channels = hidden_dim
layer_name = self._get_layer_name(i)
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module(layer_name, layer)
self.n_out_channels = hidden_dim
# initialize_module_params(self)
def forward(self, features):
x0 = features
x = self.ASPP(x0)
if self.use_nonlocal:
x = self.NLBlock(x)
output = x
for i in range(self.n_stacked_convs):
layer_name = self._get_layer_name(i)
x = getattr(self, layer_name)(x)
x = F.relu(x)
output = x
return output
def _get_layer_name(self, i: int):
layer_name = "body_conv_fcn{}".format(i + 1)
return layer_name
# Copied from
# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py
# See https://arxiv.org/pdf/1706.05587.pdf for details
class ASPPConv(nn.Sequential): # pyre-ignore[11]
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(
in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False
),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
)
def forward(self, x):
size = x.shape[-2:]
x = super(ASPPPooling, self).forward(x)
return F.interpolate(x, size=size, mode="bilinear", align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels, atrous_rates, out_channels):
super(ASPP, self).__init__()
modules = []
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
)
)
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
nn.ReLU()
# nn.Dropout(0.5)
)
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
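# A minimal shape sketch: ASPP runs 5 parallel branches (a 1x1 conv, three atrous convs
# and image-level pooling), concatenates them and projects back to `out_channels`,
# preserving the spatial size. Channel counts below are arbitrary but must be divisible
# by 32 because of the GroupNorm(32, .) layers.
def _demo_aspp_shapes():
    import torch

    aspp = ASPP(in_channels=64, atrous_rates=[6, 12, 56], out_channels=64)
    x = torch.randn(2, 64, 28, 28)
    y = aspp(x)
    assert y.shape == (2, 64, 28, 28)
    return y.shape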
# copied from
# https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_embedded_gaussian.py
# See https://arxiv.org/abs/1711.07971 for details
class _NonLocalBlockND(nn.Module):
def __init__(
self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True
):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.GroupNorm  # used in place of nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.GroupNorm  # used in place of nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=2)
            bn = nn.GroupNorm  # used in place of nn.BatchNorm1d
self.g = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
if bn_layer:
self.W = nn.Sequential(
conv_nd(
in_channels=self.inter_channels,
out_channels=self.in_channels,
kernel_size=1,
stride=1,
padding=0,
),
bn(32, self.in_channels),
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(
in_channels=self.inter_channels,
out_channels=self.in_channels,
kernel_size=1,
stride=1,
padding=0,
)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.phi = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
"""
:param x: (b, c, t, h, w)
:return:
"""
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(
in_channels,
inter_channels=inter_channels,
dimension=2,
sub_sample=sub_sample,
bn_layer=bn_layer,
)
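# A minimal shape sketch of the embedded-Gaussian non-local block: the output is a
# residual refinement with the same shape as the input. The sizes below are arbitrary,
# but `in_channels` must be divisible by 32 because of the GroupNorm used inside `W`.
def _demo_nonlocal_shapes():
    import torch

    block = NONLocalBlock2D(in_channels=64, bn_layer=True)
    x = torch.randn(2, 64, 16, 16)
    z = block(x)
    assert z.shape == x.shape
    return z.shape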
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/roi_heads/deeplab.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.utils.registry import Registry
ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD")
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/roi_heads/registry.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .v1convx import DensePoseV1ConvXHead
from .deeplab import DensePoseDeepLabHead
from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
from .roi_head import Decoder, DensePoseROIHeads
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/roi_heads/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.layers import Conv2d
from ..utils import initialize_module_params
from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseV1ConvXHead(nn.Module):
"""
Fully convolutional DensePose head.
"""
def __init__(self, cfg: CfgNode, input_channels: int):
"""
Initialize DensePose fully convolutional head
Args:
cfg (CfgNode): configuration options
input_channels (int): number of input channels
"""
super(DensePoseV1ConvXHead, self).__init__()
# fmt: off
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
# fmt: on
pad_size = kernel_size // 2
n_channels = input_channels
for i in range(self.n_stacked_convs):
layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size)
layer_name = self._get_layer_name(i)
# pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.
self.add_module(layer_name, layer)
n_channels = hidden_dim
self.n_out_channels = n_channels
initialize_module_params(self)
def forward(self, features: torch.Tensor):
"""
Apply DensePose fully convolutional head to the input features
Args:
features (tensor): input features
Result:
A tensor of DensePose head outputs
"""
x = features
output = x
for i in range(self.n_stacked_convs):
layer_name = self._get_layer_name(i)
x = getattr(self, layer_name)(x)
x = F.relu(x)
output = x
return output
def _get_layer_name(self, i: int):
layer_name = "body_conv_fcn{}".format(i + 1)
return layer_name
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/modeling/roi_heads/v1convx.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import List, Optional, Tuple
import cv2
import torch
from densepose.structures import DensePoseDataRelative
from ..structures import DensePoseChartResult
from .base import Boxes, Image, MatrixVisualizer
class DensePoseResultsVisualizer(object):
def visualize(
self,
image_bgr: Image,
results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]],
) -> Image:
densepose_result, boxes_xywh = results_and_boxes_xywh
if densepose_result is None or boxes_xywh is None:
return image_bgr
boxes_xywh = boxes_xywh.cpu().numpy()
context = self.create_visualization_context(image_bgr)
for i, result in enumerate(densepose_result):
iuv_array = torch.cat(
(result.labels[None].type(torch.float32), result.uv * 255.0)
).type(torch.uint8)
self.visualize_iuv_arr(context, iuv_array.cpu().numpy(), boxes_xywh[i])
image_bgr = self.context_to_image_bgr(context)
return image_bgr
def create_visualization_context(self, image_bgr: Image):
return image_bgr
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
pass
def context_to_image_bgr(self, context):
return context
def get_image_bgr_from_context(self, context):
return context
class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer):
def __init__(
self,
data_extractor,
segm_extractor,
inplace=True,
cmap=cv2.COLORMAP_PARULA,
alpha=0.7,
val_scale=1.0,
**kwargs,
):
self.mask_visualizer = MatrixVisualizer(
inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
)
self.data_extractor = data_extractor
self.segm_extractor = segm_extractor
def context_to_image_bgr(self, context):
return context
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
image_bgr = self.get_image_bgr_from_context(context)
matrix = self.data_extractor(iuv_arr)
segm = self.segm_extractor(iuv_arr)
mask = np.zeros(matrix.shape, dtype=np.uint8)
mask[segm > 0] = 1
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh)
def _extract_i_from_iuvarr(iuv_arr):
return iuv_arr[0, :, :]
def _extract_u_from_iuvarr(iuv_arr):
return iuv_arr[1, :, :]
def _extract_v_from_iuvarr(iuv_arr):
return iuv_arr[2, :, :]
class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer):
def __init__(self, levels=10, **kwargs):
self.levels = levels
self.plot_args = kwargs
def create_visualization_context(self, image_bgr: Image):
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
context = {}
context["image_bgr"] = image_bgr
dpi = 100
height_inches = float(image_bgr.shape[0]) / dpi
width_inches = float(image_bgr.shape[1]) / dpi
fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi)
plt.axes([0, 0, 1, 1])
plt.axis("off")
context["fig"] = fig
canvas = FigureCanvas(fig)
context["canvas"] = canvas
extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0)
plt.imshow(image_bgr[:, :, ::-1], extent=extent)
return context
def context_to_image_bgr(self, context):
fig = context["fig"]
w, h = map(int, fig.get_size_inches() * fig.get_dpi())
canvas = context["canvas"]
canvas.draw()
        image_1d = np.frombuffer(canvas.tostring_rgb(), dtype="uint8")
image_rgb = image_1d.reshape(h, w, 3)
image_bgr = image_rgb[:, :, ::-1].copy()
return image_bgr
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None:
import matplotlib.pyplot as plt
u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
extent = (
bbox_xywh[0],
bbox_xywh[0] + bbox_xywh[2],
bbox_xywh[1],
bbox_xywh[1] + bbox_xywh[3],
)
plt.contour(u, self.levels, extent=extent, **self.plot_args)
plt.contour(v, self.levels, extent=extent, **self.plot_args)
class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer):
"""
Contour visualization using marching squares
"""
def __init__(self, levels=10, **kwargs):
# TODO: colormap is hardcoded
cmap = cv2.COLORMAP_PARULA
if isinstance(levels, int):
self.levels = np.linspace(0, 1, levels)
else:
self.levels = levels
if "linewidths" in kwargs:
self.linewidths = kwargs["linewidths"]
else:
self.linewidths = [1] * len(self.levels)
self.plot_args = kwargs
img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap)
self.level_colors_bgr = [
[int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
]
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None:
image_bgr = self.get_image_bgr_from_context(context)
segm = _extract_i_from_iuvarr(iuv_arr)
u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
self._contours(image_bgr, u, segm, bbox_xywh)
self._contours(image_bgr, v, segm, bbox_xywh)
def _contours(self, image_bgr, arr, segm, bbox_xywh):
for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
mask = segm == part_idx
if not np.any(mask):
continue
arr_min = np.amin(arr[mask])
arr_max = np.amax(arr[mask])
I, J = np.nonzero(mask)
i0 = np.amin(I)
i1 = np.amax(I) + 1
j0 = np.amin(J)
j1 = np.amax(J) + 1
if (j1 == j0 + 1) or (i1 == i0 + 1):
continue
Nw = arr.shape[1] - 1
Nh = arr.shape[0] - 1
for level_idx, level in enumerate(self.levels):
if (level < arr_min) or (level > arr_max):
continue
vp = arr[i0:i1, j0:j1] >= level
bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8
mp = mask[i0:i1, j0:j1]
bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8
it = np.nditer(bin_codes, flags=["multi_index"])
color_bgr = self.level_colors_bgr[level_idx]
linewidth = self.linewidths[level_idx]
while not it.finished:
if (it[0] != 0) and (it[0] != 15):
i, j = it.multi_index
if bin_mask_codes[i, j] != 0:
self._draw_line(
image_bgr,
arr,
mask,
level,
color_bgr,
linewidth,
it[0],
it.multi_index,
bbox_xywh,
Nw,
Nh,
(i0, j0),
)
it.iternext()
def _draw_line(
self,
image_bgr,
arr,
mask,
v,
color_bgr,
linewidth,
bin_code,
multi_idx,
bbox_xywh,
Nw,
Nh,
offset,
):
lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset)
x0, y0, w, h = bbox_xywh
x1 = x0 + w
y1 = y0 + h
for line in lines:
x0r, y0r = line[0]
x1r, y1r = line[1]
pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0)))
pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0)))
cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth)
def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset):
i0, j0 = offset
i, j = multi_idx
i += i0
j += j0
v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1]
x0i = float(j) / Nw
y0j = float(i) / Nh
He = 1.0 / Nh
We = 1.0 / Nw
if (bin_code == 1) or (bin_code == 14):
a = (v - v0) / (v1 - v0)
b = (v - v0) / (v3 - v0)
pt1 = (x0i, y0j + a * He)
pt2 = (x0i + b * We, y0j)
return [(pt1, pt2)]
elif (bin_code == 2) or (bin_code == 13):
a = (v - v0) / (v1 - v0)
b = (v - v1) / (v2 - v1)
pt1 = (x0i, y0j + a * He)
pt2 = (x0i + b * We, y0j + He)
return [(pt1, pt2)]
elif (bin_code == 3) or (bin_code == 12):
a = (v - v0) / (v3 - v0)
b = (v - v1) / (v2 - v1)
pt1 = (x0i + a * We, y0j)
pt2 = (x0i + b * We, y0j + He)
return [(pt1, pt2)]
elif (bin_code == 4) or (bin_code == 11):
a = (v - v1) / (v2 - v1)
b = (v - v3) / (v2 - v3)
pt1 = (x0i + a * We, y0j + He)
pt2 = (x0i + We, y0j + b * He)
return [(pt1, pt2)]
elif (bin_code == 6) or (bin_code == 9):
a = (v - v0) / (v1 - v0)
b = (v - v3) / (v2 - v3)
pt1 = (x0i, y0j + a * He)
pt2 = (x0i + We, y0j + b * He)
return [(pt1, pt2)]
elif (bin_code == 7) or (bin_code == 8):
a = (v - v0) / (v3 - v0)
b = (v - v3) / (v2 - v3)
pt1 = (x0i + a * We, y0j)
pt2 = (x0i + We, y0j + b * He)
return [(pt1, pt2)]
elif bin_code == 5:
a1 = (v - v0) / (v1 - v0)
b1 = (v - v1) / (v2 - v1)
pt11 = (x0i, y0j + a1 * He)
pt12 = (x0i + b1 * We, y0j + He)
a2 = (v - v0) / (v3 - v0)
b2 = (v - v3) / (v2 - v3)
pt21 = (x0i + a2 * We, y0j)
pt22 = (x0i + We, y0j + b2 * He)
return [(pt11, pt12), (pt21, pt22)]
elif bin_code == 10:
a1 = (v - v0) / (v3 - v0)
b1 = (v - v0) / (v1 - v0)
pt11 = (x0i + a1 * We, y0j)
pt12 = (x0i, y0j + b1 * He)
a2 = (v - v1) / (v2 - v1)
b2 = (v - v3) / (v2 - v3)
pt21 = (x0i + a2 * We, y0j + He)
pt22 = (x0i + We, y0j + b2 * He)
return [(pt11, pt12), (pt21, pt22)]
return []
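# A minimal sketch of the marching-squares bookkeeping used above, shown for a
# single 2x2 cell. The corner order (v0..v3) and the bit weights (1, 2, 4, 8)
# mirror the `bin_codes` expression in `_contours`; the helper itself is only
# illustrative and not part of the upstream DensePose module.
def _example_marching_squares_cell(v0, v1, v2, v3, level):
    # Bit k is set when the corresponding corner value is >= the contour level.
    bin_code = int(v0 >= level) + int(v1 >= level) * 2 + int(v2 >= level) * 4 + int(v3 >= level) * 8
    if bin_code in (1, 14):
        # Only v0 differs from the other corners: the contour crosses the two
        # edges incident to v0, at fractions found by linear interpolation,
        # exactly as in `_bin_code_2_lines`.
        a = (level - v0) / (v1 - v0)  # fraction along the v0 -> v1 edge
        b = (level - v0) / (v3 - v0)  # fraction along the v0 -> v3 edge
        return bin_code, (a, b)
    return bin_code, None
# Example: corners (0.8, 0.2, 0.1, 0.3) at level 0.5 give bin_code == 1 with
# edge crossings at a = 0.5 and b = 0.6.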
try:
import matplotlib
matplotlib.use("Agg") # pyre-ignore[16]
DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer
except ModuleNotFoundError:
logger = logging.getLogger(__name__)
logger.warning("Could not import matplotlib, using custom contour visualizer")
DensePoseResultsContourVisualizer = DensePoseResultsCustomContourVisualizer
class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super(DensePoseResultsFineSegmentationVisualizer, self).__init__(
_extract_i_from_iuvarr,
_extract_i_from_iuvarr,
inplace,
cmap,
alpha,
val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS,
**kwargs,
)
class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super(DensePoseResultsUVisualizer, self).__init__(
_extract_u_from_iuvarr,
_extract_i_from_iuvarr,
inplace,
cmap,
alpha,
val_scale=1.0,
**kwargs,
)
class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super(DensePoseResultsVVisualizer, self).__init__(
_extract_v_from_iuvarr,
_extract_i_from_iuvarr,
inplace,
cmap,
alpha,
val_scale=1.0,
**kwargs,
)
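# The three visualizers above differ only in which IUV channel they hand to the
# masked-colormap base class. A rough sketch of the assumed channel layout
# (channel 0 = part label I, channels 1 and 2 = chart coordinates U and V scaled
# to 0..255); the helper below is illustrative only, not upstream API.
def _example_split_iuv(iuv_arr):
    part_labels = iuv_arr[0]
    u = iuv_arr[1].astype(float) / 255.0
    v = iuv_arr[2].astype(float) / 255.0
    return part_labels, u, v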
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/densepose_results.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Iterable, Optional, Tuple
import cv2
from densepose.structures import DensePoseDataRelative
from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer
class DensePoseDataCoarseSegmentationVisualizer(object):
"""
Visualizer for ground truth segmentation
"""
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
self.mask_visualizer = MatrixVisualizer(
inplace=inplace,
cmap=cmap,
val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS,
alpha=alpha,
)
def visualize(
self,
image_bgr: Image,
bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
) -> Image:
if bbox_densepose_datas is None:
return image_bgr
for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
matrix = densepose_data.segm.numpy()
mask = np.zeros(matrix.shape, dtype=np.uint8)
mask[matrix > 0] = 1
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy())
return image_bgr
class DensePoseDataPointsVisualizer(object):
def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA, **kwargs):
self.points_visualizer = PointsVisualizer()
self.densepose_data_to_value_fn = densepose_data_to_value_fn
self.cmap = cmap
def visualize(
self,
image_bgr: Image,
bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
) -> Image:
if bbox_densepose_datas is None:
return image_bgr
for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
x0, y0, w, h = bbox_xywh.numpy()
x = densepose_data.x.numpy() * w / 255.0 + x0
y = densepose_data.y.numpy() * h / 255.0 + y0
pts_xy = zip(x, y)
if self.densepose_data_to_value_fn is None:
image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy)
else:
v = self.densepose_data_to_value_fn(densepose_data)
img_colors_bgr = cv2.applyColorMap(v, self.cmap)
colors_bgr = [
[int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
]
image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr)
return image_bgr
def _densepose_data_u_for_cmap(densepose_data):
u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0
return u.astype(np.uint8)
def _densepose_data_v_for_cmap(densepose_data):
v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0
return v.astype(np.uint8)
def _densepose_data_i_for_cmap(densepose_data):
i = (
np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS)
* 255.0
/ DensePoseDataRelative.N_PART_LABELS
)
return i.astype(np.uint8)
class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer):
def __init__(self, **kwargs):
super(DensePoseDataPointsUVisualizer, self).__init__(
densepose_data_to_value_fn=_densepose_data_u_for_cmap, **kwargs
)
class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer):
def __init__(self, **kwargs):
super(DensePoseDataPointsVVisualizer, self).__init__(
densepose_data_to_value_fn=_densepose_data_v_for_cmap, **kwargs
)
class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer):
def __init__(self, **kwargs):
super(DensePoseDataPointsIVisualizer, self).__init__(
densepose_data_to_value_fn=_densepose_data_i_for_cmap, **kwargs
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/densepose_data_points.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Optional, Tuple
import cv2
from densepose.structures import DensePoseDataRelative
from ..structures import DensePoseChartPredictorOutput
from .base import Boxes, Image, MatrixVisualizer
class DensePoseOutputsVisualizer(object):
def __init__(
self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, to_visualize=None, **kwargs
):
assert to_visualize in ("I", "U", "V"), "can only visualize I, U or V"
self.to_visualize = to_visualize
if self.to_visualize == "I":
val_scale = 255.0 / DensePoseDataRelative.N_PART_LABELS
else:
val_scale = 1.0
self.mask_visualizer = MatrixVisualizer(
inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
)
def visualize(
self,
image_bgr: Image,
dp_output_with_bboxes: Tuple[Optional[DensePoseChartPredictorOutput], Optional[Boxes]],
) -> Image:
densepose_output, bboxes_xywh = dp_output_with_bboxes
if densepose_output is None or bboxes_xywh is None:
return image_bgr
assert isinstance(
densepose_output, DensePoseChartPredictorOutput
), "DensePoseChartPredictorOutput expected, {} encountered".format(type(densepose_output))
S = densepose_output.coarse_segm
I = densepose_output.fine_segm # noqa
U = densepose_output.u
V = densepose_output.v
N = S.size(0)
assert N == I.size(
0
), "densepose outputs S {} and I {}" " should have equal first dim size".format(
S.size(), I.size()
)
assert N == U.size(
0
), "densepose outputs S {} and U {}" " should have equal first dim size".format(
S.size(), U.size()
)
assert N == V.size(
0
), "densepose outputs S {} and V {}" " should have equal first dim size".format(
S.size(), V.size()
)
assert N == len(
bboxes_xywh
), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format(
len(bboxes_xywh), N
)
for n in range(N):
Sn = S[n].argmax(dim=0)
In = I[n].argmax(dim=0) * (Sn > 0).long()
segmentation = In.cpu().numpy().astype(np.uint8)
mask = np.zeros(segmentation.shape, dtype=np.uint8)
mask[segmentation > 0] = 1
bbox_xywh = bboxes_xywh[n]
if self.to_visualize == "I":
vis = segmentation
elif self.to_visualize in "UV":
U_or_Vn = {"U": U, "V": V}[self.to_visualize][n].cpu().numpy().astype(np.float32)
vis = np.zeros(segmentation.shape, dtype=np.float32)
for partId in range(U_or_Vn.shape[0]):
vis[segmentation == partId] = (
U_or_Vn[partId][segmentation == partId].clip(0, 1) * 255
)
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, vis, bbox_xywh)
return image_bgr
class DensePoseOutputsUVisualizer(DensePoseOutputsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="U", **kwargs)
class DensePoseOutputsVVisualizer(DensePoseOutputsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="V", **kwargs)
class DensePoseOutputsFineSegmentationVisualizer(DensePoseOutputsVisualizer):
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="I", **kwargs)
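# A minimal sketch of the label decoding performed in
# DensePoseOutputsVisualizer.visualize above: the fine segmentation is the
# channel-wise argmax of the fine_segm scores, masked by the foreground of
# coarse_segm. The tensor shapes in the comments are illustrative assumptions.
def _example_decode_fine_segm(coarse_segm, fine_segm):
    # coarse_segm: (C_coarse, H, W) scores, fine_segm: (C_fine, H, W) scores
    foreground = coarse_segm.argmax(dim=0) > 0
    return fine_segm.argmax(dim=0) * foreground.long()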
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/densepose_outputs_iuv.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import List, Optional, Sequence, Tuple
import torch
from detectron2.layers.nms import batched_nms
from detectron2.structures.instances import Instances
from densepose.converters import ToChartResultConverterWithConfidences
from densepose.structures import (
DensePoseChartResultWithConfidences,
DensePoseEmbeddingPredictorOutput,
)
from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer
from densepose.vis.densepose_outputs_vertex import DensePoseOutputsVertexVisualizer
from densepose.vis.densepose_results import DensePoseResultsVisualizer
from .base import CompoundVisualizer
Scores = Sequence[float]
DensePoseChartResultsWithConfidences = List[DensePoseChartResultWithConfidences]
def extract_scores_from_instances(instances: Instances, select=None):
if instances.has("scores"):
return instances.scores if select is None else instances.scores[select]
return None
def extract_boxes_xywh_from_instances(instances: Instances, select=None):
if instances.has("pred_boxes"):
boxes_xywh = instances.pred_boxes.tensor.clone()
boxes_xywh[:, 2] -= boxes_xywh[:, 0]
boxes_xywh[:, 3] -= boxes_xywh[:, 1]
return boxes_xywh if select is None else boxes_xywh[select]
return None
def create_extractor(visualizer: object):
"""
Create an extractor for the provided visualizer
"""
if isinstance(visualizer, CompoundVisualizer):
extractors = [create_extractor(v) for v in visualizer.visualizers]
return CompoundExtractor(extractors)
elif isinstance(visualizer, DensePoseResultsVisualizer):
return DensePoseResultExtractor()
elif isinstance(visualizer, ScoredBoundingBoxVisualizer):
return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances])
elif isinstance(visualizer, BoundingBoxVisualizer):
return extract_boxes_xywh_from_instances
elif isinstance(visualizer, DensePoseOutputsVertexVisualizer):
return DensePoseOutputsExtractor()
else:
logger = logging.getLogger(__name__)
logger.error(f"Could not create extractor for {visualizer}")
return None
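# A hedged usage sketch (not upstream API): visualizers and extractors are built
# in matching pairs, with create_extractor mirroring the structure of a compound
# visualizer. `instances` is assumed to be a detectron2 Instances object carrying
# DensePose predictions and `image_bgr` a uint8 BGR image.
def _example_create_extractor_usage(instances: Instances, image_bgr):
    from densepose.vis.densepose_results import DensePoseResultsFineSegmentationVisualizer
    visualizer = CompoundVisualizer(
        [DensePoseResultsFineSegmentationVisualizer(), BoundingBoxVisualizer()]
    )
    extractor = create_extractor(visualizer)  # -> CompoundExtractor over two extractors
    data = extractor(instances)
    return visualizer.visualize(image_bgr, data)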
class BoundingBoxExtractor(object):
"""
Extracts bounding boxes from instances
"""
def __call__(self, instances: Instances):
boxes_xywh = extract_boxes_xywh_from_instances(instances)
return boxes_xywh
class ScoredBoundingBoxExtractor(object):
"""
Extracts bounding boxes from instances
"""
def __call__(self, instances: Instances, select=None):
scores = extract_scores_from_instances(instances)
boxes_xywh = extract_boxes_xywh_from_instances(instances)
if (scores is None) or (boxes_xywh is None):
return (boxes_xywh, scores)
if select is not None:
scores = scores[select]
boxes_xywh = boxes_xywh[select]
return (boxes_xywh, scores)
class DensePoseResultExtractor(object):
"""
Extracts DensePose chart result with confidences from instances
"""
def __call__(
self, instances: Instances, select=None
) -> Tuple[Optional[DensePoseChartResultsWithConfidences], Optional[torch.Tensor]]:
if instances.has("pred_densepose") and instances.has("pred_boxes"):
dpout = instances.pred_densepose
boxes_xyxy = instances.pred_boxes
boxes_xywh = extract_boxes_xywh_from_instances(instances)
if select is not None:
dpout = dpout[select]
boxes_xyxy = boxes_xyxy[select]
converter = ToChartResultConverterWithConfidences()
results = [converter.convert(dpout[i], boxes_xyxy[[i]]) for i in range(len(dpout))]
return results, boxes_xywh
else:
return None, None
class DensePoseOutputsExtractor(object):
"""
Extracts DensePose result from instances
"""
def __call__(
self,
instances: Instances,
select=None,
) -> Tuple[
Optional[DensePoseEmbeddingPredictorOutput], Optional[torch.Tensor], Optional[List[int]]
]:
if not (instances.has("pred_densepose") and instances.has("pred_boxes")):
return None, None, None
dpout = instances.pred_densepose
boxes_xyxy = instances.pred_boxes
boxes_xywh = extract_boxes_xywh_from_instances(instances)
if instances.has("pred_classes"):
classes = instances.pred_classes.tolist()
else:
classes = None
if select is not None:
dpout = dpout[select]
boxes_xyxy = boxes_xyxy[select]
if classes is not None:
classes = classes[select]
return dpout, boxes_xywh, classes
class CompoundExtractor(object):
"""
Extracts data for CompoundVisualizer
"""
def __init__(self, extractors):
self.extractors = extractors
def __call__(self, instances: Instances, select=None):
datas = []
for extractor in self.extractors:
data = extractor(instances, select)
datas.append(data)
return datas
class NmsFilteredExtractor(object):
"""
Extracts data in the format accepted by NmsFilteredVisualizer
"""
def __init__(self, extractor, iou_threshold):
self.extractor = extractor
self.iou_threshold = iou_threshold
def __call__(self, instances: Instances, select=None):
scores = extract_scores_from_instances(instances)
boxes_xywh = extract_boxes_xywh_from_instances(instances)
if boxes_xywh is None:
return None
select_local_idx = batched_nms(
boxes_xywh,
scores,
torch.zeros(len(scores), dtype=torch.int32),
iou_threshold=self.iou_threshold,
).squeeze()
select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device)
select_local[select_local_idx] = True
select = select_local if select is None else (select & select_local)
return self.extractor(instances, select=select)
class ScoreThresholdedExtractor(object):
"""
Extracts data in the format accepted by ScoreThresholdedVisualizer
"""
def __init__(self, extractor, min_score):
self.extractor = extractor
self.min_score = min_score
def __call__(self, instances: Instances, select=None):
scores = extract_scores_from_instances(instances)
if scores is None:
return None
select_local = scores > self.min_score
select = select_local if select is None else (select & select_local)
data = self.extractor(instances, select=select)
return data
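# A brief sketch of how the filtering wrappers above compose: each one builds a
# boolean `select` mask and delegates to the wrapped extractor, so they can be
# chained. The threshold value and the choice of base extractor are assumptions.
def _example_thresholded_extraction(instances: Instances, min_score: float = 0.8):
    extractor = ScoreThresholdedExtractor(DensePoseResultExtractor(), min_score=min_score)
    # Returns (results, boxes_xywh) restricted to detections scoring above
    # min_score, or None when the instances carry no scores.
    return extractor(instances)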
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/extractor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .base import RectangleVisualizer, TextVisualizer
class BoundingBoxVisualizer(object):
def __init__(self):
self.rectangle_visualizer = RectangleVisualizer()
def visualize(self, image_bgr, boxes_xywh):
for bbox_xywh in boxes_xywh:
image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh)
return image_bgr
class ScoredBoundingBoxVisualizer(object):
def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None, **kwargs):
if bbox_visualizer_params is None:
bbox_visualizer_params = {}
if score_visualizer_params is None:
score_visualizer_params = {}
self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params)
self.visualizer_score = TextVisualizer(**score_visualizer_params)
def visualize(self, image_bgr, scored_bboxes):
boxes_xywh, box_scores = scored_bboxes
assert len(boxes_xywh) == len(
box_scores
), "Number of bounding boxes {} should be equal to the number of scores {}".format(
len(boxes_xywh), len(box_scores)
)
for i, box_xywh in enumerate(boxes_xywh):
score_i = box_scores[i]
image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh)
score_txt = "{0:6.4f}".format(score_i)
topleft_xy = box_xywh[0], box_xywh[1]
image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy)
return image_bgr
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/bounding_box.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import numpy as np
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
import cv2
import torch
from detectron2.utils.file_io import PathManager
from densepose.modeling import build_densepose_embedder
from densepose.modeling.cse.utils import get_closest_vertices_mask_from_ES
from ..data.utils import get_class_to_mesh_name_mapping
from ..structures import DensePoseEmbeddingPredictorOutput
from ..structures.mesh import create_mesh
from .base import Boxes, Image, MatrixVisualizer
from .densepose_results_textures import get_texture_atlas
@lru_cache()
def get_xyz_vertex_embedding(mesh_name: str, device: torch.device):
#if mesh_name == "smpl_27554":
# embed_path = PathManager.get_local_path(
# "https://dl.fbaipublicfiles.com/densepose/data/cse/mds_d=256.npy"
# )
# embed_map, _ = np.load(embed_path, allow_pickle=True)
# embed_map = torch.tensor(embed_map).float()[:, 0]
# embed_map -= embed_map.min()
# embed_map /= embed_map.max()
#else:
# mesh = create_mesh(mesh_name, device)
# embed_map = mesh.vertices.sum(dim=1)
# embed_map -= embed_map.min()
# embed_map /= embed_map.max()
# embed_map = embed_map ** 2
mesh = create_mesh(mesh_name, device)
vertex_colors = mesh.vertices.clone()
vertex_colors -= vertex_colors.min(axis=0)[0]
vertex_colors /= vertex_colors.max(axis=0)[0]
embed_map = vertex_colors[:, [2, 1, 0]]
return embed_map
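# A minimal sketch of the color mapping above (illustrative, not upstream API):
# each vertex's XYZ position is normalized per axis to [0, 1] and the axes are
# reordered so the result can be used directly as one BGR color per vertex.
def _example_xyz_to_bgr(vertices: torch.Tensor) -> torch.Tensor:
    # vertices: float tensor of shape (num_vertices, 3)
    colors = vertices.clone()
    colors -= colors.min(dim=0)[0]
    colors /= colors.max(dim=0)[0]
    return colors[:, [2, 1, 0]]  # reorder XYZ so that channel order is BGR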
class DensePoseOutputsVertexVisualizer(object):
def __init__(
self,
cfg,
inplace=True,
cmap=cv2.COLORMAP_JET,
alpha=0.7,
device="cuda",
default_class=0,
**kwargs,
):
self.mask_visualizer = MatrixVisualizer(
inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
)
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.embedder = build_densepose_embedder(cfg)
self.device = torch.device(device)
self.default_class = default_class
self.mesh_vertex_embeddings = {
mesh_name: self.embedder(mesh_name).to(self.device)
for mesh_name in self.class_to_mesh_name.values()
if self.embedder.has_embeddings(mesh_name)
}
def visualize(
self,
image_bgr: Image,
outputs_boxes_xywh_classes: Tuple[
Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
],
) -> Image:
if outputs_boxes_xywh_classes[0] is None:
return image_bgr
S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
outputs_boxes_xywh_classes
)
for n in range(N):
x, y, w, h = bboxes_xywh[n].int().tolist()
mesh_name = self.class_to_mesh_name[pred_classes[n]]
closest_vertices, mask = get_closest_vertices_mask_from_ES(
E[[n]],
S[[n]],
h,
w,
self.mesh_vertex_embeddings[mesh_name],
self.device,
)
embed_map = get_xyz_vertex_embedding(mesh_name, self.device)
vis = (embed_map[closest_vertices].clip(0, 1) * 255.0).cpu().numpy()
mask_numpy = mask.cpu().numpy().astype(dtype=np.uint8)
image_bgr = self.mask_visualizer.visualize(image_bgr, mask_numpy, vis, [x, y, w, h])
return image_bgr
def extract_and_check_outputs_and_boxes(self, outputs_boxes_xywh_classes):
densepose_output, bboxes_xywh, pred_classes = outputs_boxes_xywh_classes
if pred_classes is None:
pred_classes = [self.default_class] * len(bboxes_xywh)
assert isinstance(
densepose_output, DensePoseEmbeddingPredictorOutput
), "DensePoseEmbeddingPredictorOutput expected, {} encountered".format(
type(densepose_output)
)
S = densepose_output.coarse_segm
E = densepose_output.embedding
N = S.size(0)
assert N == E.size(
0
), "CSE coarse_segm {} and embeddings {}" " should have equal first dim size".format(
S.size(), E.size()
)
assert N == len(
bboxes_xywh
), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format(
len(bboxes_xywh), N
)
assert N == len(pred_classes), (
"number of predicted classes {}"
" should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
)
return S, E, N, bboxes_xywh, pred_classes
def get_texture_atlases(json_str: Optional[str]) -> Optional[Dict[str, Optional[np.ndarray]]]:
"""
json_str is a JSON string representing a mesh_name -> texture_atlas_path dictionary
"""
if json_str is None:
return None
paths = json.loads(json_str)
return {mesh_name: get_texture_atlas(path) for mesh_name, path in paths.items()}
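# A small usage sketch for get_texture_atlases: the JSON maps mesh names to
# atlas image paths. The mesh name and path below are hypothetical placeholders;
# the path must point to an existing atlas image for the call to succeed.
def _example_texture_atlases():
    json_str = '{"smpl_27554": "path/to/smpl_texture_atlas.png"}'
    return get_texture_atlases(json_str)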
class DensePoseOutputsTextureVisualizer(DensePoseOutputsVertexVisualizer):
def __init__(
self,
cfg,
texture_atlases_dict,
device="cuda",
default_class=0,
**kwargs,
):
self.embedder = build_densepose_embedder(cfg)
self.texture_image_dict = {}
self.alpha_dict = {}
for mesh_name in texture_atlases_dict.keys():
if texture_atlases_dict[mesh_name].shape[-1] == 4: # Image with alpha channel
self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, -1] / 255.0
self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, :3]
else:
self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name].sum(axis=-1) > 0
self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name]
self.device = torch.device(device)
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.default_class = default_class
self.mesh_vertex_embeddings = {
mesh_name: self.embedder(mesh_name).to(self.device)
for mesh_name in self.class_to_mesh_name.values()
}
def visualize(
self,
image_bgr: Image,
outputs_boxes_xywh_classes: Tuple[
Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
],
) -> Image:
image_target_bgr = image_bgr.copy()
if outputs_boxes_xywh_classes[0] is None:
return image_target_bgr
S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
outputs_boxes_xywh_classes
)
meshes = {
p: create_mesh(self.class_to_mesh_name[p], self.device) for p in np.unique(pred_classes)
}
for n in range(N):
x, y, w, h = bboxes_xywh[n].int().cpu().numpy()
mesh_name = self.class_to_mesh_name[pred_classes[n]]
closest_vertices, mask = get_closest_vertices_mask_from_ES(
E[[n]],
S[[n]],
h,
w,
self.mesh_vertex_embeddings[mesh_name],
self.device,
)
uv_array = meshes[pred_classes[n]].texcoords[closest_vertices].permute((2, 0, 1))
uv_array = uv_array.cpu().numpy().clip(0, 1)
textured_image = self.generate_image_with_texture(
image_target_bgr[y : y + h, x : x + w],
uv_array,
mask.cpu().numpy(),
self.class_to_mesh_name[pred_classes[n]],
)
if textured_image is None:
continue
image_target_bgr[y : y + h, x : x + w] = textured_image
return image_target_bgr
def generate_image_with_texture(self, bbox_image_bgr, uv_array, mask, mesh_name):
alpha = self.alpha_dict.get(mesh_name)
texture_image = self.texture_image_dict.get(mesh_name)
if alpha is None or texture_image is None:
return None
U, V = uv_array
x_index = (U * texture_image.shape[1]).astype(int)
y_index = (V * texture_image.shape[0]).astype(int)
local_texture = texture_image[y_index, x_index][mask]
local_alpha = np.expand_dims(alpha[y_index, x_index][mask], -1)
output_image = bbox_image_bgr.copy()
output_image[mask] = output_image[mask] * (1 - local_alpha) + local_texture * local_alpha
return output_image.astype(np.uint8)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/densepose_outputs_vertex.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import List, Optional, Tuple
import torch
from detectron2.data.detection_utils import read_image
from ..structures import DensePoseChartResult
from .base import Boxes, Image
from .densepose_results import DensePoseResultsVisualizer
def get_texture_atlas(path: Optional[str]) -> Optional[np.ndarray]:
if path is None:
return None
# Reading images like that downsamples 16-bit images to 8-bit
# If 16-bit images are needed, we can replace that by cv2.imread with the
# cv2.IMREAD_UNCHANGED flag (with cv2 we also need it to keep alpha channels)
# The rest of the pipeline would need to be adapted to 16-bit images too
bgr_image = read_image(path)
rgb_image = np.copy(bgr_image) # Convert BGR -> RGB
rgb_image[:, :, :3] = rgb_image[:, :, 2::-1] # Works with alpha channel
return rgb_image
class DensePoseResultsVisualizerWithTexture(DensePoseResultsVisualizer):
"""
texture_atlas: An image, size 6N * 4N, with N * N squares for each of the 24 body parts.
It must follow the grid found at https://github.com/facebookresearch/DensePose/blob/master/DensePoseData/demo_data/texture_atlas_200.png # noqa
For each body part, U is proportional to the x coordinate, and (1 - V) to y
"""
def __init__(self, texture_atlas, **kwargs):
self.texture_atlas = texture_atlas
self.body_part_size = texture_atlas.shape[0] // 6
assert self.body_part_size == texture_atlas.shape[1] // 4
def visualize(
self,
image_bgr: Image,
results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]],
) -> Image:
densepose_result, boxes_xywh = results_and_boxes_xywh
if densepose_result is None or boxes_xywh is None:
return image_bgr
boxes_xywh = boxes_xywh.int().cpu().numpy()
texture_image, alpha = self.get_texture()
for i, result in enumerate(densepose_result):
iuv_array = torch.cat((result.labels[None], result.uv.clamp(0, 1)))
x, y, w, h = boxes_xywh[i]
bbox_image = image_bgr[y : y + h, x : x + w]
image_bgr[y : y + h, x : x + w] = self.generate_image_with_texture(
texture_image, alpha, bbox_image, iuv_array.cpu().numpy()
)
return image_bgr
def get_texture(self):
N = self.body_part_size
texture_image = np.zeros([24, N, N, self.texture_atlas.shape[-1]])
for i in range(4):
for j in range(6):
texture_image[(6 * i + j), :, :, :] = self.texture_atlas[
N * j : N * (j + 1), N * i : N * (i + 1), :
]
if texture_image.shape[-1] == 4: # Image with alpha channel
alpha = texture_image[:, :, :, -1] / 255.0
texture_image = texture_image[:, :, :, :3]
else:
alpha = texture_image.sum(axis=-1) > 0
return texture_image, alpha
def generate_image_with_texture(self, texture_image, alpha, bbox_image_bgr, iuv_array):
I, U, V = iuv_array
generated_image_bgr = bbox_image_bgr.copy()
for PartInd in range(1, 25):
x, y = np.where(I == PartInd)
x_index = (U[x, y] * (self.body_part_size - 1)).astype(int)
y_index = ((1 - V[x, y]) * (self.body_part_size - 1)).astype(int)
part_alpha = np.expand_dims(alpha[PartInd - 1, y_index, x_index], -1)
generated_image_bgr[I == PartInd] = (
generated_image_bgr[I == PartInd] * (1 - part_alpha)
+ texture_image[PartInd - 1, y_index, x_index] * part_alpha
)
return generated_image_bgr.astype(np.uint8)
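# A worked example of the atlas indexing used in get_texture above (illustrative
# helper, not upstream API). A part label PartInd in 1..24 maps to tile index
# p = PartInd - 1, which sits at column p // 6 and row p % 6 of the 6N x 4N grid.
def _example_atlas_tile_bounds(part_ind, body_part_size):
    p = part_ind - 1
    col, row = p // 6, p % 6
    n = body_part_size
    # (y0, y1, x0, x1) pixel bounds of the tile inside the atlas image
    return row * n, (row + 1) * n, col * n, (col + 1) * n
# e.g. part_ind=15 with body_part_size=200 selects atlas rows 400..600 and
# columns 400..600, matching texture_atlas[N*j:N*(j+1), N*i:N*(i+1)] with i=2, j=2.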
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/densepose_results_textures.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import cv2
import torch
Image = np.ndarray
Boxes = torch.Tensor
class MatrixVisualizer(object):
"""
Base visualizer for matrix data
"""
def __init__(
self,
inplace=True,
cmap=cv2.COLORMAP_PARULA,
val_scale=1.0,
alpha=0.7,
interp_method_matrix=cv2.INTER_LINEAR,
interp_method_mask=cv2.INTER_NEAREST,
):
self.inplace = inplace
self.cmap = cmap
self.val_scale = val_scale
self.alpha = alpha
self.interp_method_matrix = interp_method_matrix
self.interp_method_mask = interp_method_mask
def visualize(self, image_bgr, mask, matrix, bbox_xywh):
self._check_image(image_bgr)
self._check_mask_matrix(mask, matrix)
if self.inplace:
image_target_bgr = image_bgr
else:
image_target_bgr = image_bgr * 0
x, y, w, h = [int(v) for v in bbox_xywh]
if w <= 0 or h <= 0:
return image_bgr
mask, matrix = self._resize(mask, matrix, w, h)
mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
matrix_scaled = matrix.astype(np.float32) * self.val_scale
_EPSILON = 1e-6
if np.any(matrix_scaled > 255 + _EPSILON):
logger = logging.getLogger(__name__)
logger.warning(
f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]"
)
#matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8)
#matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap)
mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
matrix_vis = matrix.copy()
matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg]
image_target_bgr[y : y + h, x : x + w, :] = (
image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha
)
return image_target_bgr.astype(np.uint8)
def _resize(self, mask, matrix, w, h):
if (w != mask.shape[1]) or (h != mask.shape[0]):
mask = cv2.resize(mask, (w, h), interpolation=self.interp_method_mask)
if (w != matrix.shape[1]) or (h != matrix.shape[0]):
matrix = cv2.resize(matrix, (w, h), interpolation=self.interp_method_matrix)
return mask, matrix
def _check_image(self, image_rgb):
assert len(image_rgb.shape) == 3
assert image_rgb.shape[2] == 3
assert image_rgb.dtype == np.uint8
def _check_mask_matrix(self, mask, matrix):
#assert len(matrix.shape) == 2
assert len(mask.shape) == 2
assert mask.dtype == np.uint8
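# A minimal usage sketch for MatrixVisualizer (synthetic inputs, illustrative
# only). Note that in this modified visualize() the colormap application is
# commented out, so `matrix` is expected to already hold per-pixel BGR colors;
# `mask` selects which pixels inside the bbox get blended into the image.
def _example_matrix_visualization():
    image_bgr = np.zeros((256, 256, 3), dtype=np.uint8)
    mask = np.ones((64, 64), dtype=np.uint8)
    matrix = np.zeros((64, 64, 3), dtype=np.uint8)
    matrix[:, :, 2] = 255  # red patch in BGR order
    visualizer = MatrixVisualizer(inplace=True, alpha=0.7)
    return visualizer.visualize(image_bgr, mask, matrix, bbox_xywh=(32, 32, 64, 64))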
class RectangleVisualizer(object):
_COLOR_GREEN = (18, 127, 15)
def __init__(self, color=_COLOR_GREEN, thickness=1):
self.color = color
self.thickness = thickness
def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None):
x, y, w, h = bbox_xywh
color = color or self.color
thickness = thickness or self.thickness
cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness)
return image_bgr
class PointsVisualizer(object):
_COLOR_GREEN = (18, 127, 15)
def __init__(self, color_bgr=_COLOR_GREEN, r=5):
self.color_bgr = color_bgr
self.r = r
def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None):
for j, pt_xy in enumerate(pts_xy):
x, y = pt_xy
color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr
r = rs[j] if rs is not None else self.r
cv2.circle(image_bgr, (int(x), int(y)), r, color_bgr, -1)
return image_bgr
class TextVisualizer(object):
_COLOR_GRAY = (218, 227, 218)
_COLOR_WHITE = (255, 255, 255)
def __init__(
self,
font_face=cv2.FONT_HERSHEY_SIMPLEX,
font_color_bgr=_COLOR_GRAY,
font_scale=0.35,
font_line_type=cv2.LINE_AA,
font_line_thickness=1,
fill_color_bgr=_COLOR_WHITE,
fill_color_transparency=1.0,
frame_color_bgr=_COLOR_WHITE,
frame_color_transparency=1.0,
frame_thickness=1,
):
self.font_face = font_face
self.font_color_bgr = font_color_bgr
self.font_scale = font_scale
self.font_line_type = font_line_type
self.font_line_thickness = font_line_thickness
self.fill_color_bgr = fill_color_bgr
self.fill_color_transparency = fill_color_transparency
self.frame_color_bgr = frame_color_bgr
self.frame_color_transparency = frame_color_transparency
self.frame_thickness = frame_thickness
def visualize(self, image_bgr, txt, topleft_xy):
txt_w, txt_h = self.get_text_size_wh(txt)
topleft_xy = tuple(map(int, topleft_xy))
x, y = topleft_xy
if self.frame_color_transparency < 1.0:
t = self.frame_thickness
image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = (
image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :]
* self.frame_color_transparency
+ np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency)
).astype(float)
if self.fill_color_transparency < 1.0:
image_bgr[y : y + txt_h, x : x + txt_w, :] = (
image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency
+ np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency)
).astype(float)
cv2.putText(
image_bgr,
txt,
topleft_xy,
self.font_face,
self.font_scale,
self.font_color_bgr,
self.font_line_thickness,
self.font_line_type,
)
return image_bgr
def get_text_size_wh(self, txt):
((txt_w, txt_h), _) = cv2.getTextSize(
txt, self.font_face, self.font_scale, self.font_line_thickness
)
return txt_w, txt_h
class CompoundVisualizer(object):
def __init__(self, visualizers):
self.visualizers = visualizers
def visualize(self, image_bgr, data):
assert len(data) == len(
self.visualizers
), "The number of datas {} should match the number of visualizers" " {}".format(
len(data), len(self.visualizers)
)
image = image_bgr
for i, visualizer in enumerate(self.visualizers):
image = visualizer.visualize(image, data[i])
return image
def __str__(self):
visualizer_str = ", ".join([str(v) for v in self.visualizers])
return "Compound Visualizer [{}]".format(visualizer_str)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/vis/base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import logging
from typing import List, Optional
import torch
from torch import nn
from detectron2.utils.file_io import PathManager
from densepose.structures.mesh import create_mesh
class MeshAlignmentEvaluator:
"""
Class for evaluation of 3D mesh alignment based on the learned vertex embeddings
"""
def __init__(self, embedder: nn.Module, mesh_names: Optional[List[str]]):
self.embedder = embedder
# use the provided mesh names if not None and not an empty list
self.mesh_names = mesh_names if mesh_names else embedder.mesh_names
self.logger = logging.getLogger(__name__)
with PathManager.open(
"https://dl.fbaipublicfiles.com/densepose/data/cse/mesh_keyvertices_v0.json", "r"
) as f:
self.mesh_keyvertices = json.load(f)
def evaluate(self):
ge_per_mesh = {}
gps_per_mesh = {}
for mesh_name_1 in self.mesh_names:
avg_errors = []
avg_gps = []
embeddings_1 = self.embedder(mesh_name_1)
keyvertices_1 = self.mesh_keyvertices[mesh_name_1]
keyvertex_names_1 = list(keyvertices_1.keys())
keyvertex_indices_1 = [keyvertices_1[name] for name in keyvertex_names_1]
for mesh_name_2 in self.mesh_names:
if mesh_name_1 == mesh_name_2:
continue
embeddings_2 = self.embedder(mesh_name_2)
keyvertices_2 = self.mesh_keyvertices[mesh_name_2]
sim_matrix_12 = embeddings_1[keyvertex_indices_1].mm(embeddings_2.T)
vertices_2_matching_keyvertices_1 = sim_matrix_12.argmax(axis=1)
mesh_2 = create_mesh(mesh_name_2, embeddings_2.device)
geodists = mesh_2.geodists[
vertices_2_matching_keyvertices_1,
[keyvertices_2[name] for name in keyvertex_names_1],
]
Current_Mean_Distances = 0.255
gps = (-(geodists ** 2) / (2 * (Current_Mean_Distances ** 2))).exp()
avg_errors.append(geodists.mean().item())
avg_gps.append(gps.mean().item())
ge_mean = torch.as_tensor(avg_errors).mean().item()
gps_mean = torch.as_tensor(avg_gps).mean().item()
ge_per_mesh[mesh_name_1] = ge_mean
gps_per_mesh[mesh_name_1] = gps_mean
ge_mean_global = torch.as_tensor(list(ge_per_mesh.values())).mean().item()
gps_mean_global = torch.as_tensor(list(gps_per_mesh.values())).mean().item()
per_mesh_metrics = {
"GE": ge_per_mesh,
"GPS": gps_per_mesh,
}
return ge_mean_global, gps_mean_global, per_mesh_metrics
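# The per-keyvertex GPS score computed above follows
#     gps = exp(-d^2 / (2 * kappa^2)),
# with kappa = Current_Mean_Distances = 0.255 and d the geodesic distance on the
# target mesh. A standalone sketch of that formula (illustrative only):
def _example_gps(geodesic_distance: float, kappa: float = 0.255) -> float:
    import math
    return math.exp(-(geodesic_distance ** 2) / (2 * kappa ** 2))
# e.g. a geodesic error equal to kappa gives exp(-0.5) ~= 0.607.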
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py
|