# Copyright (c) Facebook, Inc. and its affiliates.
import io
import numpy as np
import os
from dataclasses import dataclass
from functools import reduce
from operator import mul
from typing import BinaryIO, Dict, Optional, Tuple
import torch
from detectron2.utils.comm import gather, get_rank
from detectron2.utils.file_io import PathManager
@dataclass
class SizeData:
dtype: str
shape: Tuple[int]
def _calculate_record_field_size_b(data_schema: Dict[str, SizeData], field_name: str) -> int:
schema = data_schema[field_name]
element_size_b = np.dtype(schema.dtype).itemsize
record_field_size_b = reduce(mul, schema.shape) * element_size_b
return record_field_size_b
def _calculate_record_size_b(data_schema: Dict[str, SizeData]) -> int:
record_size_b = 0
for field_name in data_schema:
record_field_size_b = _calculate_record_field_size_b(data_schema, field_name)
record_size_b += record_field_size_b
return record_size_b
def _calculate_record_field_sizes_b(data_schema: Dict[str, SizeData]) -> Dict[str, int]:
field_sizes_b = {}
for field_name in data_schema:
field_sizes_b[field_name] = _calculate_record_field_size_b(data_schema, field_name)
return field_sizes_b
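# Worked size example for the helpers above (values are illustrative): with
#   {"coarse_segm": SizeData(dtype="float32", shape=(112, 112)),
#    "embedding": SizeData(dtype="float32", shape=(16, 112, 112))}
# the field sizes are 112 * 112 * 4 = 50176 B and 16 * 112 * 112 * 4 = 802816 B,
# so each record occupies 50176 + 802816 = 852992 B (~853 kB) in the storage.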
class SingleProcessTensorStorage:
"""
Compact tensor storage to keep tensor data of predefined size and type.
"""
def __init__(self, data_schema: Dict[str, SizeData], storage_impl: BinaryIO):
"""
Construct tensor storage based on information on data shape and size.
Internally uses numpy to interpret the type specification.
The storage must support operations `seek(offset, whence=os.SEEK_SET)` and
`read(size)` to be able to perform the `get` operation.
The storage must support operation `write(bytes)` to be able to perform
the `put` operation.
Args:
data_schema (dict: str -> SizeData): dictionary which maps tensor name
to its size data (shape and data type), e.g.
```
{
"coarse_segm": SizeData(dtype="float32", shape=(112, 112)),
"embedding": SizeData(dtype="float32", shape=(16, 112, 112)),
}
```
storage_impl (BinaryIO): io instance that handles file-like seek, read
and write operations, e.g. a file handle or a memory buffer like io.BytesIO
"""
self.data_schema = data_schema
self.record_size_b = _calculate_record_size_b(data_schema)
self.record_field_sizes_b = _calculate_record_field_sizes_b(data_schema)
self.storage_impl = storage_impl
self.next_record_id = 0
def get(self, record_id: int) -> Dict[str, torch.Tensor]:
"""
Load tensors from the storage by record ID
Args:
record_id (int): Record ID, for which to load the data
Return:
dict: str -> tensor: tensor name mapped to tensor data, recorded under the provided ID
"""
self.storage_impl.seek(record_id * self.record_size_b, os.SEEK_SET)
data_bytes = self.storage_impl.read(self.record_size_b)
assert len(data_bytes) == self.record_size_b, (
f"Expected data size {self.record_size_b} B could not be read: "
f"got {len(data_bytes)} B"
)
record = {}
cur_idx = 0
# it's important to read and write in the same order
for field_name in sorted(self.data_schema):
schema = self.data_schema[field_name]
field_size_b = self.record_field_sizes_b[field_name]
chunk = data_bytes[cur_idx : cur_idx + field_size_b]
data_np = np.frombuffer(
chunk, dtype=schema.dtype, count=reduce(mul, schema.shape)
).reshape(schema.shape)
record[field_name] = torch.from_numpy(data_np)
cur_idx += field_size_b
return record
def put(self, data: Dict[str, torch.Tensor]) -> int:
"""
Store tensors in the storage
Args:
data (dict: str -> tensor): data to store, a dictionary which maps
tensor names into tensors; tensor shapes must match those specified
in data schema.
Return:
int: record ID, under which the data is stored
"""
# it's important to read and write in the same order
for field_name in sorted(self.data_schema):
assert (
field_name in data
), f"Field '{field_name}' not present in data: data keys are {data.keys()}"
value = data[field_name]
assert value.shape == self.data_schema[field_name].shape, (
f"Mismatched tensor shapes for field '{field_name}': "
f"expected {self.data_schema[field_name].shape}, got {value.shape}"
)
data_bytes = value.cpu().numpy().tobytes()
assert len(data_bytes) == self.record_field_sizes_b[field_name], (
f"Expected field {field_name} to be of size "
f"{self.record_field_sizes_b[field_name]} B, got {len(data_bytes)} B"
)
self.storage_impl.write(data_bytes)
record_id = self.next_record_id
self.next_record_id += 1
return record_id
class SingleProcessFileTensorStorage(SingleProcessTensorStorage):
"""
Implementation of a single process tensor storage which stores data in a file
"""
def __init__(self, data_schema: Dict[str, SizeData], fpath: str, mode: str):
self.fpath = fpath
assert "b" in mode, f"Tensor storage should be opened in binary mode, got '{mode}'"
if "w" in mode:
file_h = PathManager.open(fpath, mode)
elif "r" in mode:
local_fpath = PathManager.get_local_path(fpath)
file_h = open(local_fpath, mode)
else:
raise ValueError(f"Unsupported file mode {mode}, supported modes: rb, wb")
super().__init__(data_schema, file_h) # pyre-ignore[6]
class SingleProcessRamTensorStorage(SingleProcessTensorStorage):
"""
Implementation of a single process tensor storage which stores data in RAM
"""
def __init__(self, data_schema: Dict[str, SizeData], buf: io.BytesIO):
super().__init__(data_schema, buf)
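# Minimal usage sketch for the RAM-backed storage (illustrative values only):
#
#   schema = {
#       "coarse_segm": SizeData(dtype="float32", shape=(112, 112)),
#       "embedding": SizeData(dtype="float32", shape=(16, 112, 112)),
#   }
#   storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
#   record_id = storage.put({name: torch.zeros(sd.shape) for name, sd in schema.items()})
#   record = storage.get(record_id)  # dict of tensors with the shapes above
#
# `put` writes fields in sorted name order and `get` reads them back in the same
# order, so records are fixed-size and addressable by record_id alone.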
class MultiProcessTensorStorage:
"""
Representation of a set of tensor storages created by individual processes,
which allows a single owner process to access all of them. The storages
should either be shared with or broadcast to the owner process.
Processes are identified by their rank; data is uniquely identified by
the rank of the originating process and the record ID.
"""
def __init__(self, rank_to_storage: Dict[int, SingleProcessTensorStorage]):
self.rank_to_storage = rank_to_storage
def get(self, rank: int, record_id: int) -> Dict[str, torch.Tensor]:
storage = self.rank_to_storage[rank]
return storage.get(record_id)
def put(self, rank: int, data: Dict[str, torch.Tensor]) -> int:
storage = self.rank_to_storage[rank]
return storage.put(data)
class MultiProcessFileTensorStorage(MultiProcessTensorStorage):
def __init__(self, data_schema: Dict[str, SizeData], rank_to_fpath: Dict[int, str], mode: str):
rank_to_storage = {
rank: SingleProcessFileTensorStorage(data_schema, fpath, mode)
for rank, fpath in rank_to_fpath.items()
}
super().__init__(rank_to_storage) # pyre-ignore[6]
class MultiProcessRamTensorStorage(MultiProcessTensorStorage):
def __init__(self, data_schema: Dict[str, SizeData], rank_to_buffer: Dict[int, io.BytesIO]):
rank_to_storage = {
rank: SingleProcessRamTensorStorage(data_schema, buf)
for rank, buf in rank_to_buffer.items()
}
super().__init__(rank_to_storage) # pyre-ignore[6]
def _ram_storage_gather(
storage: SingleProcessRamTensorStorage, dst_rank: int = 0
) -> Optional[MultiProcessRamTensorStorage]:
storage.storage_impl.seek(0, os.SEEK_SET)
# TODO: overhead, pickling a bytes object, can just pass bytes in a tensor directly
# see detectron2/utils.comm.py
data_list = gather(storage.storage_impl.read(), dst=dst_rank)
if get_rank() != dst_rank:
return None
rank_to_buffer = {i: io.BytesIO(data_list[i]) for i in range(len(data_list))}
multiprocess_storage = MultiProcessRamTensorStorage(storage.data_schema, rank_to_buffer)
return multiprocess_storage
def _file_storage_gather(
storage: SingleProcessFileTensorStorage,
dst_rank: int = 0,
mode: str = "rb",
) -> Optional[MultiProcessFileTensorStorage]:
storage.storage_impl.close()
fpath_list = gather(storage.fpath, dst=dst_rank)
if get_rank() != dst_rank:
return None
rank_to_fpath = {i: fpath_list[i] for i in range(len(fpath_list))}
return MultiProcessFileTensorStorage(storage.data_schema, rank_to_fpath, mode)
def storage_gather(
storage: SingleProcessTensorStorage, dst_rank: int = 0
) -> Optional[MultiProcessTensorStorage]:
if isinstance(storage, SingleProcessRamTensorStorage):
return _ram_storage_gather(storage, dst_rank)
elif isinstance(storage, SingleProcessFileTensorStorage):
return _file_storage_gather(storage, dst_rank)
raise Exception(f"Unsupported storage for gather operation: {storage}")
# --- end of banmo-main / third_party/detectron2_old/projects/DensePose/densepose/evaluation/tensor_storage.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
from .evaluator import DensePoseCOCOEvaluator
# --- end of banmo-main / third_party/detectron2_old/projects/DensePose/densepose/evaluation/__init__.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.data.catalog import Metadata
from detectron2.evaluation import COCOEvaluator
from densepose.data.datasets.coco import (
get_contiguous_id_to_category_id_map,
maybe_filter_categories_cocoapi,
)
def _maybe_add_iscrowd_annotations(cocoapi):
for ann in cocoapi.dataset["annotations"]:
if "iscrowd" not in ann:
ann["iscrowd"] = 0
class Detectron2COCOEvaluatorAdapter(COCOEvaluator):
def __init__(
self,
dataset_name,
output_dir=None,
distributed=True,
):
super().__init__(dataset_name, output_dir=output_dir, distributed=distributed)
maybe_filter_categories_cocoapi(dataset_name, self._coco_api)
_maybe_add_iscrowd_annotations(self._coco_api)
# substitute category metadata to account for categories
# that are mapped to the same contiguous id
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
self._maybe_substitute_metadata()
def _maybe_substitute_metadata(self):
cont_id_2_cat_id = get_contiguous_id_to_category_id_map(self._metadata)
cat_id_2_cont_id = self._metadata.thing_dataset_id_to_contiguous_id
if len(cont_id_2_cat_id) == len(cat_id_2_cont_id):
return
cat_id_2_cont_id_injective = {}
for cat_id, cont_id in cat_id_2_cont_id.items():
if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
cat_id_2_cont_id_injective[cat_id] = cont_id
metadata_new = Metadata(name=self._metadata.name)
for key, value in self._metadata.__dict__.items():
if key == "thing_dataset_id_to_contiguous_id":
setattr(metadata_new, key, cat_id_2_cont_id_injective)
else:
setattr(metadata_new, key, value)
self._metadata = metadata_new
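# Worked example of the substitution above (illustrative ids, not from a real
# dataset): if two dataset categories share a contiguous id, only the pair that
# round-trips through get_contiguous_id_to_category_id_map is kept, e.g.
#
#   cat_id_2_cont_id           = {1: 0, 2: 0, 3: 1}
#   cont_id_2_cat_id           = {0: 1, 1: 3}
#   cat_id_2_cont_id_injective = {1: 0, 3: 1}   # the mapping 2 -> 0 is dropped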
# --- end of banmo-main / third_party/detectron2_old/projects/DensePose/densepose/evaluation/d2_evaluator_adapter.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This is a modified version of cocoeval.py where we also have the densepose evaluation.
__author__ = "tsungyi"
import copy
import datetime
import logging
import numpy as np
import pickle
import time
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, Tuple
import scipy.spatial.distance as ssd
import torch
import torch.nn.functional as F
from pycocotools import mask as maskUtils
from scipy.io import loadmat
from scipy.ndimage import zoom as spzoom
from detectron2.utils.file_io import PathManager
from densepose.converters.chart_output_to_chart_result import resample_uv_tensors_to_bbox
from densepose.converters.segm_to_mask import (
resample_coarse_segm_tensor_to_bbox,
resample_fine_and_coarse_segm_tensors_to_bbox,
)
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.structures import DensePoseDataRelative
from densepose.structures.mesh import create_mesh
logger = logging.getLogger(__name__)
class DensePoseEvalMode(str, Enum):
# use both masks and geodesic distances (GPS * IOU) to compute scores
GPSM = "gpsm"
# use only geodesic distances (GPS) to compute scores
GPS = "gps"
# use only masks (IOU) to compute scores
IOU = "iou"
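# Score combination sketch for the modes above, as applied later in computeOgps and
# evaluateImg (d_i: geodesic distance of a matched ground-truth point, k_i: its
# per-part normalization, mask_iou: segmentation IoU of the detection):
#
#   gps  = mean_i exp(-d_i ** 2 / (2 * k_i ** 2))   # DensePoseEvalMode.GPS
#   gpsm = sqrt(gps * mask_iou)                     # DensePoseEvalMode.GPSM
#   iou  = mask_iou                                 # DensePoseEvalMode.IOU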
class DensePoseDataMode(str, Enum):
# use estimated IUV data (default mode)
IUV_DT = "iuvdt"
# use ground truth IUV data
IUV_GT = "iuvgt"
# use ground truth labels I and set UV to 0
I_GT_UV_0 = "igtuv0"
# use ground truth labels I and estimated UV coordinates
I_GT_UV_DT = "igtuvdt"
# use estimated labels I and set UV to 0
I_DT_UV_0 = "idtuv0"
class DensePoseCocoEval(object):
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(
self,
cocoGt=None,
cocoDt=None,
iouType: str = "densepose",
multi_storage=None,
embedder=None,
dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS,
dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT,
):
"""
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
"""
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.multi_storage = multi_storage
self.embedder = embedder
self._dpEvalMode = dpEvalMode
self._dpDataMode = dpDataMode
self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI]
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
self.ignoreThrBB = 0.7
self.ignoreThrUV = 0.9
def _loadGEval(self):
smpl_subdiv_fpath = PathManager.get_local_path(
"https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat"
)
pdist_transform_fpath = PathManager.get_local_path(
"https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat"
)
pdist_matrix_fpath = PathManager.get_local_path(
"https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl", timeout_sec=120
)
SMPL_subdiv = loadmat(smpl_subdiv_fpath)
self.PDIST_transform = loadmat(pdist_transform_fpath)
self.PDIST_transform = self.PDIST_transform["index"].squeeze()
UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze()
ClosestVertInds = np.arange(UV.shape[1]) + 1
self.Part_UVs = []
self.Part_ClosestVertInds = []
for i in np.arange(24):
self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)])
self.Part_ClosestVertInds.append(
ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]
)
with open(pdist_matrix_fpath, "rb") as hFile:
arrays = pickle.load(hFile, encoding="latin1")
self.Pdist_matrix = arrays["Pdist_matrix"]
self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze())
# Mean geodesic distances for parts.
self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])
# Coarse Part labels.
self.CoarseParts = np.array(
[0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8]
)
def _prepare(self):
"""
Prepare ._gts and ._dts for evaluation based on params
:return: None
"""
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
# safeguard for invalid segmentation annotation;
# annotations containing empty lists exist in the posetrack
# dataset. This is not a correct segmentation annotation
# in terms of COCO format; we need to deal with it somehow
segm = ann["segmentation"]
if type(segm) == list and len(segm) == 0:
ann["segmentation"] = None
continue
rle = coco.annToRLE(ann)
ann["segmentation"] = rle
def _getIgnoreRegion(iid, coco):
img = coco.imgs[iid]
if "ignore_regions_x" not in img.keys():
return None
if len(img["ignore_regions_x"]) == 0:
return None
rgns_merged = [
[v for xy in zip(region_x, region_y) for v in xy]
for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"])
]
rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"])
rle = maskUtils.merge(rles)
return maskUtils.decode(rle)
def _checkIgnore(dt, iregion):
if iregion is None:
return True
bb = np.array(dt["bbox"]).astype(int)
x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]
x2 = min([x2, iregion.shape[1]])
y2 = min([y2, iregion.shape[0]])
if bb[2] * bb[3] == 0:
return False
crop_iregion = iregion[y1:y2, x1:x2]
if crop_iregion.sum() == 0:
return True
if "densepose" not in dt.keys(): # filtering boxes
return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB
# filtering UVs
ignoremask = np.require(crop_iregion, requirements=["F"])
mask = self._extract_mask(dt)
uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
uvmask_ = maskUtils.encode(uvmask)
ignoremask_ = maskUtils.encode(ignoremask)
uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0]
return uviou < self.ignoreThrUV
p = self.params
if p.useCats:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
imns = self.cocoGt.loadImgs(p.imgIds)
self.size_mapping = {}
for im in imns:
self.size_mapping[im["id"]] = [im["height"], im["width"]]
# if iouType == 'uv', add point gt annotations
if p.iouType == "densepose":
self._loadGEval()
# convert ground truth to mask if iouType == 'segm'
if p.iouType == "segm":
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt["ignore"] = gt["ignore"] if "ignore" in gt else 0
gt["ignore"] = "iscrowd" in gt and gt["iscrowd"]
if p.iouType == "keypoints":
gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"]
if p.iouType == "densepose":
gt["ignore"] = ("dp_x" in gt) == 0
if p.iouType == "segm":
gt["ignore"] = gt["segmentation"] is None
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self._igrgns = defaultdict(list)
for gt in gts:
iid = gt["image_id"]
if iid not in self._igrgns.keys():
self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt)
if _checkIgnore(gt, self._igrgns[iid]):
self._gts[iid, gt["category_id"]].append(gt)
for dt in dts:
iid = dt["image_id"]
if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]):
self._dts[iid, dt["category_id"]].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
"""
tic = time.time()
logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType))
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
logger.info("useSegm (deprecated) is not None. Running DensePose evaluation")
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType in ["segm", "bbox"]:
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
elif p.iouType == "densepose":
computeIoU = self.computeOgps
if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}:
self.real_ious = {
(imgId, catId): self.computeDPIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
self.ious = {
(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic))
def getDensePoseMask(self, polys):
maskGen = np.zeros([256, 256])
stop = min(len(polys) + 1, 15)
for i in range(1, stop):
if polys[i - 1]:
currentMask = maskUtils.decode(polys[i - 1])
maskGen[currentMask > 0] = i
return maskGen
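# Note on getDensePoseMask: ground-truth DensePose annotations store up to 14
# RLE-encoded body-part masks at 256x256 resolution; the helper above merges them
# into a single 256x256 label map (values 1..14), which computeDPIoU later
# binarizes and rescales to the ground-truth box.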
def _generate_rlemask_on_image(self, mask, imgId, data):
bbox_xywh = np.array(data["bbox"])
x, y, w, h = bbox_xywh
im_h, im_w = self.size_mapping[imgId]
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
if mask is not None:
x0 = max(int(x), 0)
x1 = min(int(x + w), im_w, int(x) + mask.shape[1])
y0 = max(int(y), 0)
y1 = min(int(y + h), im_h, int(y) + mask.shape[0])
y = int(y)
x = int(x)
im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x]
im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"])
rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0]
return rle_mask
def computeDPIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0 : p.maxDets[-1]]
gtmasks = []
for g in gt:
if DensePoseDataRelative.S_KEY in g:
# convert DensePose mask to a binary mask
mask = np.minimum(self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]), 1.0)
_, _, w, h = g["bbox"]
scale_x = float(max(w, 1)) / mask.shape[1]
scale_y = float(max(h, 1)) / mask.shape[0]
mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False)
mask = np.array(mask > 0.5, dtype=np.uint8)
rle_mask = self._generate_rlemask_on_image(mask, imgId, g)
elif "segmentation" in g:
segmentation = g["segmentation"]
if isinstance(segmentation, list) and segmentation:
# polygons
im_h, im_w = self.size_mapping[imgId]
rles = maskUtils.frPyObjects(segmentation, im_h, im_w)
rle_mask = maskUtils.merge(rles)
elif isinstance(segmentation, dict):
if isinstance(segmentation["counts"], list):
# uncompressed RLE
im_h, im_w = self.size_mapping[imgId]
rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w)
else:
# compressed RLE
rle_mask = segmentation
else:
rle_mask = self._generate_rlemask_on_image(None, imgId, g)
else:
rle_mask = self._generate_rlemask_on_image(None, imgId, g)
gtmasks.append(rle_mask)
dtmasks = []
for d in dt:
mask = self._extract_mask(d)
mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
rle_mask = self._generate_rlemask_on_image(mask, imgId, d)
dtmasks.append(rle_mask)
# compute iou between each dt and gt region
iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd)
return iousDP
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0 : p.maxDets[-1]]
if p.iouType == "segm":
g = [g["segmentation"] for g in gt if g["segmentation"] is not None]
d = [d["segmentation"] for d in dt if d["segmentation"] is not None]
elif p.iouType == "bbox":
g = [g["bbox"] for g in gt]
d = [d["bbox"] for d in dt]
else:
raise Exception("unknown iouType for iou computation")
# compute iou between each dt and gt region
iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
ious = maskUtils.iou(d, g, iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
# dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d["score"] for d in dts], kind="mergesort")
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0 : p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = (
np.array(
[
0.26,
0.25,
0.25,
0.35,
0.35,
0.79,
0.79,
0.72,
0.72,
0.62,
0.62,
1.07,
1.07,
0.87,
0.87,
0.89,
0.89,
]
)
/ 10.0
)
vars = (sigmas * 2) ** 2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt["keypoints"])
xg = g[0::3]
yg = g[1::3]
vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt["bbox"]
x0 = bb[0] - bb[2]
x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]
y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt["keypoints"])
xd = d[0::3]
yd = d[1::3]
if k1 > 0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros(k)
dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0)
dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0)
e = (dx ** 2 + dy ** 2) / vars / (gt["area"] + np.spacing(1)) / 2
if k1 > 0:
e = e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
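# OKS sketch for the loop above (symbols follow the code): for keypoint n with
# offset (dx_n, dy_n), per-keypoint variance var_n = (2 * sigma_n) ** 2 and
# ground-truth area s,
#
#   oks = mean_n exp(-(dx_n ** 2 + dy_n ** 2) / (2 * s * var_n))
#
# where the mean runs over visible keypoints (vg > 0) whenever any are labeled.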
def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray:
if "densepose" in dt:
densepose_results_quantized = dt["densepose"]
return densepose_results_quantized.labels_uv_uint8[0].numpy()
elif "cse_mask" in dt:
return dt["cse_mask"]
elif "coarse_segm" in dt:
dy = max(int(dt["bbox"][3]), 1)
dx = max(int(dt["bbox"][2]), 1)
return (
F.interpolate(
dt["coarse_segm"].unsqueeze(0), (dy, dx), mode="bilinear", align_corners=False
)
.squeeze(0)
.argmax(0)
.numpy()
.astype(np.uint8)
)
elif "record_id" in dt:
assert (
self.multi_storage is not None
), f"Storage record id encountered in a detection {dt}, but no storage provided!"
record = self.multi_storage.get(dt["rank"], dt["record_id"])
coarse_segm = record["coarse_segm"]
dy = max(int(dt["bbox"][3]), 1)
dx = max(int(dt["bbox"][2]), 1)
return (
F.interpolate(
coarse_segm.unsqueeze(0), (dy, dx), mode="bilinear", align_corners=False
)
.squeeze(0)
.argmax(0)
.numpy()
.astype(np.uint8)
)
else:
raise Exception(f"No mask data in the detection: {dt}")
raise ValueError('The prediction dict needs to contain either "densepose" or "cse_mask"')
def _extract_iuv(
self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Extract arrays of I, U and V values at given points as numpy arrays
given the data mode stored in self._dpDataMode
"""
if self._dpDataMode == DensePoseDataMode.IUV_DT:
# estimated labels and UV (default)
ipoints = densepose_data[0, py, px]
upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.
vpoints = densepose_data[2, py, px] / 255.0
elif self._dpDataMode == DensePoseDataMode.IUV_GT:
# ground truth
ipoints = np.array(gt["dp_I"])
upoints = np.array(gt["dp_U"])
vpoints = np.array(gt["dp_V"])
elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0:
# ground truth labels, UV = 0
ipoints = np.array(gt["dp_I"])
upoints = np.zeros_like(ipoints, dtype=np.float64)
vpoints = np.zeros_like(ipoints, dtype=np.float64)
elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT:
# ground truth labels, estimated UV
ipoints = np.array(gt["dp_I"])
upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.
vpoints = densepose_data[2, py, px] / 255.0
elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0:
# estimated labels, UV = 0
ipoints = densepose_data[0, py, px]
upoints = np.zeros_like(ipoints, dtype=np.float64)
vpoints = np.zeros_like(ipoints, dtype=np.float64)
else:
raise ValueError(f"Unknown data mode: {self._dpDataMode}")
return ipoints, upoints, vpoints
def computeOgps_single_pair(self, dt, gt, py, px, pt_mask):
if "densepose" in dt:
ipoints, upoints, vpoints = self.extract_iuv_from_quantized(dt, gt, py, px, pt_mask)
return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
elif "u" in dt:
ipoints, upoints, vpoints = self.extract_iuv_from_raw(dt, gt, py, px, pt_mask)
return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
elif "record_id" in dt:
assert (
self.multi_storage is not None
), f"Storage record id encountered in detection {dt}, but no storage provided!"
record = self.multi_storage.get(dt["rank"], dt["record_id"])
record["bbox"] = dt["bbox"]
if "u" in record:
ipoints, upoints, vpoints = self.extract_iuv_from_raw(record, gt, py, px, pt_mask)
return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
elif "embedding" in record:
return self.computeOgps_single_pair_cse(
dt,
gt,
py,
px,
pt_mask,
record["coarse_segm"],
record["embedding"],
record["bbox"],
)
else:
raise Exception(f"Unknown record format: {record}")
elif "embedding" in dt:
return self.computeOgps_single_pair_cse(
dt, gt, py, px, pt_mask, dt["coarse_segm"], dt["embedding"], dt["bbox"]
)
raise Exception(f"Unknown detection format: {dt}")
def extract_iuv_from_quantized(self, dt, gt, py, px, pt_mask):
densepose_results_quantized = dt["densepose"]
ipoints, upoints, vpoints = self._extract_iuv(
densepose_results_quantized.labels_uv_uint8.numpy(), py, px, gt
)
ipoints[pt_mask == -1] = 0
return ipoints, upoints, vpoints
def extract_iuv_from_raw(self, dt, gt, py, px, pt_mask):
labels_dt = resample_fine_and_coarse_segm_tensors_to_bbox(
dt["fine_segm"].unsqueeze(0),
dt["coarse_segm"].unsqueeze(0),
dt["bbox"],
)
uv = resample_uv_tensors_to_bbox(
dt["u"].unsqueeze(0), dt["v"].unsqueeze(0), labels_dt.squeeze(0), dt["bbox"]
)
labels_uv_uint8 = torch.cat((labels_dt.byte(), (uv * 255).clamp(0, 255).byte()))
ipoints, upoints, vpoints = self._extract_iuv(labels_uv_uint8.numpy(), py, px, gt)
ipoints[pt_mask == -1] = 0
return ipoints, upoints, vpoints
def computeOgps_single_pair_iuv(self, dt, gt, ipoints, upoints, vpoints):
cVertsGT, ClosestVertsGTTransformed = self.findAllClosestVertsGT(gt)
cVerts = self.findAllClosestVertsUV(upoints, vpoints, ipoints)
# Get pairwise geodesic distances between gt and estimated mesh points.
dist = self.getDistancesUV(ClosestVertsGTTransformed, cVerts)
# Compute the Ogps measure.
# Find the mean geodesic normalization distance for
# each GT point, based on which part it is on.
Current_Mean_Distances = self.Mean_Distances[
self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]]
]
return dist, Current_Mean_Distances
def computeOgps_single_pair_cse(
self, dt, gt, py, px, pt_mask, coarse_segm, embedding, bbox_xywh_abs
):
# 0-based mesh vertex indices
cVertsGT = torch.as_tensor(gt["dp_vertex"], dtype=torch.int64)
# label for each pixel of the bbox, [H, W] tensor of long
labels_dt = resample_coarse_segm_tensor_to_bbox(
coarse_segm.unsqueeze(0), bbox_xywh_abs
).squeeze(0)
x, y, w, h = bbox_xywh_abs
# embedding for each pixel of the bbox, [D, H, W] tensor of float32
embedding = F.interpolate(
embedding.unsqueeze(0), (int(h), int(w)), mode="bilinear", align_corners=False
).squeeze(0)
# valid locations py, px
py_pt = torch.from_numpy(py[pt_mask > -1])
px_pt = torch.from_numpy(px[pt_mask > -1])
cVerts = torch.ones_like(cVertsGT) * -1
cVerts[pt_mask > -1] = self.findClosestVertsCse(
embedding, py_pt, px_pt, labels_dt, gt["ref_model"]
)
# Get pairwise geodesic distances between gt and estimated mesh points.
dist = self.getDistancesCse(cVertsGT, cVerts, gt["ref_model"])
# normalize distances
if (gt["ref_model"] == "smpl_27554") and ("dp_I" in gt):
Current_Mean_Distances = self.Mean_Distances[
self.CoarseParts[np.array(gt["dp_I"], dtype=int)]
]
else:
Current_Mean_Distances = 0.255
return dist, Current_Mean_Distances
def computeOgps(self, imgId, catId):
p = self.params
# dimension here should be Nxm
g = self._gts[imgId, catId]
d = self._dts[imgId, catId]
inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort")
d = [d[i] for i in inds]
if len(d) > p.maxDets[-1]:
d = d[0 : p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(g) == 0 or len(d) == 0:
return []
ious = np.zeros((len(d), len(g)))
# compute opgs between each detection and ground truth object
# sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5
# 1 # dist = 0.3m corresponds to ogps = 0.96
# 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5)
for j, gt in enumerate(g):
if not gt["ignore"]:
g_ = gt["bbox"]
for i, dt in enumerate(d):
#
dy = int(dt["bbox"][3])
dx = int(dt["bbox"][2])
dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0
dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0
py = (dp_y + g_[1] - dt["bbox"][1]).astype(int)
px = (dp_x + g_[0] - dt["bbox"][0]).astype(int)
#
pts = np.zeros(len(px))
pts[px >= dx] = -1
pts[py >= dy] = -1
pts[px < 0] = -1
pts[py < 0] = -1
if len(pts) < 1:
ogps = 0.0
elif np.max(pts) == -1:
ogps = 0.0
else:
px[pts == -1] = 0
py[pts == -1] = 0
dists_between_matches, dist_norm_coeffs = self.computeOgps_single_pair(
dt, gt, py, px, pts
)
# Compute gps
ogps_values = np.exp(
-(dists_between_matches ** 2) / (2 * (dist_norm_coeffs ** 2))
)
#
ogps = np.mean(ogps_values) if len(ogps_values) > 0 else 0.0
ious[i, j] = ogps
gbb = [gt["bbox"] for gt in g]
dbb = [dt["bbox"] for dt in d]
# compute iou between each dt and gt region
iscrowd = [int(o.get("iscrowd", 0)) for o in g]
ious_bb = maskUtils.iou(dbb, gbb, iscrowd)
return ious, ious_bb
def evaluateImg(self, imgId, catId, aRng, maxDet):
"""
perform evaluation for single category and image
:return: dict (single image results)
"""
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return None
for g in gt:
# g['_ignore'] = g['ignore']
if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]):
g["_ignore"] = True
else:
g["_ignore"] = False
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
# load computed ious
if p.iouType == "densepose":
# print('Checking the length', len(self.ious[imgId, catId]))
# if len(self.ious[imgId, catId]) == 0:
# print(self.ious[imgId, catId])
ious = (
self.ious[imgId, catId][0][:, gtind]
if len(self.ious[imgId, catId]) > 0
else self.ious[imgId, catId]
)
ioubs = (
self.ious[imgId, catId][1][:, gtind]
if len(self.ious[imgId, catId]) > 0
else self.ious[imgId, catId]
)
if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}:
iousM = (
self.real_ious[imgId, catId][:, gtind]
if len(self.real_ious[imgId, catId]) > 0
else self.real_ious[imgId, catId]
)
else:
ious = (
self.ious[imgId, catId][:, gtind]
if len(self.ious[imgId, catId]) > 0
else self.ious[imgId, catId]
)
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T, G))
dtm = np.zeros((T, D))
gtIg = np.array([g["_ignore"] for g in gt])
dtIg = np.zeros((T, D))
if np.all(gtIg) and p.iouType == "densepose":
dtIg = np.logical_or(dtIg, True)
if len(ious) > 0: # and not p.iouType == 'densepose':
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t, 1 - 1e-10])
m = -1
for gind, _g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind, gind] > 0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
break
if p.iouType == "densepose":
if self._dpEvalMode == DensePoseEvalMode.GPSM:
new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind])
elif self._dpEvalMode == DensePoseEvalMode.IOU:
new_iou = iousM[dind, gind]
elif self._dpEvalMode == DensePoseEvalMode.GPS:
new_iou = ious[dind, gind]
else:
new_iou = ious[dind, gind]
if new_iou < iou:
continue
if new_iou == 0.0:
continue
# if match successful and best so far, store appropriately
iou = new_iou
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]["id"]
gtm[tind, m] = d["id"]
if p.iouType == "densepose":
if not len(ioubs) == 0:
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
if dtm[tind, dind] == 0:
ioub = 0.8
m = -1
for gind, _g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind, gind] > 0 and not iscrowd[gind]:
continue
# continue to next gt unless better match made
if ioubs[dind, gind] < ioub:
continue
# if match successful and best so far, store appropriately
ioub = ioubs[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m > -1:
dtIg[:, dind] = gtIg[m]
if gtIg[m]:
dtm[tind, dind] = gt[m]["id"]
gtm[tind, m] = d["id"]
# set unmatched detections outside of area range to ignore
a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
# store results for given image and category
# print('Done with the function', len(self.ious[imgId, catId]))
return {
"image_id": imgId,
"category_id": catId,
"aRng": aRng,
"maxDet": maxDet,
"dtIds": [d["id"] for d in dt],
"gtIds": [g["id"] for g in gt],
"dtMatches": dtm,
"gtMatches": gtm,
"dtScores": [d["score"] for d in dt],
"gtIgnore": gtIg,
"dtIgnore": dtIg,
}
def accumulate(self, p=None):
"""
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
"""
logger.info("Accumulating evaluation results...")
tic = time.time()
if not self.evalImgs:
logger.info("Please run evaluate() first")
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -(np.ones((T, R, K, A, M))) # -1 for the precision of absent categories
recall = -(np.ones((T, K, A, M)))
# create dictionary for future indexing
logger.info("Categories: {}".format(p.catIds))
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0 * A0 * I0
for a, a0 in enumerate(a_list):
Na = a0 * I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if e is not None]
if len(E) == 0:
continue
dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind="mergesort")
dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds]
dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds]
gtIg = np.concatenate([e["gtIgnore"] for e in E])
npig = np.count_nonzero(gtIg == 0)
if npig == 0:
continue
tps = np.logical_and(dtm, np.logical_not(dtIg))
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp + tp + np.spacing(1))
q = np.zeros((R,))
if nd:
recall[t, k, a, m] = rc[-1]
else:
recall[t, k, a, m] = 0
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist()
q = q.tolist()
for i in range(nd - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side="left")
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
except Exception:
pass
precision[t, :, k, a, m] = np.array(q)
logger.info(
"Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision))
)
self.eval = {
"params": p,
"counts": [T, R, K, A, M],
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"precision": precision,
"recall": recall,
}
toc = time.time()
logger.info("DONE (t={:0.2f}s).".format(toc - tic))
def summarize(self):
"""
Compute and display summary metrics for evaluation results.
Note this function can *only* be applied on the default parameter setting
"""
def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
p = self.params
iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
titleStr = "Average Precision" if ap == 1 else "Average Recall"
typeStr = "(AP)" if ap == 1 else "(AR)"
measure = "IoU"
if self.params.iouType == "keypoints":
measure = "OKS"
elif self.params.iouType == "densepose":
measure = "OGPS"
iouStr = (
"{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
if iouThr is None
else "{:0.2f}".format(iouThr)
)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval["precision"]
# IoU
if iouThr is not None:
t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval["recall"]
if iouThr is not None:
t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
stats[3] = _summarize(1, maxDets=20, areaRng="medium")
stats[4] = _summarize(1, maxDets=20, areaRng="large")
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
stats[8] = _summarize(0, maxDets=20, areaRng="medium")
stats[9] = _summarize(0, maxDets=20, areaRng="large")
return stats
def _summarizeUvs():
stats = [_summarize(1, maxDets=self.params.maxDets[0])]
min_threshold = self.params.iouThrs.min()
if min_threshold <= 0.201:
stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.2)]
if min_threshold <= 0.301:
stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.3)]
if min_threshold <= 0.401:
stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.4)]
stats += [
_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5),
_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75),
_summarize(1, maxDets=self.params.maxDets[0], areaRng="medium"),
_summarize(1, maxDets=self.params.maxDets[0], areaRng="large"),
_summarize(0, maxDets=self.params.maxDets[0]),
_summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5),
_summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75),
_summarize(0, maxDets=self.params.maxDets[0], areaRng="medium"),
_summarize(0, maxDets=self.params.maxDets[0], areaRng="large"),
]
return np.array(stats)
def _summarizeUvsOld():
stats = np.zeros((18,))
stats[0] = _summarize(1, maxDets=self.params.maxDets[0])
stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5)
stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55)
stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60)
stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65)
stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70)
stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75)
stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80)
stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85)
stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90)
stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95)
stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium")
stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large")
stats[13] = _summarize(0, maxDets=self.params.maxDets[0])
stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5)
stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75)
stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium")
stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large")
return stats
if not self.eval:
raise Exception("Please run accumulate() first")
iouType = self.params.iouType
if iouType in ["segm", "bbox"]:
summarize = _summarizeDets
elif iouType in ["keypoints"]:
summarize = _summarizeKps
elif iouType in ["densepose"]:
summarize = _summarizeUvs
self.stats = summarize()
def __str__(self):
self.summarize()
return ""
# ================ functions for dense pose ==============================
def findAllClosestVertsUV(self, U_points, V_points, Index_points):
ClosestVerts = np.ones(Index_points.shape) * -1
for i in np.arange(24):
#
if (i + 1) in Index_points:
UVs = np.array(
[U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]]
)
Current_Part_UVs = self.Part_UVs[i]
Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[
np.argmin(D, axis=0)
]
ClosestVertsTransformed = self.PDIST_transform[ClosestVerts.astype(int) - 1]
ClosestVertsTransformed[ClosestVerts < 0] = 0
return ClosestVertsTransformed
def findClosestVertsCse(self, embedding, py, px, mask, mesh_name):
mesh_vertex_embeddings = self.embedder(mesh_name)
pixel_embeddings = embedding[:, py, px].t().to(device="cuda")
mask_vals = mask[py, px]
edm = squared_euclidean_distance_matrix(pixel_embeddings, mesh_vertex_embeddings)
vertex_indices = edm.argmin(dim=1).cpu()
vertex_indices[mask_vals <= 0] = -1
return vertex_indices
def findAllClosestVertsGT(self, gt):
#
I_gt = np.array(gt["dp_I"])
U_gt = np.array(gt["dp_U"])
V_gt = np.array(gt["dp_V"])
#
# print(I_gt)
#
ClosestVertsGT = np.ones(I_gt.shape) * -1
for i in np.arange(24):
if (i + 1) in I_gt:
UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]])
Current_Part_UVs = self.Part_UVs[i]
Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)]
#
ClosestVertsGTTransformed = self.PDIST_transform[ClosestVertsGT.astype(int) - 1]
ClosestVertsGTTransformed[ClosestVertsGT < 0] = 0
return ClosestVertsGT, ClosestVertsGTTransformed
def getDistancesCse(self, cVertsGT, cVerts, mesh_name):
geodists_vertices = torch.ones_like(cVertsGT) * float("inf")
selected = (cVertsGT >= 0) * (cVerts >= 0)
mesh = create_mesh(mesh_name, "cpu")
geodists_vertices[selected] = mesh.geodists[cVertsGT[selected], cVerts[selected]]
return geodists_vertices.numpy()
def getDistancesUV(self, cVertsGT, cVerts):
#
n = 27554
dists = []
for d in range(len(cVertsGT)):
if cVertsGT[d] > 0:
if cVerts[d] > 0:
i = cVertsGT[d] - 1
j = cVerts[d] - 1
if j == i:
dists.append(0)
elif j > i:
ccc = i
i = j
j = ccc
i = n - i - 1
j = n - j - 1
k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
k = (n * n - n) / 2 - k - 1
dists.append(self.Pdist_matrix[int(k)][0])
else:
i = n - i - 1
j = n - j - 1
k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
k = (n * n - n) / 2 - k - 1
dists.append(self.Pdist_matrix[int(k)][0])
else:
dists.append(np.inf)
return np.atleast_1d(np.array(dists).squeeze())
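# Note on getDistancesUV: Pdist_matrix holds the n * (n - 1) / 2 pairwise geodesic
# distances of the n = 27554 SMPL subdivision vertices as a flat condensed
# (upper-triangular) array; the index arithmetic above maps a vertex pair (i, j),
# i != j, to its linear offset k before lookup. A pair of identical vertices
# contributes distance 0, and ground-truth points without a predicted vertex
# contribute +inf.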
class Params:
"""
Params for coco evaluation api
"""
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [
[0 ** 2, 1e5 ** 2],
[0 ** 2, 32 ** 2],
[32 ** 2, 96 ** 2],
[96 ** 2, 1e5 ** 2],
]
self.areaRngLbl = ["all", "small", "medium", "large"]
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ["all", "medium", "large"]
self.useCats = 1
def setUvParams(self):
self.imgIds = []
self.catIds = []
self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ["all", "medium", "large"]
self.useCats = 1
def __init__(self, iouType="segm"):
if iouType == "segm" or iouType == "bbox":
self.setDetParams()
elif iouType == "keypoints":
self.setKpParams()
elif iouType == "densepose":
self.setUvParams()
else:
raise Exception("iouType not supported")
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
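# End-to-end usage sketch (assumes `coco_gt` and `coco_dt` are pycocotools COCO
# objects carrying DensePose annotations and results; names are illustrative):
#
#   E = DensePoseCocoEval(coco_gt, coco_dt, iouType="densepose",
#                         dpEvalMode=DensePoseEvalMode.GPS)
#   E.evaluate()     # per-image, per-category evaluation
#   E.accumulate()   # fills E.eval with precision / recall arrays
#   E.summarize()    # logs the summary metrics and fills E.stats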
# --- end of banmo-main / third_party/detectron2_old/projects/DensePose/densepose/evaluation/densepose_coco_evaluation.py ---
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import logging
import numpy as np
import os
from collections import OrderedDict
from typing import Dict, Iterable, List, Optional
import pycocotools.mask as mask_utils
import torch
from pycocotools.coco import COCO
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.structures import BoxMode
from detectron2.utils.comm import gather, get_rank, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from densepose.converters import ToChartResultConverter, ToMaskConverter
from densepose.data.datasets.coco import maybe_filter_and_map_categories_cocoapi
from densepose.structures import (
DensePoseChartPredictorOutput,
DensePoseEmbeddingPredictorOutput,
quantize_densepose_chart_result,
)
from .densepose_coco_evaluation import DensePoseCocoEval, DensePoseEvalMode
from .mesh_alignment_evaluator import MeshAlignmentEvaluator
from .tensor_storage import (
SingleProcessFileTensorStorage,
SingleProcessRamTensorStorage,
SingleProcessTensorStorage,
SizeData,
storage_gather,
)
class DensePoseCOCOEvaluator(DatasetEvaluator):
def __init__(
self,
dataset_name,
distributed,
output_dir=None,
evaluator_type: str = "iuv",
min_iou_threshold: float = 0.5,
storage: Optional[SingleProcessTensorStorage] = None,
embedder=None,
should_evaluate_mesh_alignment: bool = False,
mesh_alignment_mesh_names: Optional[List[str]] = None,
):
self._embedder = embedder
self._distributed = distributed
self._output_dir = output_dir
self._evaluator_type = evaluator_type
self._storage = storage
self._should_evaluate_mesh_alignment = should_evaluate_mesh_alignment
assert not (
should_evaluate_mesh_alignment and embedder is None
), "Mesh alignment evaluation is activated, but no vertex embedder provided!"
if should_evaluate_mesh_alignment:
self._mesh_alignment_evaluator = MeshAlignmentEvaluator(
embedder,
mesh_alignment_mesh_names,
)
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
self._metadata = MetadataCatalog.get(dataset_name)
self._min_threshold = min_iou_threshold
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
maybe_filter_and_map_categories_cocoapi(dataset_name, self._coco_api)
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
The :class:`Instances` object needs to have `densepose` field.
"""
for input, output in zip(inputs, outputs):
instances = output["instances"].to(self._cpu_device)
if not instances.has("pred_densepose"):
continue
prediction_list = prediction_to_dict(
instances,
input["image_id"],
self._embedder,
self._metadata.class_to_mesh_name,
self._storage is not None,
)
if self._storage is not None:
for prediction_dict in prediction_list:
dict_to_store = {}
for field_name in self._storage.data_schema:
dict_to_store[field_name] = prediction_dict[field_name]
record_id = self._storage.put(dict_to_store)
prediction_dict["record_id"] = record_id
prediction_dict["rank"] = get_rank()
for field_name in self._storage.data_schema:
del prediction_dict[field_name]
self._predictions.extend(prediction_list)
def evaluate(self, img_ids=None):
if self._distributed:
synchronize()
predictions = gather(self._predictions)
predictions = list(itertools.chain(*predictions))
else:
predictions = self._predictions
multi_storage = storage_gather(self._storage) if self._storage is not None else None
if not is_main_process():
return
return copy.deepcopy(self._eval_predictions(predictions, multi_storage, img_ids))
def _eval_predictions(self, predictions, multi_storage=None, img_ids=None):
"""
Evaluate predictions on densepose.
Return results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "coco_densepose_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._logger.info("Evaluating predictions ...")
res = OrderedDict()
results_gps, results_gpsm, results_segm = _evaluate_predictions_on_coco(
self._coco_api,
predictions,
multi_storage,
self._embedder,
class_names=self._metadata.get("thing_classes"),
min_threshold=self._min_threshold,
img_ids=img_ids,
)
res["densepose_gps"] = results_gps
res["densepose_gpsm"] = results_gpsm
res["densepose_segm"] = results_segm
if self._should_evaluate_mesh_alignment:
res["densepose_mesh_alignment"] = self._evaluate_mesh_alignment()
return res
def _evaluate_mesh_alignment(self):
self._logger.info("Mesh alignment evaluation ...")
mean_ge, mean_gps, per_mesh_metrics = self._mesh_alignment_evaluator.evaluate()
results = {
"GE": mean_ge * 100,
"GPS": mean_gps * 100,
}
mesh_names = set()
for metric_name in per_mesh_metrics:
for mesh_name, value in per_mesh_metrics[metric_name].items():
results[f"{metric_name}-{mesh_name}"] = value * 100
mesh_names.add(mesh_name)
self._print_mesh_alignment_results(results, mesh_names)
return results
def _print_mesh_alignment_results(self, results: Dict[str, float], mesh_names: Iterable[str]):
self._logger.info("Evaluation results for densepose, mesh alignment:")
self._logger.info(f'| {"Mesh":13s} | {"GErr":7s} | {"GPS":7s} |')
self._logger.info("| :-----------: | :-----: | :-----: |")
for mesh_name in mesh_names:
ge_key = f"GE-{mesh_name}"
ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " "
gps_key = f"GPS-{mesh_name}"
gps_str = f"{results[gps_key]:.4f}" if gps_key in results else " "
self._logger.info(f"| {mesh_name:13s} | {ge_str:7s} | {gps_str:7s} |")
self._logger.info("| :-------------------------------: |")
ge_key = "GE"
ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " "
gps_key = "GPS"
gps_str = f"{results[gps_key]:.4f}" if gps_key in results else " "
self._logger.info(f'| {"MEAN":13s} | {ge_str:7s} | {gps_str:7s} |')
def prediction_to_dict(instances, img_id, embedder, class_to_mesh_name, use_storage):
"""
Args:
        instances (Instances): the output of the model
        img_id (int): the image id in COCO
        embedder (torch.nn.Module): vertex embedder, used only for CSE predictor outputs
        class_to_mesh_name (dict): class ID to mesh name mapping, used only for CSE outputs
        use_storage (bool): if True, return raw predictor tensors to be written to tensor
            storage instead of fully encoded results
Returns:
list[dict]: the results in densepose evaluation format
"""
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
raw_boxes_xywh = BoxMode.convert(
instances.pred_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
if isinstance(instances.pred_densepose, DensePoseEmbeddingPredictorOutput):
results_densepose = densepose_cse_predictions_to_dict(
instances, embedder, class_to_mesh_name, use_storage
)
elif isinstance(instances.pred_densepose, DensePoseChartPredictorOutput):
if not use_storage:
results_densepose = densepose_chart_predictions_to_dict(instances)
else:
results_densepose = densepose_chart_predictions_to_storage_dict(instances)
results = []
for k in range(len(instances)):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": raw_boxes_xywh[k].tolist(),
"score": scores[k],
}
results.append({**result, **results_densepose[k]})
return results
def densepose_chart_predictions_to_dict(instances):
segmentations = ToMaskConverter.convert(
instances.pred_densepose, instances.pred_boxes, instances.image_size
)
results = []
for k in range(len(instances)):
densepose_results_quantized = quantize_densepose_chart_result(
ToChartResultConverter.convert(instances.pred_densepose[k], instances.pred_boxes[k])
)
densepose_results_quantized.labels_uv_uint8 = (
densepose_results_quantized.labels_uv_uint8.cpu()
)
segmentation = segmentations.tensor[k]
segmentation_encoded = mask_utils.encode(
np.require(segmentation.numpy(), dtype=np.uint8, requirements=["F"])
)
segmentation_encoded["counts"] = segmentation_encoded["counts"].decode("utf-8")
result = {
"densepose": densepose_results_quantized,
"segmentation": segmentation_encoded,
}
results.append(result)
return results
def densepose_chart_predictions_to_storage_dict(instances):
results = []
for k in range(len(instances)):
densepose_predictor_output = instances.pred_densepose[k]
result = {
"coarse_segm": densepose_predictor_output.coarse_segm.squeeze(0).cpu(),
"fine_segm": densepose_predictor_output.fine_segm.squeeze(0).cpu(),
"u": densepose_predictor_output.u.squeeze(0).cpu(),
"v": densepose_predictor_output.v.squeeze(0).cpu(),
}
results.append(result)
return results
def densepose_cse_predictions_to_dict(instances, embedder, class_to_mesh_name, use_storage):
results = []
for k in range(len(instances)):
cse = instances.pred_densepose[k]
results.append(
{
"coarse_segm": cse.coarse_segm[0].cpu(),
"embedding": cse.embedding[0].cpu(),
}
)
return results
def _evaluate_predictions_on_coco(
coco_gt,
coco_results,
multi_storage=None,
embedder=None,
class_names=None,
min_threshold=0.5,
img_ids=None,
):
logger = logging.getLogger(__name__)
densepose_metrics = _get_densepose_metrics(min_threshold)
if len(coco_results) == 0: # cocoapi does not handle empty results very well
        logger.warning("No predictions from the model! Set scores to -1")
results_gps = {metric: -1 for metric in densepose_metrics}
results_gpsm = {metric: -1 for metric in densepose_metrics}
results_segm = {metric: -1 for metric in densepose_metrics}
return results_gps, results_gpsm, results_segm
coco_dt = coco_gt.loadRes(coco_results)
results = []
for eval_mode_name in ["GPS", "GPSM", "IOU"]:
eval_mode = getattr(DensePoseEvalMode, eval_mode_name)
coco_eval = DensePoseCocoEval(
coco_gt, coco_dt, "densepose", multi_storage, embedder, dpEvalMode=eval_mode
)
result = _derive_results_from_coco_eval(
coco_eval, eval_mode_name, densepose_metrics, class_names, min_threshold, img_ids
)
results.append(result)
return results
def _get_densepose_metrics(min_threshold=0.5):
metrics = ["AP"]
if min_threshold <= 0.201:
metrics += ["AP20"]
if min_threshold <= 0.301:
metrics += ["AP30"]
if min_threshold <= 0.401:
metrics += ["AP40"]
metrics.extend(["AP50", "AP75", "APm", "APl", "AR", "AR50", "AR75", "ARm", "ARl"])
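    # e.g. with the default min_threshold=0.5 the returned list is
    # ["AP", "AP50", "AP75", "APm", "APl", "AR", "AR50", "AR75", "ARm", "ARl"]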
return metrics
def _derive_results_from_coco_eval(
coco_eval, eval_mode_name, metrics, class_names, min_threshold, img_ids
):
if img_ids is not None:
coco_eval.params.imgIds = img_ids
coco_eval.params.iouThrs = np.linspace(
min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True
)
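    # e.g. with min_threshold=0.5 this yields 10 IoU thresholds: 0.50, 0.55, ..., 0.95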
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}
logger = logging.getLogger(__name__)
logger.info(
f"Evaluation results for densepose, {eval_mode_name} metric: \n"
+ create_small_table(results)
)
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP, the same way as it is done in D2
# (see detectron2/evaluation/coco_evaluation.py):
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append((f"{name}", float(ap * 100)))
# tabulate it
n_cols = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::n_cols] for i in range(n_cols)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (n_cols // 2),
numalign="left",
)
logger.info(f"Per-category {eval_mode_name} AP: \n" + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
def build_densepose_evaluator_storage(cfg: CfgNode, output_folder: str):
storage_spec = cfg.DENSEPOSE_EVALUATION.STORAGE
if storage_spec == "none":
return None
evaluator_type = cfg.DENSEPOSE_EVALUATION.TYPE
# common output tensor sizes
hout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
wout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
n_csc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
# specific output tensors
if evaluator_type == "iuv":
n_fsc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
schema = {
"coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)),
"fine_segm": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
"u": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
"v": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
}
elif evaluator_type == "cse":
embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
schema = {
"coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)),
"embedding": SizeData(dtype="float32", shape=(embed_size, hout, wout)),
}
else:
raise ValueError(f"Unknown evaluator type: {evaluator_type}")
# storage types
if storage_spec == "ram":
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
elif storage_spec == "file":
fpath = os.path.join(output_folder, f"DensePoseEvaluatorStorage.{get_rank()}.bin")
PathManager.mkdirs(output_folder)
storage = SingleProcessFileTensorStorage(schema, fpath, "wb")
else:
raise ValueError(f"Unknown storage specification: {storage_spec}")
return storage
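# Illustrative usage sketch (not part of the original module; `cfg`, `output_folder` and
# `dataset_name` are assumed to be provided by the caller):
#
#   storage = build_densepose_evaluator_storage(cfg, output_folder)
#   evaluator = DensePoseCOCOEvaluator(
#       dataset_name,
#       distributed=True,
#       output_dir=output_folder,
#       evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE,
#       storage=storage,
#   )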
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/evaluation/evaluator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
import numpy as np
from collections import UserDict, defaultdict
from dataclasses import dataclass
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple
import torch
from torch.utils.data.dataset import Dataset
from detectron2.config import CfgNode
from detectron2.data.build import build_detection_test_loader as d2_build_detection_test_loader
from detectron2.data.build import build_detection_train_loader as d2_build_detection_train_loader
from detectron2.data.build import (
load_proposals_into_dataset,
print_instances_class_histogram,
trivial_batch_collator,
worker_init_reset_seed,
)
from detectron2.data.catalog import DatasetCatalog, Metadata, MetadataCatalog
from detectron2.data.samplers import TrainingSampler
from detectron2.utils.comm import get_world_size
from densepose.config import get_bootstrap_dataset_config
from densepose.modeling import build_densepose_embedder
from .combined_loader import CombinedDataLoader, Loader
from .dataset_mapper import DatasetMapper
from .datasets.coco import DENSEPOSE_CSE_KEYS_WITHOUT_MASK, DENSEPOSE_IUV_KEYS_WITHOUT_MASK
from .datasets.dataset_type import DatasetType
from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
from .samplers import (
DensePoseConfidenceBasedSampler,
DensePoseCSEConfidenceBasedSampler,
DensePoseCSEUniformSampler,
DensePoseUniformSampler,
MaskFromDensePoseSampler,
PredictionToGroundTruthSampler,
)
from .transform import ImageResizeTransform
from .utils import get_category_to_class_mapping, get_class_to_mesh_name_mapping
from .video import (
FirstKFramesSelector,
FrameSelectionStrategy,
LastKFramesSelector,
RandomKFramesSelector,
VideoKeyframeDataset,
video_list_from_file,
)
__all__ = ["build_detection_train_loader", "build_detection_test_loader"]
Instance = Dict[str, Any]
InstancePredicate = Callable[[Instance], bool]
def _compute_num_images_per_worker(cfg: CfgNode):
num_workers = get_world_size()
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
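    # e.g. SOLVER.IMS_PER_BATCH=16 with a world size of 8 yields 2 images per worker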
images_per_worker = images_per_batch // num_workers
return images_per_worker
def _map_category_id_to_contiguous_id(dataset_name: str, dataset_dicts: Iterable[Instance]):
meta = MetadataCatalog.get(dataset_name)
for dataset_dict in dataset_dicts:
for ann in dataset_dict["annotations"]:
ann["category_id"] = meta.thing_dataset_id_to_contiguous_id[ann["category_id"]]
@dataclass
class _DatasetCategory:
"""
Class representing category data in a dataset:
- id: category ID, as specified in the dataset annotations file
- name: category name, as specified in the dataset annotations file
- mapped_id: category ID after applying category maps (DATASETS.CATEGORY_MAPS config option)
- mapped_name: category name after applying category maps
- dataset_name: dataset in which the category is defined
For example, when training models in a class-agnostic manner, one could take LVIS 1.0
dataset and map the animal categories to the same category as human data from COCO:
id = 225
name = "cat"
mapped_id = 1
mapped_name = "person"
dataset_name = "lvis_v1_animals_dp_train"
"""
id: int
name: str
mapped_id: int
mapped_name: str
dataset_name: str
_MergedCategoriesT = Dict[int, List[_DatasetCategory]]
def _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories: _MergedCategoriesT):
merged_categories_per_dataset = {}
for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())):
for cat in merged_categories[cat_id]:
if cat.dataset_name not in merged_categories_per_dataset:
merged_categories_per_dataset[cat.dataset_name] = defaultdict(list)
merged_categories_per_dataset[cat.dataset_name][cat_id].append(
(
contiguous_cat_id,
cat,
)
)
logger = logging.getLogger(__name__)
for dataset_name, merged_categories in merged_categories_per_dataset.items():
meta = MetadataCatalog.get(dataset_name)
if not hasattr(meta, "thing_classes"):
meta.thing_classes = []
meta.thing_dataset_id_to_contiguous_id = {}
meta.thing_dataset_id_to_merged_id = {}
else:
meta.thing_classes.clear()
meta.thing_dataset_id_to_contiguous_id.clear()
meta.thing_dataset_id_to_merged_id.clear()
logger.info(f"Dataset {dataset_name}: category ID to contiguous ID mapping:")
for _cat_id, categories in sorted(merged_categories.items()):
added_to_thing_classes = False
for contiguous_cat_id, cat in categories:
if not added_to_thing_classes:
meta.thing_classes.append(cat.mapped_name)
added_to_thing_classes = True
meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
logger.info(f"{cat.id} ({cat.name}) -> {contiguous_cat_id}")
def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
def has_annotations(instance: Instance) -> bool:
return "annotations" in instance
    def has_only_crowd_annotations(instance: Instance) -> bool:
        # COCO-format annotations store the crowd flag under "iscrowd"
        for ann in instance["annotations"]:
            if ann.get("iscrowd", 0) == 0:
                return False
        return True
    def general_keep_instance_predicate(instance: Instance) -> bool:
        return has_annotations(instance) and not has_only_crowd_annotations(instance)
if not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS:
return None
return general_keep_instance_predicate
def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
def has_sufficient_num_keypoints(instance: Instance) -> bool:
num_kpts = sum(
(np.array(ann["keypoints"][2::3]) > 0).sum()
for ann in instance["annotations"]
if "keypoints" in ann
)
return num_kpts >= min_num_keypoints
if cfg.MODEL.KEYPOINT_ON and (min_num_keypoints > 0):
return has_sufficient_num_keypoints
return None
def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
if not cfg.MODEL.MASK_ON:
return None
def has_mask_annotations(instance: Instance) -> bool:
return any("segmentation" in ann for ann in instance["annotations"])
return has_mask_annotations
def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
if not cfg.MODEL.DENSEPOSE_ON:
return None
use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
def has_densepose_annotations(instance: Instance) -> bool:
for ann in instance["annotations"]:
if all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK) or all(
key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK
):
return True
if use_masks and "segmentation" in ann:
return True
return False
return has_densepose_annotations
def _maybe_create_specific_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
specific_predicate_creators = [
_maybe_create_keypoints_keep_instance_predicate,
_maybe_create_mask_keep_instance_predicate,
_maybe_create_densepose_keep_instance_predicate,
]
predicates = [creator(cfg) for creator in specific_predicate_creators]
predicates = [p for p in predicates if p is not None]
if not predicates:
return None
def combined_predicate(instance: Instance) -> bool:
return any(p(instance) for p in predicates)
return combined_predicate
def _get_train_keep_instance_predicate(cfg: CfgNode):
general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
combined_specific_keep_predicate = _maybe_create_specific_keep_instance_predicate(cfg)
def combined_general_specific_keep_predicate(instance: Instance) -> bool:
return general_keep_predicate(instance) and combined_specific_keep_predicate(instance)
if (general_keep_predicate is None) and (combined_specific_keep_predicate is None):
return None
if general_keep_predicate is None:
return combined_specific_keep_predicate
if combined_specific_keep_predicate is None:
return general_keep_predicate
return combined_general_specific_keep_predicate
def _get_test_keep_instance_predicate(cfg: CfgNode):
general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
return general_keep_predicate
def _maybe_filter_and_map_categories(
dataset_name: str, dataset_dicts: List[Instance]
) -> List[Instance]:
meta = MetadataCatalog.get(dataset_name)
category_id_map = meta.thing_dataset_id_to_contiguous_id
filtered_dataset_dicts = []
for dataset_dict in dataset_dicts:
anns = []
for ann in dataset_dict["annotations"]:
cat_id = ann["category_id"]
if cat_id not in category_id_map:
continue
ann["category_id"] = category_id_map[cat_id]
anns.append(ann)
dataset_dict["annotations"] = anns
filtered_dataset_dicts.append(dataset_dict)
return filtered_dataset_dicts
def _add_category_whitelists_to_metadata(cfg: CfgNode):
for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items():
meta = MetadataCatalog.get(dataset_name)
meta.whitelisted_categories = whitelisted_cat_ids
logger = logging.getLogger(__name__)
logger.info(
"Whitelisted categories for dataset {}: {}".format(
dataset_name, meta.whitelisted_categories
)
)
def _add_category_maps_to_metadata(cfg: CfgNode):
for dataset_name, category_map in cfg.DATASETS.CATEGORY_MAPS.items():
category_map = {
int(cat_id_src): int(cat_id_dst) for cat_id_src, cat_id_dst in category_map.items()
}
meta = MetadataCatalog.get(dataset_name)
meta.category_map = category_map
logger = logging.getLogger(__name__)
logger.info("Category maps for dataset {}: {}".format(dataset_name, meta.category_map))
def _add_category_info_to_bootstrapping_metadata(dataset_name: str, dataset_cfg: CfgNode):
meta = MetadataCatalog.get(dataset_name)
meta.category_to_class_mapping = get_category_to_class_mapping(dataset_cfg)
meta.categories = dataset_cfg.CATEGORIES
meta.max_count_per_category = dataset_cfg.MAX_COUNT_PER_CATEGORY
logger = logging.getLogger(__name__)
logger.info(
"Category to class mapping for dataset {}: {}".format(
dataset_name, meta.category_to_class_mapping
)
)
def _maybe_add_class_to_mesh_name_map_to_metadata(dataset_names: List[str], cfg: CfgNode):
for dataset_name in dataset_names:
meta = MetadataCatalog.get(dataset_name)
if not hasattr(meta, "class_to_mesh_name"):
meta.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
def _merge_categories(dataset_names: Collection[str]) -> _MergedCategoriesT:
merged_categories = defaultdict(list)
category_names = {}
for dataset_name in dataset_names:
meta = MetadataCatalog.get(dataset_name)
whitelisted_categories = meta.get("whitelisted_categories")
category_map = meta.get("category_map", {})
cat_ids = (
whitelisted_categories if whitelisted_categories is not None else meta.categories.keys()
)
for cat_id in cat_ids:
cat_name = meta.categories[cat_id]
cat_id_mapped = category_map.get(cat_id, cat_id)
if cat_id_mapped == cat_id or cat_id_mapped in cat_ids:
category_names[cat_id] = cat_name
else:
category_names[cat_id] = str(cat_id_mapped)
# assign temporary mapped category name, this name can be changed
# during the second pass, since mapped ID can correspond to a category
# from a different dataset
cat_name_mapped = meta.categories[cat_id_mapped]
merged_categories[cat_id_mapped].append(
_DatasetCategory(
id=cat_id,
name=cat_name,
mapped_id=cat_id_mapped,
mapped_name=cat_name_mapped,
dataset_name=dataset_name,
)
)
# second pass to assign proper mapped category names
for cat_id, categories in merged_categories.items():
for cat in categories:
if cat_id in category_names and cat.mapped_name != category_names[cat_id]:
cat.mapped_name = category_names[cat_id]
return merged_categories
def _warn_if_merged_different_categories(merged_categories: _MergedCategoriesT):
logger = logging.getLogger(__name__)
for cat_id in merged_categories:
merged_categories_i = merged_categories[cat_id]
first_cat_name = merged_categories_i[0].name
if len(merged_categories_i) > 1 and not all(
cat.name == first_cat_name for cat in merged_categories_i[1:]
):
cat_summary_str = ", ".join(
[f"{cat.id} ({cat.name}) from {cat.dataset_name}" for cat in merged_categories_i]
)
logger.warning(
f"Merged category {cat_id} corresponds to the following categories: "
f"{cat_summary_str}"
)
def combine_detection_dataset_dicts(
dataset_names: Collection[str],
keep_instance_predicate: Optional[InstancePredicate] = None,
proposal_files: Optional[Collection[str]] = None,
) -> List[Instance]:
"""
Load and prepare dataset dicts for training / testing
Args:
dataset_names (Collection[str]): a list of dataset names
keep_instance_predicate (Callable: Dict[str, Any] -> bool): predicate
applied to instance dicts which defines whether to keep the instance
proposal_files (Collection[str]): if given, a list of object proposal files
that match each dataset in `dataset_names`.
"""
assert len(dataset_names)
if proposal_files is None:
proposal_files = [None] * len(dataset_names)
assert len(dataset_names) == len(proposal_files)
# load datasets and metadata
dataset_name_to_dicts = {}
for dataset_name in dataset_names:
dataset_name_to_dicts[dataset_name] = DatasetCatalog.get(dataset_name)
        assert len(dataset_name_to_dicts[dataset_name]), f"Dataset '{dataset_name}' is empty!"
# merge categories, requires category metadata to be loaded
# cat_id -> [(orig_cat_id, cat_name, dataset_name)]
merged_categories = _merge_categories(dataset_names)
_warn_if_merged_different_categories(merged_categories)
merged_category_names = [
merged_categories[cat_id][0].mapped_name for cat_id in sorted(merged_categories)
]
# map to contiguous category IDs
_add_category_id_to_contiguous_id_maps_to_metadata(merged_categories)
# load annotations and dataset metadata
for dataset_name, proposal_file in zip(dataset_names, proposal_files):
dataset_dicts = dataset_name_to_dicts[dataset_name]
assert len(dataset_dicts), f"Dataset '{dataset_name}' is empty!"
if proposal_file is not None:
dataset_dicts = load_proposals_into_dataset(dataset_dicts, proposal_file)
dataset_dicts = _maybe_filter_and_map_categories(dataset_name, dataset_dicts)
print_instances_class_histogram(dataset_dicts, merged_category_names)
dataset_name_to_dicts[dataset_name] = dataset_dicts
if keep_instance_predicate is not None:
all_datasets_dicts_plain = [
d
for d in itertools.chain.from_iterable(dataset_name_to_dicts.values())
if keep_instance_predicate(d)
]
else:
all_datasets_dicts_plain = list(
itertools.chain.from_iterable(dataset_name_to_dicts.values())
)
return all_datasets_dicts_plain
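# Illustrative sketch (assumption; the dataset names below are hypothetical examples):
#
#   dataset_dicts = combine_detection_dataset_dicts(
#       ["densepose_coco_2014_train", "densepose_coco_2014_valminusminival"],
#       keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
#   )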
def build_detection_train_loader(cfg: CfgNode, mapper=None):
"""
A data loader is created in a way similar to that of Detectron2.
The main differences are:
    - it allows combining datasets with different but compatible object category sets
The data loader is created by the following steps:
1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
2. Start workers to work on the dicts. Each worker will:
* Map each metadata dict into another format to be consumed by the model.
* Batch them by simply putting dicts into a list.
The batched ``list[mapped_dict]`` is what this dataloader will return.
Args:
cfg (CfgNode): the config
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, True)`.
Returns:
an infinite iterator of training data
"""
_add_category_whitelists_to_metadata(cfg)
_add_category_maps_to_metadata(cfg)
_maybe_add_class_to_mesh_name_map_to_metadata(cfg.DATASETS.TRAIN, cfg)
dataset_dicts = combine_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, True)
return d2_build_detection_train_loader(cfg, dataset=dataset_dicts, mapper=mapper)
def build_detection_test_loader(cfg, dataset_name, mapper=None):
"""
Similar to `build_detection_train_loader`.
But this function uses the given `dataset_name` argument (instead of the names in cfg),
and uses batch size 1.
Args:
cfg: a detectron2 CfgNode
dataset_name (str): a name of the dataset that's available in the DatasetCatalog
mapper (callable): a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, False)`.
Returns:
DataLoader: a torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
"""
_add_category_whitelists_to_metadata(cfg)
_add_category_maps_to_metadata(cfg)
_maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg)
dataset_dicts = combine_detection_dataset_dicts(
[dataset_name],
keep_instance_predicate=_get_test_keep_instance_predicate(cfg),
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
sampler = None
if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE:
sampler = torch.utils.data.SequentialSampler(dataset_dicts)
if mapper is None:
mapper = DatasetMapper(cfg, False)
return d2_build_detection_test_loader(
dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler
)
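# Illustrative sketch (assumption): building a test loader for the first dataset in cfg.DATASETS.TEST:
#
#   data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])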
def build_frame_selector(cfg: CfgNode):
strategy = FrameSelectionStrategy(cfg.STRATEGY)
if strategy == FrameSelectionStrategy.RANDOM_K:
frame_selector = RandomKFramesSelector(cfg.NUM_IMAGES)
elif strategy == FrameSelectionStrategy.FIRST_K:
frame_selector = FirstKFramesSelector(cfg.NUM_IMAGES)
elif strategy == FrameSelectionStrategy.LAST_K:
frame_selector = LastKFramesSelector(cfg.NUM_IMAGES)
elif strategy == FrameSelectionStrategy.ALL:
frame_selector = None
return frame_selector
def build_transform(cfg: CfgNode, data_type: str):
if cfg.TYPE == "resize":
if data_type == "image":
return ImageResizeTransform(cfg.MIN_SIZE, cfg.MAX_SIZE)
raise ValueError(f"Unknown transform {cfg.TYPE} for data type {data_type}")
def build_combined_loader(cfg: CfgNode, loaders: Collection[Loader], ratios: Sequence[float]):
images_per_worker = _compute_num_images_per_worker(cfg)
return CombinedDataLoader(loaders, images_per_worker, ratios)
def build_bootstrap_dataset(dataset_name: str, cfg: CfgNode) -> Sequence[torch.Tensor]:
"""
Build dataset that provides data to bootstrap on
Args:
dataset_name (str): Name of the dataset, needs to have associated metadata
to load the data
cfg (CfgNode): bootstrapping config
Returns:
Sequence[Tensor] - dataset that provides image batches, Tensors of size
[N, C, H, W] of type float32
"""
logger = logging.getLogger(__name__)
_add_category_info_to_bootstrapping_metadata(dataset_name, cfg)
meta = MetadataCatalog.get(dataset_name)
factory = BootstrapDatasetFactoryCatalog.get(meta.dataset_type)
dataset = None
if factory is not None:
dataset = factory(meta, cfg)
if dataset is None:
logger.warning(f"Failed to create dataset {dataset_name} of type {meta.dataset_type}")
return dataset
def build_data_sampler(cfg: CfgNode, sampler_cfg: CfgNode, embedder: Optional[torch.nn.Module]):
if sampler_cfg.TYPE == "densepose_uniform":
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseUniformSampler(count_per_class=sampler_cfg.COUNT_PER_CLASS),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
elif sampler_cfg.TYPE == "densepose_UV_confidence":
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseConfidenceBasedSampler(
confidence_channel="sigma_2",
count_per_class=sampler_cfg.COUNT_PER_CLASS,
search_proportion=0.5,
),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
elif sampler_cfg.TYPE == "densepose_fine_segm_confidence":
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseConfidenceBasedSampler(
confidence_channel="fine_segm_confidence",
count_per_class=sampler_cfg.COUNT_PER_CLASS,
search_proportion=0.5,
),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
elif sampler_cfg.TYPE == "densepose_coarse_segm_confidence":
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseConfidenceBasedSampler(
confidence_channel="coarse_segm_confidence",
count_per_class=sampler_cfg.COUNT_PER_CLASS,
search_proportion=0.5,
),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
elif sampler_cfg.TYPE == "densepose_cse_uniform":
assert embedder is not None
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseCSEUniformSampler(
cfg=cfg,
use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
embedder=embedder,
count_per_class=sampler_cfg.COUNT_PER_CLASS,
),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
elif sampler_cfg.TYPE == "densepose_cse_coarse_segm_confidence":
assert embedder is not None
data_sampler = PredictionToGroundTruthSampler()
# transform densepose pred -> gt
data_sampler.register_sampler(
"pred_densepose",
"gt_densepose",
DensePoseCSEConfidenceBasedSampler(
cfg=cfg,
use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
embedder=embedder,
confidence_channel="coarse_segm_confidence",
count_per_class=sampler_cfg.COUNT_PER_CLASS,
search_proportion=0.5,
),
)
data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
return data_sampler
raise ValueError(f"Unknown data sampler type {sampler_cfg.TYPE}")
def build_data_filter(cfg: CfgNode):
if cfg.TYPE == "detection_score":
min_score = cfg.MIN_VALUE
return ScoreBasedFilter(min_score=min_score)
raise ValueError(f"Unknown data filter type {cfg.TYPE}")
def build_inference_based_loader(
cfg: CfgNode,
dataset_cfg: CfgNode,
model: torch.nn.Module,
embedder: Optional[torch.nn.Module] = None,
) -> InferenceBasedLoader:
"""
Constructs data loader based on inference results of a model.
"""
dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER)
meta = MetadataCatalog.get(dataset_cfg.DATASET)
training_sampler = TrainingSampler(len(dataset))
data_loader = torch.utils.data.DataLoader(
dataset, # pyre-ignore[6]
batch_size=dataset_cfg.IMAGE_LOADER.BATCH_SIZE,
sampler=training_sampler,
num_workers=dataset_cfg.IMAGE_LOADER.NUM_WORKERS,
collate_fn=trivial_batch_collator,
worker_init_fn=worker_init_reset_seed,
)
return InferenceBasedLoader(
model,
data_loader=data_loader,
data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder),
data_filter=build_data_filter(dataset_cfg.FILTER),
shuffle=True,
batch_size=dataset_cfg.INFERENCE.OUTPUT_BATCH_SIZE,
inference_batch_size=dataset_cfg.INFERENCE.INPUT_BATCH_SIZE,
category_to_class_mapping=meta.category_to_class_mapping,
)
def has_inference_based_loaders(cfg: CfgNode) -> bool:
"""
    Returns True if at least one inference-based loader must
    be instantiated for training
"""
return len(cfg.BOOTSTRAP_DATASETS) > 0
def build_inference_based_loaders(
cfg: CfgNode, model: torch.nn.Module
) -> Tuple[List[InferenceBasedLoader], List[float]]:
loaders = []
ratios = []
embedder = build_densepose_embedder(cfg)
for dataset_spec in cfg.BOOTSTRAP_DATASETS:
dataset_cfg = get_bootstrap_dataset_config().clone()
dataset_cfg.merge_from_other_cfg(CfgNode(dataset_spec))
loader = build_inference_based_loader(cfg, dataset_cfg, model, embedder)
loaders.append(loader)
ratios.append(dataset_cfg.RATIO)
return loaders, ratios
def build_video_list_dataset(meta: Metadata, cfg: CfgNode):
video_list_fpath = meta.video_list_fpath
video_base_path = meta.video_base_path
category = meta.category
if cfg.TYPE == "video_keyframe":
frame_selector = build_frame_selector(cfg.SELECT)
transform = build_transform(cfg.TRANSFORM, data_type="image")
video_list = video_list_from_file(video_list_fpath, video_base_path)
keyframe_helper_fpath = cfg.KEYFRAME_HELPER if hasattr(cfg, "KEYFRAME_HELPER") else None
return VideoKeyframeDataset(
video_list, category, frame_selector, transform, keyframe_helper_fpath
)
class _BootstrapDatasetFactoryCatalog(UserDict):
"""
    A global dictionary that maps a DatasetType to a factory function which creates
    the corresponding bootstrap dataset from metadata and config
"""
def register(self, dataset_type: DatasetType, factory: Callable[[Metadata, CfgNode], Dataset]):
"""
Args:
dataset_type (DatasetType): a DatasetType e.g. DatasetType.VIDEO_LIST
factory (Callable[Metadata, CfgNode]): a callable which takes Metadata and cfg
arguments and returns a dataset object.
"""
assert dataset_type not in self, "Dataset '{}' is already registered!".format(dataset_type)
self[dataset_type] = factory
BootstrapDatasetFactoryCatalog = _BootstrapDatasetFactoryCatalog()
BootstrapDatasetFactoryCatalog.register(DatasetType.VIDEO_LIST, build_video_list_dataset)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/build.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch.utils.data.dataset import Dataset
from detectron2.data.detection_utils import read_image
ImageTransform = Callable[[torch.Tensor], torch.Tensor]
class ImageListDataset(Dataset):
"""
Dataset that provides images from a list.
"""
_EMPTY_IMAGE = torch.empty((0, 3, 1, 1))
def __init__(
self,
image_list: List[str],
category_list: Union[str, List[str], None] = None,
transform: Optional[ImageTransform] = None,
):
"""
Args:
image_list (List[str]): list of paths to image files
category_list (Union[str, List[str], None]): list of animal categories for
each image. If it is a string, or None, this applies to all images
"""
        if isinstance(category_list, list):
self.category_list = category_list
else:
self.category_list = [category_list] * len(image_list)
assert len(image_list) == len(
self.category_list
), "length of image and category lists must be equal"
self.image_list = image_list
self.transform = transform
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""
Gets selected images from the list
Args:
            idx (int): image index in the image list
Returns:
A dictionary containing two keys:
images (torch.Tensor): tensor of size [N, 3, H, W] (N = 1, or 0 for _EMPTY_IMAGE)
categories (List[str]): categories of the frames
"""
categories = [self.category_list[idx]]
fpath = self.image_list[idx]
transform = self.transform
try:
image = torch.from_numpy(np.ascontiguousarray(read_image(fpath, format="BGR")))
image = image.permute(2, 0, 1).unsqueeze(0).float() # HWC -> NCHW
if transform is not None:
image = transform(image)
return {"images": image, "categories": categories}
except (OSError, RuntimeError) as e:
logger = logging.getLogger(__name__)
logger.warning(f"Error opening image file container {fpath}: {e}")
return {"images": self._EMPTY_IMAGE, "categories": []}
def __len__(self):
return len(self.image_list)
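# Illustrative sketch (assumption; file paths and the resize transform are hypothetical):
#
#   from densepose.data.transform import ImageResizeTransform
#   dataset = ImageListDataset(
#       ["/path/to/img_0001.jpg", "/path/to/img_0002.jpg"],
#       category_list="dog",
#       transform=ImageResizeTransform(800, 1333),
#   )
#   entry = dataset[0]  # {"images": Tensor[1, 3, H, W], "categories": ["dog"]}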
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/image_list_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
import torch
from torch import nn
SampledData = Any
ModelOutput = Any
def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:
"""
Group elements of an iterable by chunks of size `n`, e.g.
grouper(range(9), 4) ->
(0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None)
"""
it = iter(iterable)
while True:
values = []
for _ in range(n):
try:
value = next(it)
except StopIteration:
if values:
values.extend([fillvalue] * (n - len(values)))
yield tuple(values) # pyre-ignore[7]
return
values.append(value)
yield tuple(values) # pyre-ignore[7]
class ScoreBasedFilter:
"""
Filters entries in model output based on their scores
Discards all entries with score less than the specified minimum
"""
def __init__(self, min_score: float = 0.8):
self.min_score = min_score
def __call__(self, model_output: ModelOutput) -> ModelOutput:
for model_output_i in model_output:
instances = model_output_i["instances"]
if not instances.has("scores"):
continue
instances_filtered = instances[instances.scores >= self.min_score]
model_output_i["instances"] = instances_filtered
return model_output
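# Illustrative sketch (assumption): keeping only detections with score >= 0.9:
#
#   data_filter = ScoreBasedFilter(min_score=0.9)
#   filtered_output = data_filter(model_output)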
class InferenceBasedLoader:
"""
Data loader based on results inferred by a model. Consists of:
- a data loader that provides batches of images
- a model that is used to infer the results
- a data sampler that converts inferred results to annotations
"""
def __init__(
self,
model: nn.Module,
data_loader: Iterable[List[Dict[str, Any]]],
data_sampler: Optional[Callable[[ModelOutput], List[SampledData]]] = None,
data_filter: Optional[Callable[[ModelOutput], ModelOutput]] = None,
shuffle: bool = True,
batch_size: int = 4,
inference_batch_size: int = 4,
drop_last: bool = False,
category_to_class_mapping: Optional[dict] = None,
):
"""
Constructor
Args:
model (torch.nn.Module): model used to produce data
data_loader (Iterable[List[Dict[str, Any]]]): iterable that provides
dictionaries with "images" and "categories" fields to perform inference on
data_sampler (Callable: ModelOutput -> SampledData): functor
that produces annotation data from inference results;
(optional, default: None)
data_filter (Callable: ModelOutput -> ModelOutput): filter
that selects model outputs for further processing
(optional, default: None)
shuffle (bool): if True, the input images get shuffled
batch_size (int): batch size for the produced annotation data
inference_batch_size (int): batch size for input images
drop_last (bool): if True, drop the last batch if it is undersized
category_to_class_mapping (dict): category to class mapping
"""
self.model = model
self.model.eval()
self.data_loader = data_loader
self.data_sampler = data_sampler
self.data_filter = data_filter
self.shuffle = shuffle
self.batch_size = batch_size
self.inference_batch_size = inference_batch_size
self.drop_last = drop_last
if category_to_class_mapping is not None:
self.category_to_class_mapping = category_to_class_mapping
else:
self.category_to_class_mapping = {}
def __iter__(self) -> Iterator[List[SampledData]]:
for batch in self.data_loader:
            # batch: List[Dict[str, Any]], each dict holds "images": Tensor[N, C, H, W]
            # and "categories": List[str]; an individual image is a Tensor[C, H, W]
images_and_categories = [
{"image": image, "category": category}
for element in batch
for image, category in zip(element["images"], element["categories"])
]
if not images_and_categories:
continue
if self.shuffle:
random.shuffle(images_and_categories)
yield from self._produce_data(images_and_categories) # pyre-ignore[6]
def _produce_data(
self, images_and_categories: List[Tuple[torch.Tensor, Optional[str]]]
) -> Iterator[List[SampledData]]:
"""
Produce batches of data from images
Args:
images_and_categories (List[Tuple[torch.Tensor, Optional[str]]]):
list of images and corresponding categories to process
Returns:
Iterator over batches of data sampled from model outputs
"""
data_batches: List[SampledData] = []
category_to_class_mapping = self.category_to_class_mapping
batched_images_and_categories = _grouper(images_and_categories, self.inference_batch_size)
for batch in batched_images_and_categories:
batch = [
{
"image": image_and_category["image"].to(self.model.device),
"category": image_and_category["category"],
}
for image_and_category in batch
if image_and_category is not None
]
if not batch:
continue
with torch.no_grad():
model_output = self.model(batch)
for model_output_i, batch_i in zip(model_output, batch):
assert len(batch_i["image"].shape) == 3
model_output_i["image"] = batch_i["image"]
instance_class = category_to_class_mapping.get(batch_i["category"], 0)
model_output_i["instances"].dataset_classes = torch.tensor(
[instance_class] * len(model_output_i["instances"])
)
model_output_filtered = (
model_output
if self.data_filter is None
else self.data_filter(model_output) # pyre-ignore[29]
)
data = (
model_output_filtered
if self.data_sampler is None
else self.data_sampler(model_output_filtered) # pyre-ignore[29]
)
for data_i in data:
if len(data_i["instances"]):
data_batches.append(data_i)
if len(data_batches) >= self.batch_size:
yield data_batches[: self.batch_size]
data_batches = data_batches[self.batch_size :]
if not self.drop_last and data_batches:
yield data_batches
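# Illustrative sketch (assumption; `model`, `image_loader` and `sampler` come from the caller):
#
#   loader = InferenceBasedLoader(
#       model,
#       data_loader=image_loader,
#       data_sampler=sampler,
#       data_filter=ScoreBasedFilter(min_score=0.8),
#   )
#   for sampled_batch in loader:  # List[SampledData] with at most `batch_size` entries
#       ...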
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/inference_based_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .meshes import builtin
from .build import (
build_detection_test_loader,
build_detection_train_loader,
build_combined_loader,
build_frame_selector,
build_inference_based_loaders,
has_inference_based_loaders,
BootstrapDatasetFactoryCatalog,
)
from .combined_loader import CombinedDataLoader
from .dataset_mapper import DatasetMapper
from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
from .image_list_dataset import ImageListDataset
from .utils import is_relative_local_path, maybe_prepend_base_path
# ensure the builtin datasets are registered
from . import datasets
# ensure the bootstrap datasets builders are registered
from . import build
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
from typing import Any, Dict, List, Tuple
import torch
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.layers import ROIAlign
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from densepose.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
def build_augmentation(cfg, is_train):
logger = logging.getLogger(__name__)
result = utils.build_augmentation(cfg, is_train)
if is_train:
random_rotation = T.RandomRotation(
cfg.INPUT.ROTATION_ANGLES, expand=False, sample_style="choice"
)
result.append(random_rotation)
logger.info("DensePose-specific augmentation used in training: " + str(random_rotation))
return result
class DatasetMapper:
"""
A customized version of `detectron2.data.DatasetMapper`
"""
def __init__(self, cfg, is_train=True):
self.augmentation = build_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = (
cfg.MODEL.MASK_ON or (
cfg.MODEL.DENSEPOSE_ON
and cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS)
)
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.densepose_on = cfg.MODEL.DENSEPOSE_ON
assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
# fmt: on
if self.keypoint_on and is_train:
# Flip only makes sense in training
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
else:
self.keypoint_hflip_indices = None
if self.densepose_on:
densepose_transform_srcs = [
MetadataCatalog.get(ds).densepose_transform_src
for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
]
assert len(densepose_transform_srcs) > 0
# TODO: check that DensePose transformation data is the same for
# all the datasets. Otherwise one would have to pass DB ID with
# each entry to select proper transformation data. For now, since
# all DensePose annotated data uses the same data semantics, we
# omit this check.
densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
self.densepose_transform_data = DensePoseTransformData.load(
densepose_transform_data_fpath
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
image, transforms = T.apply_transform_gens(self.augmentation, image)
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
# USER: Don't call transpose_densepose if you don't need
annos = [
self._transform_densepose(
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
),
transforms,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
if self.mask_on:
self._add_densepose_masks_as_segmentation(annos, image_shape)
instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")
densepose_annotations = [obj.get("densepose") for obj in annos]
if densepose_annotations and not all(v is None for v in densepose_annotations):
instances.gt_densepose = DensePoseList(
densepose_annotations, instances.gt_boxes, image_shape
)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
def _transform_densepose(self, annotation, transforms):
if not self.densepose_on:
return annotation
# Handle densepose annotations
is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
if is_valid:
densepose_data = DensePoseDataRelative(annotation, cleanup=True)
densepose_data.apply_transform(transforms, self.densepose_transform_data)
annotation["densepose"] = densepose_data
else:
# logger = logging.getLogger(__name__)
# logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid))
DensePoseDataRelative.cleanup_annotation(annotation)
# NOTE: annotations for certain instances may be unavailable.
            # 'None' is accepted by the DensePoseList data structure.
annotation["densepose"] = None
return annotation
def _add_densepose_masks_as_segmentation(
self, annotations: List[Dict[str, Any]], image_shape_hw: Tuple[int, int]
):
for obj in annotations:
if ("densepose" not in obj) or ("segmentation" in obj):
continue
# DP segmentation: torch.Tensor [S, S] of float32, S=256
segm_dp = torch.zeros_like(obj["densepose"].segm)
segm_dp[obj["densepose"].segm > 0] = 1
segm_h, segm_w = segm_dp.shape
bbox_segm_dp = torch.tensor((0, 0, segm_h - 1, segm_w - 1), dtype=torch.float32)
# image bbox
x0, y0, x1, y1 = (
v.item() for v in BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS)
)
segm_aligned = (
ROIAlign((y1 - y0, x1 - x0), 1.0, 0, aligned=True)
.forward(segm_dp.view(1, 1, *segm_dp.shape), bbox_segm_dp)
.squeeze()
)
image_mask = torch.zeros(*image_shape_hw, dtype=torch.float32)
image_mask[y0:y1, x0:x1] = segm_aligned
# segmentation for BitMask: np.array [H, W] of np.bool
obj["segmentation"] = image_mask >= 0.5
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Dict, Optional
from detectron2.config import CfgNode
def is_relative_local_path(path: str):
path_str = os.fsdecode(path)
return ("://" not in path_str) and not os.path.isabs(path)
def maybe_prepend_base_path(base_path: Optional[str], path: str):
"""
Prepends the provided path with a base path prefix if:
1) base path is not None;
2) path is a local path
"""
if base_path is None:
return path
if is_relative_local_path(path):
return os.path.join(base_path, path)
return path
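# Illustrative examples:
#   maybe_prepend_base_path("/data", "images/0001.jpg") -> "/data/images/0001.jpg"
#   maybe_prepend_base_path("/data", "s3://bucket/0001.jpg") -> "s3://bucket/0001.jpg"  (unchanged)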
def get_class_to_mesh_name_mapping(cfg: CfgNode) -> Dict[int, str]:
return {
int(class_id): mesh_name
for class_id, mesh_name in cfg.DATASETS.CLASS_TO_MESH_NAME_MAPPING.items()
}
def get_category_to_class_mapping(dataset_cfg: CfgNode) -> Dict[str, int]:
return {
category: int(class_id)
for category, class_id in dataset_cfg.CATEGORY_TO_CLASS_MAPPING.items()
}
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from collections import deque
from typing import Any, Collection, Deque, Iterable, Iterator, List, Sequence
Loader = Iterable[Any]
def _pooled_next(iterator: Iterator[Any], pool: Deque[Any]):
if not pool:
pool.extend(next(iterator))
return pool.popleft()
class CombinedDataLoader:
"""
Combines data loaders using the provided sampling ratios
"""
BATCH_COUNT = 100
def __init__(self, loaders: Collection[Loader], batch_size: int, ratios: Sequence[float]):
self.loaders = loaders
self.batch_size = batch_size
self.ratios = ratios
def __iter__(self) -> Iterator[List[Any]]:
iters = [iter(loader) for loader in self.loaders]
indices = []
        pool = [deque() for _ in iters]  # one buffer per loader; a shared deque would mix samples
# infinite iterator, as in D2
while True:
if not indices:
# just a buffer of indices, its size doesn't matter
# as long as it's a multiple of batch_size
k = self.batch_size * self.BATCH_COUNT
indices = random.choices(range(len(self.loaders)), self.ratios, k=k)
try:
batch = [_pooled_next(iters[i], pool[i]) for i in indices[: self.batch_size]]
except StopIteration:
break
indices = indices[self.batch_size :]
yield batch
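# Illustrative sketch (assumption): drawing roughly 75% of samples from loader_a and 25% from loader_b:
#
#   combined = CombinedDataLoader([loader_a, loader_b], batch_size=4, ratios=[0.75, 0.25])
#   for batch in combined:  # List[Any] of length batch_size
#       ...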
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/combined_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .frame_selector import (
FrameSelectionStrategy,
RandomKFramesSelector,
FirstKFramesSelector,
LastKFramesSelector,
FrameTsList,
FrameSelector,
)
from .video_keyframe_dataset import (
VideoKeyframeDataset,
video_list_from_file,
list_keyframes,
read_keyframes,
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/video/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import csv
import logging
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Union
import av
import torch
from torch.utils.data.dataset import Dataset
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
from .frame_selector import FrameSelector, FrameTsList
FrameList = List[av.frame.Frame] # pyre-ignore[16]
FrameTransform = Callable[[torch.Tensor], torch.Tensor]
def list_keyframes(video_fpath: str, video_stream_idx: int = 0) -> FrameTsList:
"""
Traverses all keyframes of a video file. Returns a list of keyframe
timestamps. Timestamps are counts in timebase units.
Args:
video_fpath (str): Video file path
video_stream_idx (int): Video stream index (default: 0)
Returns:
        List[int]: list of keyframe timestamps (timestamp is a count in timebase
units)
"""
try:
with PathManager.open(video_fpath, "rb") as io:
container = av.open(io, mode="r")
stream = container.streams.video[video_stream_idx]
keyframes = []
pts = -1
# Note: even though we request forward seeks for keyframes, sometimes
# a keyframe in backwards direction is returned. We introduce tolerance
# as a max count of ignored backward seeks
tolerance_backward_seeks = 2
while True:
try:
container.seek(pts + 1, backward=False, any_frame=False, stream=stream)
except av.AVError as e:
# the exception occurs when the video length is exceeded,
# we then return whatever data we've already collected
logger = logging.getLogger(__name__)
logger.debug(
f"List keyframes: Error seeking video file {video_fpath}, "
f"video stream {video_stream_idx}, pts {pts + 1}, AV error: {e}"
)
return keyframes
except OSError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"List keyframes: Error seeking video file {video_fpath}, "
f"video stream {video_stream_idx}, pts {pts + 1}, OS error: {e}"
)
return []
packet = next(container.demux(video=video_stream_idx))
if packet.pts is not None and packet.pts <= pts:
logger = logging.getLogger(__name__)
logger.warning(
f"Video file {video_fpath}, stream {video_stream_idx}: "
f"bad seek for packet {pts + 1} (got packet {packet.pts}), "
f"tolerance {tolerance_backward_seeks}."
)
tolerance_backward_seeks -= 1
if tolerance_backward_seeks == 0:
return []
pts += 1
continue
tolerance_backward_seeks = 2
pts = packet.pts
if pts is None:
return keyframes
if packet.is_keyframe:
keyframes.append(pts)
return keyframes
except OSError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"List keyframes: Error opening video file container {video_fpath}, " f"OS error: {e}"
)
except RuntimeError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"List keyframes: Error opening video file container {video_fpath}, "
f"Runtime error: {e}"
)
return []
def read_keyframes(
video_fpath: str, keyframes: FrameTsList, video_stream_idx: int = 0
) -> FrameList: # pyre-ignore[11]
"""
Reads keyframe data from a video file.
Args:
video_fpath (str): Video file path
keyframes (List[int]): List of keyframe timestamps (as counts in
timebase units to be used in container seek operations)
video_stream_idx (int): Video stream index (default: 0)
Returns:
List[Frame]: list of frames that correspond to the specified timestamps
"""
try:
with PathManager.open(video_fpath, "rb") as io:
container = av.open(io)
stream = container.streams.video[video_stream_idx]
frames = []
for pts in keyframes:
try:
container.seek(pts, any_frame=False, stream=stream)
frame = next(container.decode(video=0))
frames.append(frame)
except av.AVError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"Read keyframes: Error seeking video file {video_fpath}, "
f"video stream {video_stream_idx}, pts {pts}, AV error: {e}"
)
container.close()
return frames
except OSError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"Read keyframes: Error seeking video file {video_fpath}, "
f"video stream {video_stream_idx}, pts {pts}, OS error: {e}"
)
container.close()
return frames
except StopIteration:
logger = logging.getLogger(__name__)
logger.warning(
f"Read keyframes: Error decoding frame from {video_fpath}, "
f"video stream {video_stream_idx}, pts {pts}"
)
container.close()
return frames
container.close()
return frames
except OSError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"Read keyframes: Error opening video file container {video_fpath}, OS error: {e}"
)
except RuntimeError as e:
logger = logging.getLogger(__name__)
logger.warning(
f"Read keyframes: Error opening video file container {video_fpath}, Runtime error: {e}"
)
return []
def video_list_from_file(video_list_fpath: str, base_path: Optional[str] = None):
"""
Create a list of paths to video files from a text file.
Args:
video_list_fpath (str): path to a plain text file with the list of videos
base_path (str): base path for entries from the video list (default: None)
"""
video_list = []
with PathManager.open(video_list_fpath, "r") as io:
for line in io:
video_list.append(maybe_prepend_base_path(base_path, str(line.strip())))
return video_list
def read_keyframe_helper_data(fpath: str):
"""
Read keyframe data from a file in CSV format: the header should contain
"video_id" and "keyframes" fields. Value specifications are:
video_id: int
keyframes: list(int)
Example of contents:
video_id,keyframes
2,"[1,11,21,31,41,51,61,71,81]"
Args:
fpath (str): File containing keyframe data
Return:
video_id_to_keyframes (dict: int -> list(int)): for a given video ID it
contains a list of keyframes for that video
"""
video_id_to_keyframes = {}
try:
with PathManager.open(fpath, "r") as io:
csv_reader = csv.reader(io) # pyre-ignore[6]
header = next(csv_reader)
video_id_idx = header.index("video_id")
keyframes_idx = header.index("keyframes")
for row in csv_reader:
video_id = int(row[video_id_idx])
                assert (
                    video_id not in video_id_to_keyframes
                ), f"Duplicate keyframes entry for video {video_id} in {fpath}"
video_id_to_keyframes[video_id] = (
[int(v) for v in row[keyframes_idx][1:-1].split(",")]
if len(row[keyframes_idx]) > 2
else []
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning(f"Error reading keyframe helper data from {fpath}: {e}")
return video_id_to_keyframes
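# Illustrative sketch (not part of the original module): the "keyframes" CSV
# cell arrives as a bracketed string such as "[1,11,21]"; the helper below
# mirrors the parsing performed in read_keyframe_helper_data and would return
# [1, 11, 21] for that input (and [] for "[]").
def _example_parse_keyframes_cell(cell: str):
    return [int(v) for v in cell[1:-1].split(",")] if len(cell) > 2 else []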
class VideoKeyframeDataset(Dataset):
"""
Dataset that provides keyframes for a set of videos.
"""
_EMPTY_FRAMES = torch.empty((0, 3, 1, 1))
def __init__(
self,
video_list: List[str],
category_list: Union[str, List[str], None] = None,
frame_selector: Optional[FrameSelector] = None,
transform: Optional[FrameTransform] = None,
keyframe_helper_fpath: Optional[str] = None,
):
"""
Dataset constructor
Args:
video_list (List[str]): list of paths to video files
category_list (Union[str, List[str], None]): list of animal categories for each
video file. If it is a string, or None, this applies to all videos
frame_selector (Callable: KeyFrameList -> KeyFrameList):
selects keyframes to process, keyframes are given by
packet timestamps in timebase counts. If None, all keyframes
are selected (default: None)
transform (Callable: torch.Tensor -> torch.Tensor):
transforms a batch of RGB images (tensors of size [B, 3, H, W]),
returns a tensor of the same size. If None, no transform is
                applied (default: None)
            keyframe_helper_fpath (Optional[str]): path to a CSV file with
                precomputed keyframes per video ID; if None, or if a video is
                missing from the file, keyframes are extracted directly from
                the video file (default: None)
"""
        if isinstance(category_list, list):
self.category_list = category_list
else:
self.category_list = [category_list] * len(video_list)
assert len(video_list) == len(
self.category_list
), "length of video and category lists must be equal"
self.video_list = video_list
self.frame_selector = frame_selector
self.transform = transform
self.keyframe_helper_data = (
read_keyframe_helper_data(keyframe_helper_fpath)
if keyframe_helper_fpath is not None
else None
)
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""
Gets selected keyframes from a given video
Args:
idx (int): video index in the video list file
Returns:
A dictionary containing two keys:
                images (torch.Tensor): tensor of size [N, 3, H, W] (or of the
                    size produced by the transform) that contains keyframes data
categories (List[str]): categories of the frames
"""
categories = [self.category_list[idx]]
fpath = self.video_list[idx]
keyframes = (
list_keyframes(fpath)
if self.keyframe_helper_data is None or idx not in self.keyframe_helper_data
else self.keyframe_helper_data[idx]
)
transform = self.transform
frame_selector = self.frame_selector
if not keyframes:
return {"images": self._EMPTY_FRAMES, "categories": []}
if frame_selector is not None:
keyframes = frame_selector(keyframes)
frames = read_keyframes(fpath, keyframes)
if not frames:
return {"images": self._EMPTY_FRAMES, "categories": []}
frames = np.stack([frame.to_rgb().to_ndarray() for frame in frames])
frames = torch.as_tensor(frames, device=torch.device("cpu"))
frames = frames[..., [2, 1, 0]] # RGB -> BGR
frames = frames.permute(0, 3, 1, 2).float() # NHWC -> NCHW
if transform is not None:
frames = transform(frames)
return {"images": frames, "categories": categories}
def __len__(self):
return len(self.video_list)
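def _example_build_video_dataset():
    # Illustrative usage sketch (not part of the original module); the import
    # paths assume this repo's densepose package layout and the video path is
    # a placeholder.
    from densepose.data.transform.image import ImageResizeTransform
    from densepose.data.video.frame_selector import FirstKFramesSelector
    return VideoKeyframeDataset(
        video_list=["videos/clip_000.mp4"],
        category_list="chimpanzee",  # a single category applied to every video
        frame_selector=FirstKFramesSelector(k=4),  # keep at most 4 keyframes per video
        transform=ImageResizeTransform(),  # resize batches to the training resolution
    )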
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/video/video_keyframe_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from collections.abc import Callable
from enum import Enum
from typing import Callable as TCallable
from typing import List
FrameTsList = List[int]
FrameSelector = TCallable[[FrameTsList], FrameTsList]
class FrameSelectionStrategy(Enum):
"""
Frame selection strategy used with videos:
- "random_k": select k random frames
- "first_k": select k first frames
- "last_k": select k last frames
- "all": select all frames
"""
# fmt: off
RANDOM_K = "random_k"
FIRST_K = "first_k"
LAST_K = "last_k"
ALL = "all"
# fmt: on
class RandomKFramesSelector(Callable): # pyre-ignore[39]
"""
Selector that retains at most `k` random frames
"""
def __init__(self, k: int):
self.k = k
def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
"""
Select `k` random frames
Args:
            frame_tss (List[int]): timestamps of input frames
Returns:
List[int]: timestamps of selected frames
"""
return random.sample(frame_tss, min(self.k, len(frame_tss)))
class FirstKFramesSelector(Callable): # pyre-ignore[39]
"""
Selector that retains at most `k` first frames
"""
def __init__(self, k: int):
self.k = k
def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
"""
Select `k` first frames
Args:
            frame_tss (List[int]): timestamps of input frames
Returns:
List[int]: timestamps of selected frames
"""
return frame_tss[: self.k]
class LastKFramesSelector(Callable): # pyre-ignore[39]
"""
Selector that retains at most `k` last frames from video data
"""
def __init__(self, k: int):
self.k = k
def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
"""
Select `k` last frames
Args:
            frame_tss (List[int]): timestamps of input frames
Returns:
List[int]: timestamps of selected frames
"""
return frame_tss[-self.k :]
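# Illustrative sketch (not part of the original module): mapping a
# FrameSelectionStrategy value to a selector instance.
def _example_build_selector(strategy: FrameSelectionStrategy, k: int) -> FrameSelector:
    if strategy == FrameSelectionStrategy.RANDOM_K:
        return RandomKFramesSelector(k)
    if strategy == FrameSelectionStrategy.FIRST_K:
        return FirstKFramesSelector(k)
    if strategy == FrameSelectionStrategy.LAST_K:
        return LastKFramesSelector(k)
    # FrameSelectionStrategy.ALL: keep every keyframe unchanged
    return lambda frame_tss: frame_tss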
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/video/frame_selector.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Optional
from detectron2.data import DatasetCatalog, MetadataCatalog
from ..utils import maybe_prepend_base_path
from .dataset_type import DatasetType
CHIMPNSEE_DATASET_NAME = "chimpnsee"
def register_dataset(datasets_root: Optional[str] = None):
def empty_load_callback():
pass
video_list_fpath = maybe_prepend_base_path(
datasets_root,
"chimpnsee/cdna.eva.mpg.de/video_list.txt",
)
video_base_path = maybe_prepend_base_path(datasets_root, "chimpnsee/cdna.eva.mpg.de")
DatasetCatalog.register(CHIMPNSEE_DATASET_NAME, empty_load_callback)
MetadataCatalog.get(CHIMPNSEE_DATASET_NAME).set(
dataset_type=DatasetType.VIDEO_LIST,
video_list_fpath=video_list_fpath,
video_base_path=video_base_path,
category="chimpanzee",
)
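def _example_lookup_chimpnsee_metadata():
    # Illustrative sketch (not part of the original module): once
    # register_dataset(...) has run (see densepose.data.datasets.builtin), the
    # registered paths can be read back from the metadata catalog.
    meta = MetadataCatalog.get(CHIMPNSEE_DATASET_NAME)
    return meta.video_list_fpath, meta.video_base_path, meta.category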
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/chimpnsee.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
DENSEPOSE_MASK_KEY = "dp_masks"
DENSEPOSE_IUV_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V"]
DENSEPOSE_CSE_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_vertex", "ref_model"]
DENSEPOSE_ALL_POSSIBLE_KEYS = set(
DENSEPOSE_IUV_KEYS_WITHOUT_MASK + DENSEPOSE_CSE_KEYS_WITHOUT_MASK + [DENSEPOSE_MASK_KEY]
)
DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
@dataclass
class CocoDatasetInfo:
name: str
images_root: str
annotations_fpath: str
DATASETS = [
CocoDatasetInfo(
name="densepose_coco_2014_train",
images_root="coco/train2014",
annotations_fpath="coco/annotations/densepose_train2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014_100.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_train_cse",
images_root="coco/train2014",
annotations_fpath="coco_cse/densepose_train2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_100_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_valminusminival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_chimps",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_densepose.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_train",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_train.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_val",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_val.json",
),
CocoDatasetInfo(
name="posetrack2017_train",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_train2017.json",
),
CocoDatasetInfo(
name="posetrack2017_val",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_val2017.json",
),
CocoDatasetInfo(
name="lvis_v05_train",
images_root="coco/train2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_train.json",
),
CocoDatasetInfo(
name="lvis_v05_val",
images_root="coco/val2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_val.json",
),
]
BASE_DATASETS = [
CocoDatasetInfo(
name="base_coco_2017_train",
images_root="coco/train2017",
annotations_fpath="coco/annotations/instances_train2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val_100",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017_100.json",
),
]
def get_metadata(base_path: Optional[str]) -> Dict[str, Any]:
"""
Returns metadata associated with COCO DensePose datasets
Args:
base_path: Optional[str]
Base path used to load metadata from
Returns:
Dict[str, Any]
Metadata in the form of a dictionary
"""
meta = {
"densepose_transform_src": maybe_prepend_base_path(base_path, "UV_symmetry_transforms.mat"),
"densepose_smpl_subdiv": maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"),
"densepose_smpl_subdiv_transform": maybe_prepend_base_path(
base_path,
"SMPL_SUBDIV_TRANSFORM.mat",
),
}
return meta
def _load_coco_annotations(json_file: str):
"""
Load COCO annotations from a JSON file
Args:
json_file: str
Path to the file to load annotations from
Returns:
Instance of `pycocotools.coco.COCO` that provides access to annotations
data
"""
from pycocotools.coco import COCO
logger = logging.getLogger(__name__)
timer = Timer()
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return coco_api
def _add_categories_metadata(dataset_name: str, categories: List[Dict[str, Any]]):
meta = MetadataCatalog.get(dataset_name)
meta.categories = {c["id"]: c["name"] for c in categories}
logger = logging.getLogger(__name__)
logger.info("Dataset {} categories: {}".format(dataset_name, meta.categories))
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
if "minival" in json_file:
# Skip validation on COCO2014 valminusminival and minival annotations
# The ratio of buggy annotations there is tiny and does not affect accuracy
# Therefore we explicitly white-list them
return
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
ann_keys = ["iscrowd", "category_id"]
dataset_dicts = []
contains_video_frame_info = False
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
if "frame_id" in img_dict:
record["frame_id"] = img_dict["frame_id"]
record["video_id"] = img_dict.get("vid_id", None)
contains_video_frame_info = True
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
assert ann_dict.get("ignore", 0) == 0
obj = {key: ann_dict[key] for key in ann_keys if key in ann_dict}
_maybe_add_bbox(obj, ann_dict)
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if contains_video_frame_info:
create_video_frame_mapping(dataset_name, dataset_dicts)
return dataset_dicts
def get_contiguous_id_to_category_id_map(metadata):
cat_id_2_cont_id = metadata.thing_dataset_id_to_contiguous_id
cont_id_2_cat_id = {}
for cat_id, cont_id in cat_id_2_cont_id.items():
if cont_id in cont_id_2_cat_id:
continue
cont_id_2_cat_id[cont_id] = cat_id
return cont_id_2_cat_id
def maybe_filter_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
# filter categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
cats.append(cat)
coco_api.dataset["categories"] = cats
# filter annotations, if multiple categories are mapped to a single
# contiguous ID, use only one category ID and map all annotations to that category ID
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
ann["category_id"] = cont_id_2_cat_id[cont_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def maybe_filter_and_map_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
category_id_map = meta.thing_dataset_id_to_contiguous_id
# map categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in category_id_map:
continue
cat["id"] = category_id_map[cat_id]
cats.append(cat)
coco_api.dataset["categories"] = cats
# map annotation categories
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in category_id_map:
continue
ann["category_id"] = category_id_map[cat_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def create_video_frame_mapping(dataset_name, dataset_dicts):
mapping = defaultdict(dict)
for d in dataset_dicts:
video_id = d.get("video_id")
if video_id is None:
continue
mapping[video_id].update({d["frame_id"]: d["file_name"]})
MetadataCatalog.get(dataset_name).set(video_frame_mapping=mapping)
def load_coco_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in COCO instances format.
Replaces `detectron2.data.datasets.coco.load_coco_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
Path to the JSON file with annotations in COCO instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
"""
coco_api = _load_coco_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name, coco_api.loadCats(coco_api.getCatIds()))
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
"""
Registers provided COCO DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_coco_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
):
"""
Registers provided COCO DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset datas
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
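def _example_register_custom_coco_dataset():
    # Illustrative sketch (not part of the original module): registering an
    # additional COCO-format DensePose dataset; the name and paths below are
    # placeholders.
    custom = CocoDatasetInfo(
        name="densepose_custom_train",
        images_root="custom/images",
        annotations_fpath="custom/annotations/densepose_train.json",
    )
    register_dataset(custom, datasets_root="datasets")
    # annotations are parsed lazily, on the first call to
    # DatasetCatalog.get("densepose_custom_train")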
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/coco.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from . import builtin # ensure the builtin datasets are registered
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import get_lvis_instances_meta
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
from .coco import (
DENSEPOSE_ALL_POSSIBLE_KEYS,
DENSEPOSE_METADATA_URL_PREFIX,
CocoDatasetInfo,
get_metadata,
)
DATASETS = [
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_animals_100",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_val_animals_100_v2.json",
),
]
def _load_lvis_annotations(json_file: str):
"""
    Load LVIS annotations from a JSON file
    Args:
        json_file: str
            Path to the file to load annotations from
    Returns:
        Instance of `lvis.LVIS` that provides access to annotations data
"""
from lvis import LVIS
json_file = PathManager.get_local_path(json_file)
logger = logging.getLogger(__name__)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return lvis_api
def _add_categories_metadata(dataset_name: str):
metadict = get_lvis_instances_meta(dataset_name)
categories = metadict["thing_classes"]
metadata = MetadataCatalog.get(dataset_name)
metadata.categories = {i + 1: categories[i] for i in range(len(categories))}
logger = logging.getLogger(__name__)
logger.info(f"Dataset {dataset_name} has {len(categories)} categories")
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
dataset_dicts = []
def get_file_name(img_root, img_dict):
# Determine the path including the split folder ("train2017", "val2017", "test2017") from
# the coco_url field. Example:
# 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
return os.path.join(img_root + split_folder, file_name)
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = get_file_name(image_root, img_dict)
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
obj = {}
_maybe_add_bbox(obj, ann_dict)
obj["iscrowd"] = ann_dict.get("iscrowd", 0)
obj["category_id"] = ann_dict["category_id"]
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
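def _example_resolve_lvis_file_name(image_root: str, coco_url: str) -> str:
    # Illustrative sketch (not part of the original module): mirrors the nested
    # `get_file_name` helper above. With image_root="coco_" (as in DATASETS) and
    # coco_url="http://images.cocodataset.org/train2017/000000155379.jpg" this
    # returns "coco_train2017/000000155379.jpg".
    split_folder, file_name = coco_url.split("/")[-2:]
    return os.path.join(image_root + split_folder, file_name)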
def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in LVIS instances format.
    Replaces `detectron2.data.datasets.lvis.load_lvis_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
            Path to the JSON file with annotations in LVIS instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
"""
lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name)
# sort indices for reproducible results
img_ids = sorted(lvis_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in LVIS format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
"""
Registers provided LVIS DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_lvis_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
evaluator_type="lvis",
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX),
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
):
"""
Registers provided LVIS DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset datas
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/lvis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from enum import Enum
class DatasetType(Enum):
"""
Dataset type, mostly used for datasets that contain data to bootstrap models on
"""
VIDEO_LIST = "video_list"
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/dataset_type.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .chimpnsee import register_dataset as register_chimpnsee_dataset
from .coco import BASE_DATASETS as BASE_COCO_DATASETS
from .coco import DATASETS as COCO_DATASETS
from .coco import register_datasets as register_coco_datasets
from .lvis import DATASETS as LVIS_DATASETS
from .lvis import register_datasets as register_lvis_datasets
DEFAULT_DATASETS_ROOT = "datasets"
register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT)
register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT)
register_lvis_datasets(LVIS_DATASETS, DEFAULT_DATASETS_ROOT)
register_chimpnsee_dataset(DEFAULT_DATASETS_ROOT) # pyre-ignore[19]
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/datasets/builtin.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from collections import UserDict
from dataclasses import dataclass
from typing import Iterable, Optional
from ..utils import maybe_prepend_base_path
@dataclass
class MeshInfo:
name: str
data: str
geodists: Optional[str] = None
symmetry: Optional[str] = None
texcoords: Optional[str] = None
class _MeshCatalog(UserDict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mesh_ids = {}
self.mesh_names = {}
self.max_mesh_id = -1
def __setitem__(self, key, value):
if key in self:
logger = logging.getLogger(__name__)
logger.warning(
f"Overwriting mesh catalog entry '{key}': old value {self[key]}"
f", new value {value}"
)
mesh_id = self.mesh_ids[key]
else:
self.max_mesh_id += 1
mesh_id = self.max_mesh_id
super().__setitem__(key, value)
self.mesh_ids[key] = mesh_id
self.mesh_names[mesh_id] = key
def get_mesh_id(self, shape_name: str) -> int:
return self.mesh_ids[shape_name]
def get_mesh_name(self, mesh_id: int) -> str:
return self.mesh_names[mesh_id]
MeshCatalog = _MeshCatalog()
def register_mesh(mesh_info: MeshInfo, base_path: Optional[str]):
geodists, symmetry, texcoords = mesh_info.geodists, mesh_info.symmetry, mesh_info.texcoords
if geodists:
geodists = maybe_prepend_base_path(base_path, geodists)
if symmetry:
symmetry = maybe_prepend_base_path(base_path, symmetry)
if texcoords:
texcoords = maybe_prepend_base_path(base_path, texcoords)
MeshCatalog[mesh_info.name] = MeshInfo(
name=mesh_info.name,
data=maybe_prepend_base_path(base_path, mesh_info.data),
geodists=geodists,
symmetry=symmetry,
texcoords=texcoords,
)
def register_meshes(mesh_infos: Iterable[MeshInfo], base_path: Optional[str]):
for mesh_info in mesh_infos:
register_mesh(mesh_info, base_path)
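def _example_register_toy_mesh() -> int:
    # Illustrative sketch (not part of the original module): registering a
    # hypothetical mesh and retrieving the numeric ID assigned by the catalog;
    # the name, file name and base path are placeholders.
    register_mesh(
        MeshInfo(name="toy_mesh_100", data="toy_mesh_100.pkl"),
        base_path="https://example.com/meshes/",
    )
    return MeshCatalog.get_mesh_id("toy_mesh_100")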
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/meshes/catalog.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import builtin
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/meshes/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .catalog import MeshInfo, register_meshes
DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/"
MESHES = [
MeshInfo(
name="smpl_27554",
data="smpl_27554.pkl",
geodists="geodists/geodists_smpl_27554.pkl",
symmetry="symmetry/symmetry_smpl_27554.pkl",
texcoords="texcoords/texcoords_smpl_27554.pkl",
),
MeshInfo(
name="chimp_5029",
data="chimp_5029.pkl",
geodists="geodists/geodists_chimp_5029.pkl",
symmetry="symmetry/symmetry_chimp_5029.pkl",
texcoords="texcoords/texcoords_chimp_5029.pkl",
),
MeshInfo(
name="cat_5001",
data="cat_5001.pkl",
geodists="geodists/geodists_cat_5001.pkl",
symmetry="symmetry/symmetry_cat_5001.pkl",
texcoords="texcoords/texcoords_cat_5001.pkl",
),
MeshInfo(
name="cat_7466",
data="cat_7466.pkl",
geodists="geodists/geodists_cat_7466.pkl",
symmetry="symmetry/symmetry_cat_7466.pkl",
texcoords="texcoords/texcoords_cat_7466.pkl",
),
MeshInfo(
name="sheep_5004",
data="sheep_5004.pkl",
geodists="geodists/geodists_sheep_5004.pkl",
symmetry="symmetry/symmetry_sheep_5004.pkl",
texcoords="texcoords/texcoords_sheep_5004.pkl",
),
MeshInfo(
name="zebra_5002",
data="zebra_5002.pkl",
geodists="geodists/geodists_zebra_5002.pkl",
symmetry="symmetry/symmetry_zebra_5002.pkl",
texcoords="texcoords/texcoords_zebra_5002.pkl",
),
MeshInfo(
name="horse_5004",
data="horse_5004.pkl",
geodists="geodists/geodists_horse_5004.pkl",
symmetry="symmetry/symmetry_horse_5004.pkl",
texcoords="texcoords/texcoords_zebra_5002.pkl",
),
MeshInfo(
name="giraffe_5002",
data="giraffe_5002.pkl",
geodists="geodists/geodists_giraffe_5002.pkl",
symmetry="symmetry/symmetry_giraffe_5002.pkl",
texcoords="texcoords/texcoords_giraffe_5002.pkl",
),
MeshInfo(
name="elephant_5002",
data="elephant_5002.pkl",
geodists="geodists/geodists_elephant_5002.pkl",
symmetry="symmetry/symmetry_elephant_5002.pkl",
texcoords="texcoords/texcoords_elephant_5002.pkl",
),
MeshInfo(
name="dog_5002",
data="dog_5002.pkl",
geodists="geodists/geodists_dog_5002.pkl",
symmetry="symmetry/symmetry_dog_5002.pkl",
texcoords="texcoords/texcoords_dog_5002.pkl",
),
MeshInfo(
name="dog_7466",
data="dog_7466.pkl",
geodists="geodists/geodists_dog_7466.pkl",
symmetry="symmetry/symmetry_dog_7466.pkl",
texcoords="texcoords/texcoords_dog_7466.pkl",
),
MeshInfo(
name="cow_5002",
data="cow_5002.pkl",
geodists="geodists/geodists_cow_5002.pkl",
symmetry="symmetry/symmetry_cow_5002.pkl",
texcoords="texcoords/texcoords_cow_5002.pkl",
),
MeshInfo(
name="bear_4936",
data="bear_4936.pkl",
geodists="geodists/geodists_bear_4936.pkl",
symmetry="symmetry/symmetry_bear_4936.pkl",
texcoords="texcoords/texcoords_bear_4936.pkl",
),
]
register_meshes(MESHES, DENSEPOSE_MESHES_DIR)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/meshes/builtin.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.structures import BitMasks, Instances
from densepose.converters import ToMaskConverter
class MaskFromDensePoseSampler:
"""
Produce mask GT from DensePose predictions
This sampler simply converts DensePose predictions to BitMasks
    that contain a bool tensor of the size of the input image
"""
def __call__(self, instances: Instances) -> BitMasks:
"""
Converts predicted data from `instances` into the GT mask data
Args:
instances (Instances): predicted results, expected to have `pred_densepose` field
Returns:
Boolean Tensor of the size of the input image that has non-zero
values at pixels that are estimated to belong to the detected object
"""
return ToMaskConverter.convert(
instances.pred_densepose, instances.pred_boxes, instances.image_size
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/mask_from_densepose.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional
from detectron2.structures import Instances
ModelOutput = Dict[str, Any]
SampledData = Dict[str, Any]
@dataclass
class _Sampler:
"""
Sampler registry entry that contains:
- src (str): source field to sample from (deleted after sampling)
- dst (Optional[str]): destination field to sample to, if not None
- func (Optional[Callable: Any -> Any]): function that performs sampling,
if None, reference copy is performed
"""
src: str
dst: Optional[str]
func: Optional[Callable[[Any], Any]]
class PredictionToGroundTruthSampler:
"""
Sampler implementation that converts predictions to GT using registered
samplers for different fields of `Instances`.
"""
def __init__(self, dataset_name: str = ""):
self.dataset_name = dataset_name
self._samplers = {}
self.register_sampler("pred_boxes", "gt_boxes", None)
self.register_sampler("pred_classes", "gt_classes", None)
# delete scores
self.register_sampler("scores")
def __call__(self, model_output: List[ModelOutput]) -> List[SampledData]:
"""
Transform model output into ground truth data through sampling
Args:
            model_output (List[Dict[str, Any]]): model outputs, one entry per image
        Returns:
            List[Dict[str, Any]]: sampled data
"""
for model_output_i in model_output:
instances: Instances = model_output_i["instances"]
# transform data in each field
for _, sampler in self._samplers.items():
if not instances.has(sampler.src) or sampler.dst is None:
continue
if sampler.func is None:
instances.set(sampler.dst, instances.get(sampler.src))
else:
instances.set(sampler.dst, sampler.func(instances))
# delete model output data that was transformed
for _, sampler in self._samplers.items():
if sampler.src != sampler.dst and instances.has(sampler.src):
instances.remove(sampler.src)
model_output_i["dataset"] = self.dataset_name
return model_output
def register_sampler(
self,
prediction_attr: str,
gt_attr: Optional[str] = None,
func: Optional[Callable[[Any], Any]] = None,
):
"""
Register sampler for a field
Args:
prediction_attr (str): field to replace with a sampled value
gt_attr (Optional[str]): field to store the sampled value to, if not None
func (Optional[Callable: Any -> Any]): sampler function
"""
self._samplers[(prediction_attr, gt_attr)] = _Sampler(
src=prediction_attr, dst=gt_attr, func=func
)
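def _example_densepose_to_gt_sampler() -> PredictionToGroundTruthSampler:
    # Illustrative sketch (not part of the original module): in addition to the
    # default box/class/score handling, convert predicted DensePose results
    # into GT masks using the sibling MaskFromDensePoseSampler.
    from .mask_from_densepose import MaskFromDensePoseSampler
    sampler = PredictionToGroundTruthSampler(dataset_name="densepose_custom")
    sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
    return sampler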
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/prediction_to_gt.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .densepose_uniform import DensePoseUniformSampler
from .densepose_confidence_based import DensePoseConfidenceBasedSampler
from .densepose_cse_uniform import DensePoseCSEUniformSampler
from .densepose_cse_confidence_based import DensePoseCSEConfidenceBasedSampler
from .mask_from_densepose import MaskFromDensePoseSampler
from .prediction_to_gt import PredictionToGroundTruthSampler
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .densepose_cse_base import DensePoseCSEBaseSampler
from .densepose_uniform import DensePoseUniformSampler
class DensePoseCSEUniformSampler(DensePoseCSEBaseSampler, DensePoseUniformSampler):
"""
Uniform Sampler for CSE
"""
pass
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import functional as F
from detectron2.structures import BoxMode, Instances
from densepose.converters import ToChartResultConverter
from densepose.converters.base import IntTupleBox, make_int_box
from densepose.structures import DensePoseDataRelative, DensePoseList
class DensePoseBaseSampler:
"""
Base DensePose sampler to produce DensePose data from DensePose predictions.
Samples for each class are drawn according to some distribution over all pixels estimated
to belong to that class.
"""
def __init__(self, count_per_class: int = 8):
"""
Constructor
Args:
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
self.count_per_class = count_per_class
def __call__(self, instances: Instances) -> DensePoseList:
"""
Convert DensePose predictions (an instance of `DensePoseChartPredictorOutput`)
into DensePose annotations data (an instance of `DensePoseList`)
"""
boxes_xyxy_abs = instances.pred_boxes.tensor.clone().cpu()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
dp_datas = []
for i in range(len(boxes_xywh_abs)):
annotation_i = self._sample(instances[i], make_int_box(boxes_xywh_abs[i]))
annotation_i[DensePoseDataRelative.S_KEY] = self._resample_mask( # pyre-ignore[6]
instances[i].pred_densepose
)
dp_datas.append(DensePoseDataRelative(annotation_i))
# create densepose annotations on CPU
dp_list = DensePoseList(dp_datas, boxes_xyxy_abs, instances.image_size)
return dp_list
def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
"""
        Sample DensePoseDataRelative from estimation results
"""
labels, dp_result = self._produce_labels_and_results(instance)
annotation = {
DensePoseDataRelative.X_KEY: [],
DensePoseDataRelative.Y_KEY: [],
DensePoseDataRelative.U_KEY: [],
DensePoseDataRelative.V_KEY: [],
DensePoseDataRelative.I_KEY: [],
}
n, h, w = dp_result.shape
for part_id in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
# indices - tuple of 3 1D tensors of size k
# 0: index along the first dimension N
# 1: index along H dimension
# 2: index along W dimension
indices = torch.nonzero(labels.expand(n, h, w) == part_id, as_tuple=True)
# values - an array of size [n, k]
# n: number of channels (U, V, confidences)
# k: number of points labeled with part_id
values = dp_result[indices].view(n, -1)
k = values.shape[1]
count = min(self.count_per_class, k)
if count <= 0:
continue
index_sample = self._produce_index_sample(values, count)
sampled_values = values[:, index_sample]
sampled_y = indices[1][index_sample] + 0.5
sampled_x = indices[2][index_sample] + 0.5
# prepare / normalize data
x = (sampled_x / w * 256.0).cpu().tolist()
y = (sampled_y / h * 256.0).cpu().tolist()
u = sampled_values[0].clamp(0, 1).cpu().tolist()
v = sampled_values[1].clamp(0, 1).cpu().tolist()
fine_segm_labels = [part_id] * count
# extend annotations
annotation[DensePoseDataRelative.X_KEY].extend(x)
annotation[DensePoseDataRelative.Y_KEY].extend(y)
annotation[DensePoseDataRelative.U_KEY].extend(u)
annotation[DensePoseDataRelative.V_KEY].extend(v)
annotation[DensePoseDataRelative.I_KEY].extend(fine_segm_labels)
return annotation
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Abstract method to produce a sample of indices to select data
To be implemented in descendants
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
raise NotImplementedError
def _produce_labels_and_results(self, instance: Instances) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutput`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [2, H, W], stacked DensePose results u and v
"""
converter = ToChartResultConverter
chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
return labels, dp_result
def _resample_mask(self, output: Any) -> torch.Tensor:
"""
Convert DensePose predictor output to segmentation annotation - tensors of size
(256, 256) and type `int64`.
Args:
output: DensePose predictor output with the following attributes:
- coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
segmentation scores
- fine_segm: tensor of size [N, C, H, W] with unnormalized fine
segmentation scores
Return:
Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
where S = DensePoseDataRelative.MASK_SIZE
"""
sz = DensePoseDataRelative.MASK_SIZE
S = (
F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
.argmax(dim=1)
.long()
)
I = (
(
F.interpolate(
output.fine_segm, (sz, sz), mode="bilinear", align_corners=False
).argmax(dim=1)
* (S > 0).long()
)
.squeeze()
.cpu()
)
# Map fine segmentation results to coarse segmentation ground truth
# TODO: extract this into separate classes
# coarse segmentation: 1 = Torso, 2 = Right Hand, 3 = Left Hand,
# 4 = Left Foot, 5 = Right Foot, 6 = Upper Leg Right, 7 = Upper Leg Left,
# 8 = Lower Leg Right, 9 = Lower Leg Left, 10 = Upper Arm Left,
# 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
# 14 = Head
# fine segmentation: 1, 2 = Torso, 3 = Right Hand, 4 = Left Hand,
# 5 = Left Foot, 6 = Right Foot, 7, 9 = Upper Leg Right,
# 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right,
# 12, 14 = Lower Leg Left, 15, 17 = Upper Arm Left,
# 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left,
# 20, 22 = Lower Arm Right, 23, 24 = Head
FINE_TO_COARSE_SEGMENTATION = {
1: 1,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7,
9: 6,
10: 7,
11: 8,
12: 9,
13: 8,
14: 9,
15: 10,
16: 11,
17: 10,
18: 11,
19: 12,
20: 13,
21: 12,
22: 13,
23: 14,
24: 14,
}
mask = torch.zeros((sz, sz), dtype=torch.int64, device=torch.device("cpu"))
for i in range(DensePoseDataRelative.N_PART_LABELS):
mask[I == i + 1] = FINE_TO_COARSE_SEGMENTATION[i + 1]
return mask
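class _ExampleFirstKSampler(DensePoseBaseSampler):
    # Illustrative sketch (not part of the original module): a concrete sampler
    # only needs to provide `_produce_index_sample`; this variant keeps the
    # first `count` points of each part (compare with the uniform and
    # confidence-based samplers in the sibling modules).
    def _produce_index_sample(self, values: torch.Tensor, count: int):
        return list(range(count))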
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import torch
from .densepose_base import DensePoseBaseSampler
class DensePoseUniformSampler(DensePoseBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn uniformly over all pixels estimated
to belong to that class.
"""
def __init__(self, count_per_class: int = 8):
"""
Constructor
Args:
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
super().__init__(count_per_class)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a uniform sample of indices to select data
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
return random.sample(range(k), count)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_uniform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.converters.base import IntTupleBox
from .densepose_cse_base import DensePoseCSEBaseSampler
class DensePoseCSEConfidenceBasedSampler(DensePoseCSEBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""
def __init__(
self,
cfg: CfgNode,
use_gt_categories: bool,
embedder: torch.nn.Module,
confidence_channel: str,
count_per_class: int = 8,
search_count_multiplier: Optional[float] = None,
search_proportion: Optional[float] = None,
):
"""
Constructor
Args:
cfg (CfgNode): the config of the model
embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
confidence_channel (str): confidence channel to use for sampling;
possible values:
"coarse_segm_confidence": confidences for coarse segmentation
(default: "coarse_segm_confidence")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
            search_proportion (float or None): if not None, the total number
                of the most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""
super().__init__(cfg, use_gt_categories, embedder, count_per_class)
self.confidence_channel = confidence_channel
self.search_count_multiplier = search_count_multiplier
self.search_proportion = search_proportion
assert (search_count_multiplier is None) or (search_proportion is None), (
f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
f"and search_proportion (={search_proportion})"
)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): a tensor of length k that contains confidences
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
if k == count:
index_sample = list(range(k))
else:
# take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
_, sorted_confidence_indices = torch.sort(values[0])
if self.search_count_multiplier is not None:
search_count = min(int(count * self.search_count_multiplier), k) # pyre-ignore[58]
elif self.search_proportion is not None:
search_count = min(max(int(k * self.search_proportion), count), k)
else:
search_count = min(count, k)
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_confidence_indices[:search_count][sample_from_top]
return index_sample
def _produce_mask_and_results(
self, instance: Instances, bbox_xywh: IntTupleBox
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance
Args:
instance (Instances): an instance of
`DensePoseEmbeddingPredictorOutputWithConfidences`
bbox_xywh (IntTupleBox): the corresponding bounding box
Return:
mask (torch.Tensor): shape [H, W], DensePose segmentation mask
            embeddings (torch.Tensor): a tensor of shape [D, H, W],
                DensePose CSE embeddings
            other_values (torch.Tensor): a tensor of shape [1, H, W], DensePose CSE confidence
"""
_, _, w, h = bbox_xywh
densepose_output = instance.pred_densepose
mask, embeddings, _ = super()._produce_mask_and_results(instance, bbox_xywh)
other_values = F.interpolate(
getattr(densepose_output, self.confidence_channel), size=(h, w), mode="bilinear"
)[0].cpu()
return mask, embeddings, other_values
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_confidence_based.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from densepose.converters import ToChartResultConverterWithConfidences
from .densepose_base import DensePoseBaseSampler
class DensePoseConfidenceBasedSampler(DensePoseBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""
def __init__(
self,
confidence_channel: str,
count_per_class: int = 8,
search_count_multiplier: Optional[float] = None,
search_proportion: Optional[float] = None,
):
"""
Constructor
Args:
confidence_channel (str): confidence channel to use for sampling;
possible values:
"sigma_2": confidences for UV values
"fine_segm_confidence": confidences for fine segmentation
"coarse_segm_confidence": confidences for coarse segmentation
(default: "sigma_2")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
            search_proportion (float or None): if not None, the total number
                of the most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""
super().__init__(count_per_class)
self.confidence_channel = confidence_channel
self.search_count_multiplier = search_count_multiplier
self.search_proportion = search_proportion
assert (search_count_multiplier is None) or (search_proportion is None), (
f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
f"and search_proportion (={search_proportion})"
)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
if k == count:
index_sample = list(range(k))
else:
# take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
_, sorted_confidence_indices = torch.sort(values[2])
if self.search_count_multiplier is not None:
search_count = min(int(count * self.search_count_multiplier), k) # pyre-ignore[58]
elif self.search_proportion is not None:
search_count = min(max(int(k * self.search_proportion), count), k)
else:
search_count = min(count, k)
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_confidence_indices[:search_count][sample_from_top]
return index_sample
def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance, with confidences
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v
stacked with the confidence channel
"""
converter = ToChartResultConverterWithConfidences
chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
dp_result = torch.cat(
(dp_result, getattr(chart_result, self.confidence_channel)[None].cpu())
)
return labels, dp_result
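def _example_search_count(k: int = 1000, count: int = 8):
    # Illustrative sketch (not part of the original module): how the size of
    # the candidate pool of most confident points is chosen for k = 1000
    # labeled points and count = 8 samples.
    pool_multiplier = min(int(count * 4.0), k)  # search_count_multiplier=4.0 -> 32
    pool_proportion = min(max(int(k * 0.1), count), k)  # search_proportion=0.1 -> 100
    return pool_multiplier, pool_proportion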
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_confidence_based.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.converters.base import IntTupleBox
from densepose.data.utils import get_class_to_mesh_name_mapping
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.structures import DensePoseDataRelative
from .densepose_base import DensePoseBaseSampler
class DensePoseCSEBaseSampler(DensePoseBaseSampler):
"""
Base DensePose sampler to produce DensePose data from DensePose predictions.
Samples for each class are drawn according to some distribution over all pixels estimated
to belong to that class.
"""
def __init__(
self,
cfg: CfgNode,
use_gt_categories: bool,
embedder: torch.nn.Module,
count_per_class: int = 8,
):
"""
Constructor
Args:
cfg (CfgNode): the config of the model
            embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
            use_gt_categories (bool): if True, take the instance category from the
                GT `dataset_classes` field instead of `pred_classes`
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
super().__init__(count_per_class)
self.embedder = embedder
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.use_gt_categories = use_gt_categories
def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
"""
        Sample DensePoseDataRelative from estimation results
"""
if self.use_gt_categories:
instance_class = instance.dataset_classes.tolist()[0]
else:
instance_class = instance.pred_classes.tolist()[0]
mesh_name = self.class_to_mesh_name[instance_class]
annotation = {
DensePoseDataRelative.X_KEY: [],
DensePoseDataRelative.Y_KEY: [],
DensePoseDataRelative.VERTEX_IDS_KEY: [],
DensePoseDataRelative.MESH_NAME_KEY: mesh_name,
}
mask, embeddings, other_values = self._produce_mask_and_results(instance, bbox_xywh)
indices = torch.nonzero(mask, as_tuple=True)
selected_embeddings = embeddings.permute(1, 2, 0)[indices]
values = other_values[:, indices[0], indices[1]]
k = values.shape[1]
count = min(self.count_per_class, k)
if count <= 0:
return annotation
index_sample = self._produce_index_sample(values, count)
closest_vertices = squared_euclidean_distance_matrix(
selected_embeddings[index_sample], self.embedder(mesh_name)
)
closest_vertices = torch.argmin(closest_vertices, dim=1)
sampled_y = indices[0][index_sample] + 0.5
sampled_x = indices[1][index_sample] + 0.5
# prepare / normalize data
_, _, w, h = bbox_xywh
x = (sampled_x / w * 256.0).cpu().tolist()
y = (sampled_y / h * 256.0).cpu().tolist()
# extend annotations
annotation[DensePoseDataRelative.X_KEY].extend(x)
annotation[DensePoseDataRelative.Y_KEY].extend(y)
annotation[DensePoseDataRelative.VERTEX_IDS_KEY].extend(closest_vertices.cpu().tolist())
return annotation
def _produce_mask_and_results(
self, instance: Instances, bbox_xywh: IntTupleBox
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
        Method to get the segmentation mask, CSE embeddings and additional values from an instance
Args:
instance (Instances): an instance of `DensePoseEmbeddingPredictorOutput`
bbox_xywh (IntTupleBox): the corresponding bounding box
Return:
mask (torch.Tensor): shape [H, W], DensePose segmentation mask
            embeddings (torch.Tensor): a tensor of shape [D, H, W] with
                DensePose CSE embeddings
            other_values (torch.Tensor): a tensor of shape [0, H, W],
                a placeholder for potential additional values
"""
densepose_output = instance.pred_densepose
S = densepose_output.coarse_segm
E = densepose_output.embedding
_, _, w, h = bbox_xywh
embeddings = F.interpolate(E, size=(h, w), mode="bilinear")[0].cpu()
coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].cpu()
mask = coarse_segm_resized.argmax(0) > 0
other_values = torch.empty((0, h, w))
return mask, embeddings, other_values
def _resample_mask(self, output: Any) -> torch.Tensor:
"""
Convert DensePose predictor output to segmentation annotation - tensors of size
(256, 256) and type `int64`.
Args:
output: DensePose predictor output with the following attributes:
- coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
segmentation scores
Return:
Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
where S = DensePoseDataRelative.MASK_SIZE
"""
sz = DensePoseDataRelative.MASK_SIZE
mask = (
F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
.argmax(dim=1)
.long()
.squeeze()
.cpu()
)
return mask
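# --- Hedged illustration (not part of the original file) ---
# A minimal sketch of the pixel-selection step used in `_sample` above: the
# non-zero pixels of a foreground mask are gathered into per-pixel embedding
# vectors via torch.nonzero / permute, mirroring the lines that build
# `selected_embeddings`. The shapes (D=16, H=W=32) and the square foreground
# blob are illustrative assumptions, not values taken from DensePose.
if __name__ == "__main__":
    D, H, W = 16, 32, 32
    mask = torch.zeros((H, W), dtype=torch.bool)
    mask[8:24, 8:24] = True  # a 16x16 foreground blob -> 256 pixels
    embeddings = torch.randn(D, H, W)
    indices = torch.nonzero(mask, as_tuple=True)
    selected_embeddings = embeddings.permute(1, 2, 0)[indices]
    print(selected_embeddings.shape)  # torch.Size([256, 16])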
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .image import ImageResizeTransform
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/transform/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
class ImageResizeTransform:
"""
Transform that resizes images loaded from a dataset
(BGR data in NCHW channel order, typically uint8) to a format ready to be
consumed by DensePose training (BGR float32 data in NCHW channel order)
"""
def __init__(self, min_size: int = 800, max_size: int = 1333):
self.min_size = min_size
self.max_size = max_size
def __call__(self, images: torch.Tensor) -> torch.Tensor:
"""
Args:
images (torch.Tensor): tensor of size [N, 3, H, W] that contains
BGR data (typically in uint8)
Returns:
images (torch.Tensor): tensor of size [N, 3, H1, W1] where
H1 and W1 are chosen to respect the specified min and max sizes
                and preserve the original aspect ratio; the data channels
follow BGR order and the data type is `torch.float32`
"""
        # rescale so the smaller side matches min_size unless the larger side would exceed max_size
images = images.float()
min_size = min(images.shape[-2:])
max_size = max(images.shape[-2:])
scale = min(self.min_size / min_size, self.max_size / max_size)
images = torch.nn.functional.interpolate(
images, scale_factor=scale, mode="bilinear", align_corners=False
)
return images
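# --- Hedged usage sketch (not part of the original file) ---
# Illustrates how ImageResizeTransform rescales a batch: a uint8 batch of
# 100x100 images is converted to float32 and scaled by
# min(800 / 100, 1333 / 100) = 8, giving an 800x800 output. The batch size and
# image size below are illustrative assumptions.
if __name__ == "__main__":
    batch = torch.randint(0, 256, (2, 3, 100, 100), dtype=torch.uint8)
    transform = ImageResizeTransform()
    resized = transform(batch)
    print(resized.shape, resized.dtype)  # torch.Size([2, 3, 800, 800]) torch.float32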
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/data/transform/image.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .trainer import Trainer
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/engine/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from collections import OrderedDict
from typing import List, Optional, Union
import torch
from torch import nn
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import (
DatasetEvaluator,
DatasetEvaluators,
inference_on_dataset,
print_csv_format,
)
from detectron2.solver.build import get_default_optimizer_params, maybe_add_gradient_clipping
from detectron2.utils import comm
from detectron2.utils.events import EventWriter, get_event_storage
from densepose import DensePoseDatasetMapperTTA, DensePoseGeneralizedRCNNWithTTA, load_from_cfg
from densepose.data import (
DatasetMapper,
build_combined_loader,
build_detection_test_loader,
build_detection_train_loader,
build_inference_based_loaders,
has_inference_based_loaders,
)
from densepose.evaluation.d2_evaluator_adapter import Detectron2COCOEvaluatorAdapter
from densepose.evaluation.evaluator import DensePoseCOCOEvaluator, build_densepose_evaluator_storage
from densepose.modeling.cse import Embedder
class SampleCountingLoader:
def __init__(self, loader):
self.loader = loader
def __iter__(self):
it = iter(self.loader)
storage = get_event_storage()
while True:
try:
batch = next(it)
num_inst_per_dataset = {}
for data in batch:
dataset_name = data["dataset"]
if dataset_name not in num_inst_per_dataset:
num_inst_per_dataset[dataset_name] = 0
num_inst = len(data["instances"])
num_inst_per_dataset[dataset_name] += num_inst
for dataset_name in num_inst_per_dataset:
storage.put_scalar(f"batch/{dataset_name}", num_inst_per_dataset[dataset_name])
yield batch
except StopIteration:
break
class SampleCountMetricPrinter(EventWriter):
def __init__(self):
self.logger = logging.getLogger(__name__)
def write(self):
storage = get_event_storage()
batch_stats_strs = []
for key, buf in storage.histories().items():
if key.startswith("batch/"):
batch_stats_strs.append(f"{key} {buf.avg(20)}")
self.logger.info(", ".join(batch_stats_strs))
class Trainer(DefaultTrainer):
@classmethod
def extract_embedder_from_model(cls, model: nn.Module) -> Optional[Embedder]:
if isinstance(model, nn.parallel.DistributedDataParallel):
model = model.module
if hasattr(model, "roi_heads") and hasattr(model.roi_heads, "embedder"):
# pyre-fixme[16]: `Tensor` has no attribute `embedder`.
return model.roi_heads.embedder
return None
# TODO: the only reason to copy the base class code here is to pass the embedder from
# the model to the evaluator; that should be refactored to avoid unnecessary copy-pasting
@classmethod
def test(
cls,
cfg: CfgNode,
model: nn.Module,
evaluators: Optional[Union[DatasetEvaluator, List[DatasetEvaluator]]] = None,
):
"""
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (DatasetEvaluator, list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
embedder = cls.extract_embedder_from_model(model)
evaluator = cls.build_evaluator(cfg, dataset_name, embedder=embedder)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE or comm.is_main_process():
results_i = inference_on_dataset(model, data_loader, evaluator)
else:
results_i = {}
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@classmethod
def build_evaluator(
cls,
cfg: CfgNode,
dataset_name: str,
output_folder: Optional[str] = None,
embedder: Optional[Embedder] = None,
) -> DatasetEvaluators:
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluators = []
distributed = cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE
# Note: we currently use COCO evaluator for both COCO and LVIS datasets
# to have compatible metrics. LVIS bbox evaluator could also be used
# with an adapter to properly handle filtered / mapped categories
# evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
# if evaluator_type == "coco":
# evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# elif evaluator_type == "lvis":
# evaluators.append(LVISEvaluator(dataset_name, output_dir=output_folder))
evaluators.append(
Detectron2COCOEvaluatorAdapter(
dataset_name, output_dir=output_folder, distributed=distributed
)
)
if cfg.MODEL.DENSEPOSE_ON:
storage = build_densepose_evaluator_storage(cfg, output_folder)
evaluators.append(
DensePoseCOCOEvaluator(
dataset_name,
distributed,
output_folder,
evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE,
min_iou_threshold=cfg.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD,
storage=storage,
embedder=embedder,
should_evaluate_mesh_alignment=cfg.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT,
mesh_alignment_mesh_names=cfg.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES,
)
)
return DatasetEvaluators(evaluators)
@classmethod
def build_optimizer(cls, cfg: CfgNode, model: nn.Module):
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
overrides={
"features": {
"lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR,
},
"embeddings": {
"lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR,
},
},
)
optimizer = torch.optim.SGD(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
return maybe_add_gradient_clipping(cfg, optimizer)
@classmethod
def build_test_loader(cls, cfg: CfgNode, dataset_name):
return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
@classmethod
def build_train_loader(cls, cfg: CfgNode):
data_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
if not has_inference_based_loaders(cfg):
return data_loader
model = cls.build_model(cfg)
model.to(cfg.BOOTSTRAP_MODEL.DEVICE)
DetectionCheckpointer(model).resume_or_load(cfg.BOOTSTRAP_MODEL.WEIGHTS, resume=False)
inference_based_loaders, ratios = build_inference_based_loaders(cfg, model)
loaders = [data_loader] + inference_based_loaders
ratios = [1.0] + ratios
combined_data_loader = build_combined_loader(cfg, loaders, ratios)
sample_counting_loader = SampleCountingLoader(combined_data_loader)
return sample_counting_loader
def build_writers(self):
writers = super().build_writers()
writers.append(SampleCountMetricPrinter())
return writers
@classmethod
def test_with_TTA(cls, cfg: CfgNode, model):
logger = logging.getLogger("detectron2.trainer")
        # At the end of training, run an evaluation with TTA.
        # Only supports some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
transform_data = load_from_cfg(cfg)
model = DensePoseGeneralizedRCNNWithTTA(
cfg, model, transform_data, DensePoseDatasetMapperTTA(cfg)
)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators) # pyre-ignore[6]
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
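# --- Hedged usage sketch (not part of the original file) ---
# Illustrates how the per-dataset counters logged by SampleCountingLoader are
# picked up by SampleCountMetricPrinter: scalars written under "batch/<name>"
# inside an EventStorage context are averaged and logged by write(). The
# dataset name and counts below are illustrative assumptions.
if __name__ == "__main__":
    from detectron2.utils.events import EventStorage

    logging.basicConfig(level=logging.INFO)
    with EventStorage(0) as storage:
        for num_inst in (3, 5, 4):
            storage.put_scalar("batch/some_dataset", num_inst)
        SampleCountMetricPrinter().write()  # logs "batch/some_dataset 4.0"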
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/engine/trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import unittest
from densepose.data.video import FirstKFramesSelector, LastKFramesSelector, RandomKFramesSelector
class TestFrameSelector(unittest.TestCase):
def test_frame_selector_random_k_1(self):
_SEED = 43
_K = 4
random.seed(_SEED)
selector = RandomKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = [0, 8, 4, 6]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_random_k_2(self):
_SEED = 43
_K = 10
random.seed(_SEED)
selector = RandomKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = [0, 2, 4]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_first_k_1(self):
_K = 4
selector = FirstKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = frame_tss[:_K]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_first_k_2(self):
_K = 10
selector = FirstKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = frame_tss[:_K]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_last_k_1(self):
_K = 4
selector = LastKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = frame_tss[-_K:]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_last_k_2(self):
_K = 10
selector = LastKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = frame_tss[-_K:]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_frame_selector.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from .common import (
get_config_files,
get_evolution_config_files,
get_hrnet_config_files,
get_quick_schedules_config_files,
setup,
)
class TestSetup(unittest.TestCase):
def _test_setup(self, config_file):
setup(config_file)
def test_setup_configs(self):
config_files = get_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_evolution_configs(self):
config_files = get_evolution_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_hrnet_configs(self):
config_files = get_hrnet_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_quick_schedules_configs(self):
config_files = get_quick_schedules_config_files()
for config_file in config_files:
self._test_setup(config_file)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_structures.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures import BitMasks, Boxes, Instances
from .common import get_model
# TODO(plabatut): Modularize detectron2 tests and re-use
def make_model_inputs(image, instances=None):
if instances is None:
return {"image": image}
return {"image": image, "instances": instances}
def make_empty_instances(h, w):
instances = Instances((h, w))
instances.gt_boxes = Boxes(torch.rand(0, 4))
instances.gt_classes = torch.tensor([]).to(dtype=torch.int64)
instances.gt_masks = BitMasks(torch.rand(0, h, w))
return instances
class ModelE2ETest(unittest.TestCase):
CONFIG_PATH = ""
def setUp(self):
self.model = get_model(self.CONFIG_PATH)
def _test_eval(self, sizes):
inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes]
self.model.eval()
self.model(inputs)
class DensePoseRCNNE2ETest(ModelE2ETest):
CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml"
def test_empty_data(self):
self._test_eval([(200, 250), (200, 249)])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_model_e2e.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import random
import tempfile
import unittest
import torch
import torchvision.io as io
from densepose.data.transform import ImageResizeTransform
from densepose.data.video import RandomKFramesSelector, VideoKeyframeDataset
try:
import av
except ImportError:
av = None
# copied from torchvision test/test_io.py
def _create_video_frames(num_frames, height, width):
y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width))
data = []
for i in range(num_frames):
xc = float(i) / num_frames
yc = 1 - float(i) / (2 * num_frames)
d = torch.exp(-((x - xc) ** 2 + (y - yc) ** 2) / 2) * 255
data.append(d.unsqueeze(2).repeat(1, 1, 3).byte())
return torch.stack(data, 0)
# adapted from torchvision test/test_io.py
@contextlib.contextmanager
def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None, options=None):
if lossless:
if video_codec is not None:
raise ValueError("video_codec can't be specified together with lossless")
if options is not None:
raise ValueError("options can't be specified together with lossless")
video_codec = "libx264rgb"
options = {"crf": "0"}
if video_codec is None:
video_codec = "libx264"
if options is None:
options = {}
data = _create_video_frames(num_frames, height, width)
with tempfile.NamedTemporaryFile(suffix=".mp4") as f:
f.close()
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
yield f.name, data
os.unlink(f.name)
@unittest.skipIf(av is None, "PyAV unavailable")
class TestVideoKeyframeDataset(unittest.TestCase):
def test_read_keyframes_all(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
dataset = VideoKeyframeDataset(video_list, category_list)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((5, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(3)
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((3, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector_with_transform(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(1)
transform = ImageResizeTransform()
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector, transform)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(len(dataset), 1)
self.assertEqual(data1.shape, torch.Size((1, 3, 800, 800)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_video_keyframe_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import unittest
from typing import Any, Iterable, Iterator, Tuple
from densepose.data import CombinedDataLoader
def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:
"""
Group elements of an iterable by chunks of size `n`, e.g.
grouper(range(9), 4) ->
(0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None)
"""
it = iter(iterable)
while True:
values = []
for _ in range(n):
try:
value = next(it)
except StopIteration:
values.extend([fillvalue] * (n - len(values)))
yield tuple(values)
return
values.append(value)
yield tuple(values)
class TestCombinedDataLoader(unittest.TestCase):
def test_combine_loaders_1(self):
loader1 = _grouper([f"1_{i}" for i in range(10)], 2)
loader2 = _grouper([f"2_{i}" for i in range(11)], 3)
batch_size = 4
ratios = (0.1, 0.9)
random.seed(43)
combined = CombinedDataLoader((loader1, loader2), batch_size, ratios)
BATCHES_GT = [
["1_0", "1_1", "2_0", "2_1"],
["2_2", "2_3", "2_4", "2_5"],
["1_2", "1_3", "2_6", "2_7"],
["2_8", "2_9", "2_10", None],
]
for i, batch in enumerate(combined):
self.assertEqual(len(batch), batch_size)
self.assertEqual(batch, BATCHES_GT[i])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_combine_data_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, Instances
from densepose.modeling.losses.utils import ChartBasedAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList
image_shape = (100, 100)
instances = Instances(image_shape)
n_instances = 3
instances.proposal_boxes = Boxes(torch.rand(n_instances, 4))
instances.gt_boxes = Boxes(torch.rand(n_instances, 4))
# instances.gt_densepose = None cannot happen because instances attributes need a length
class TestChartBasedAnnotationsAccumulator(unittest.TestCase):
def test_chart_based_annotations_accumulator_no_gt_densepose(self):
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose_none(self):
instances.gt_densepose = [None] * n_instances
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose(self):
data_relative_keys = [
DensePoseDataRelative.X_KEY,
DensePoseDataRelative.Y_KEY,
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
DensePoseDataRelative.S_KEY,
]
annotations = [DensePoseDataRelative({k: [0] for k in data_relative_keys})] * n_instances
instances.gt_densepose = DensePoseList(annotations, instances.gt_boxes, image_shape)
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
bbox_xywh_est = BoxMode.convert(
instances.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
bbox_xywh_gt = BoxMode.convert(
instances.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
expected_values = {
"s_gt": [
torch.zeros((3, DensePoseDataRelative.MASK_SIZE, DensePoseDataRelative.MASK_SIZE))
]
* n_instances,
"bbox_xywh_est": bbox_xywh_est.split(1),
"bbox_xywh_gt": bbox_xywh_gt.split(1),
"point_bbox_with_dp_indices": [torch.tensor([i]) for i in range(n_instances)],
"point_bbox_indices": [torch.tensor([i]) for i in range(n_instances)],
"bbox_indices": list(range(n_instances)),
"nxt_bbox_with_dp_index": n_instances,
"nxt_bbox_index": n_instances,
}
default_value = [torch.tensor([0])] * 3
for key in accumulator.__dict__:
to_test = getattr(accumulator, key)
gt_value = expected_values.get(key, default_value)
if key in ["nxt_bbox_with_dp_index", "nxt_bbox_index"]:
self.assertEqual(to_test, gt_value)
elif key == "bbox_indices":
self.assertListEqual(to_test, gt_value)
else:
self.assertTrue(torch.allclose(torch.stack(to_test), torch.stack(gt_value)))
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_chart_based_annotations_accumulator.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, Instances
from densepose.modeling.losses.embed_utils import CseAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList
class TestCseAnnotationsAccumulator(unittest.TestCase):
def test_cse_annotations_accumulator_nodp(self):
instances_lst = [
self._create_instances_nodp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_sparsedp(self):
instances_lst = [
self._create_instances_sparsedp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_fulldp(self):
instances_lst = [
self._create_instances_fulldp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_combined(self):
instances_lst = [
self._create_instances_nodp(),
self._create_instances_sparsedp(),
self._create_instances_fulldp(),
]
self._test_template(instances_lst)
def _test_template(self, instances_lst):
acc = CseAnnotationsAccumulator()
for instances in instances_lst:
acc.accumulate(instances)
packed_anns = acc.pack()
self._check_correspondence(packed_anns, instances_lst)
def _create_instances_nodp(self):
image_shape = (480, 640)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[40.0, 40.0, 140.0, 140.0],
[160.0, 160.0, 270.0, 270.0],
[40.0, 160.0, 160.0, 280.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[41.0, 39.0, 142.0, 138.0],
[161.0, 159.0, 272.0, 268.0],
[41.0, 159.0, 162.0, 278.0],
]
)
)
# do not add gt_densepose
return instances
def _create_instances_sparsedp(self):
image_shape = (540, 720)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[50.0, 50.0, 130.0, 130.0],
[150.0, 150.0, 240.0, 240.0],
[50.0, 150.0, 230.0, 330.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[49.0, 51.0, 131.0, 129.0],
[151.0, 149.0, 241.0, 239.0],
[51.0, 149.0, 232.0, 329.0],
]
)
)
instances.gt_densepose = DensePoseList(
[
None,
self._create_dp_data(
{
"dp_x": [81.69, 153.47, 151.00],
"dp_y": [162.24, 128.71, 113.81],
"dp_vertex": [0, 1, 2],
"ref_model": "zebra_5002",
"dp_masks": [],
},
{"c": (166, 133), "r": 64},
),
None,
],
instances.gt_boxes,
image_shape,
)
return instances
def _create_instances_fulldp(self):
image_shape = (680, 840)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[65.0, 55.0, 165.0, 155.0],
[170.0, 175.0, 275.0, 280.0],
[55.0, 165.0, 165.0, 275.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[66.0, 54.0, 166.0, 154.0],
[171.0, 174.0, 276.0, 279.0],
[56.0, 164.0, 166.0, 274.0],
]
)
)
instances.gt_densepose = DensePoseList(
[
self._create_dp_data(
{
"dp_x": [149.99, 198.62, 157.59],
"dp_y": [170.74, 197.73, 123.12],
"dp_vertex": [3, 4, 5],
"ref_model": "cat_5001",
"dp_masks": [],
},
{"c": (100, 100), "r": 50},
),
self._create_dp_data(
{
"dp_x": [234.53, 116.72, 71.66],
"dp_y": [107.53, 11.31, 142.32],
"dp_vertex": [6, 7, 8],
"ref_model": "dog_5002",
"dp_masks": [],
},
{"c": (200, 150), "r": 40},
),
self._create_dp_data(
{
"dp_x": [225.54, 202.61, 135.90],
"dp_y": [167.46, 181.00, 211.47],
"dp_vertex": [9, 10, 11],
"ref_model": "elephant_5002",
"dp_masks": [],
},
{"c": (100, 200), "r": 45},
),
],
instances.gt_boxes,
image_shape,
)
return instances
def _create_dp_data(self, anns, blob_def=None):
dp_data = DensePoseDataRelative(anns)
if blob_def is not None:
dp_data.segm[
blob_def["c"][0] - blob_def["r"] : blob_def["c"][0] + blob_def["r"],
blob_def["c"][1] - blob_def["r"] : blob_def["c"][1] + blob_def["r"],
] = 1
return dp_data
def _check_correspondence(self, packed_anns, instances_lst):
instance_idx = 0
data_idx = 0
pt_offset = 0
if packed_anns is not None:
bbox_xyxy_gt = BoxMode.convert(
packed_anns.bbox_xywh_gt.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS
)
bbox_xyxy_est = BoxMode.convert(
packed_anns.bbox_xywh_est.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS
)
for instances in instances_lst:
if not hasattr(instances, "gt_densepose"):
instance_idx += len(instances)
continue
for i, dp_data in enumerate(instances.gt_densepose):
if dp_data is None:
instance_idx += 1
continue
n_pts = len(dp_data.x)
self.assertTrue(
torch.allclose(dp_data.x, packed_anns.x_gt[pt_offset : pt_offset + n_pts])
)
self.assertTrue(
torch.allclose(dp_data.y, packed_anns.y_gt[pt_offset : pt_offset + n_pts])
)
self.assertTrue(torch.allclose(dp_data.segm, packed_anns.coarse_segm_gt[data_idx]))
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * dp_data.mesh_id,
packed_anns.vertex_mesh_ids_gt[pt_offset : pt_offset + n_pts],
)
)
self.assertTrue(
torch.allclose(
dp_data.vertex_ids, packed_anns.vertex_ids_gt[pt_offset : pt_offset + n_pts]
)
)
self.assertTrue(
torch.allclose(instances.gt_boxes.tensor[i], bbox_xyxy_gt[data_idx])
)
self.assertTrue(
torch.allclose(instances.proposal_boxes.tensor[i], bbox_xyxy_est[data_idx])
)
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * data_idx,
packed_anns.point_bbox_with_dp_indices[pt_offset : pt_offset + n_pts],
)
)
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * instance_idx,
packed_anns.point_bbox_indices[pt_offset : pt_offset + n_pts],
)
)
self.assertEqual(instance_idx, packed_anns.bbox_indices[data_idx])
pt_offset += n_pts
instance_idx += 1
data_idx += 1
if data_idx == 0:
self.assertIsNone(packed_anns)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_cse_annotations_accumulator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import tempfile
import unittest
import torch
from torchvision.utils import save_image
from densepose.data.image_list_dataset import ImageListDataset
from densepose.data.transform import ImageResizeTransform
@contextlib.contextmanager
def temp_image(height, width):
random_image = torch.rand(height, width)
with tempfile.NamedTemporaryFile(suffix=".jpg") as f:
f.close()
save_image(random_image, f.name)
yield f.name
os.unlink(f.name)
class TestImageListDataset(unittest.TestCase):
def test_image_list_dataset(self):
height, width = 720, 1280
with temp_image(height, width) as image_fpath:
image_list = [image_fpath]
category_list = [None]
dataset = ImageListDataset(image_list, category_list)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((1, 3, height, width)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
def test_image_list_dataset_with_transform(self):
height, width = 720, 1280
with temp_image(height, width) as image_fpath:
image_list = [image_fpath]
category_list = [None]
transform = ImageResizeTransform()
dataset = ImageListDataset(image_list, category_list, transform)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((1, 3, 749, 1333)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_image_list_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import torch
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.modeling import build_model
from densepose import add_densepose_config
_BASE_CONFIG_DIR = "configs"
_EVOLUTION_CONFIG_SUB_DIR = "evolution"
_HRNET_CONFIG_SUB_DIR = "HRNet"
_QUICK_SCHEDULES_CONFIG_SUB_DIR = "quick_schedules"
_BASE_CONFIG_FILE_PREFIX = "Base-"
_CONFIG_FILE_EXT = ".yaml"
def _get_base_config_dir():
"""
Return the base directory for configurations
"""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", _BASE_CONFIG_DIR)
def _get_evolution_config_dir():
"""
Return the base directory for evolution configurations
"""
return os.path.join(_get_base_config_dir(), _EVOLUTION_CONFIG_SUB_DIR)
def _get_hrnet_config_dir():
"""
Return the base directory for HRNet configurations
"""
return os.path.join(_get_base_config_dir(), _HRNET_CONFIG_SUB_DIR)
def _get_quick_schedules_config_dir():
"""
Return the base directory for quick schedules configurations
"""
return os.path.join(_get_base_config_dir(), _QUICK_SCHEDULES_CONFIG_SUB_DIR)
def _collect_config_files(config_dir):
"""
Collect all configuration files (i.e. densepose_*.yaml) directly in the specified directory
"""
start = _get_base_config_dir()
results = []
for entry in os.listdir(config_dir):
path = os.path.join(config_dir, entry)
if not os.path.isfile(path):
continue
_, ext = os.path.splitext(entry)
if ext != _CONFIG_FILE_EXT:
continue
if entry.startswith(_BASE_CONFIG_FILE_PREFIX):
continue
config_file = os.path.relpath(path, start)
results.append(config_file)
return results
def get_config_files():
"""
Get all the configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_base_config_dir())
def get_evolution_config_files():
"""
Get all the evolution configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_evolution_config_dir())
def get_hrnet_config_files():
"""
Get all the HRNet configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_hrnet_config_dir())
def get_quick_schedules_config_files():
"""
Get all the quick schedules configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_quick_schedules_config_dir())
def get_model_config(config_file):
"""
Load and return the configuration from the specified file (relative to the base configuration
directory)
"""
cfg = get_cfg()
add_densepose_config(cfg)
path = os.path.join(_get_base_config_dir(), config_file)
cfg.merge_from_file(path)
if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
return cfg
def get_model(config_file):
"""
Get the model from the specified file (relative to the base configuration directory)
"""
cfg = get_model_config(config_file)
return build_model(cfg)
def setup(config_file):
"""
Setup the configuration from the specified file (relative to the base configuration directory)
"""
cfg = get_model_config(config_file)
cfg.freeze()
default_setup(cfg, {})
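# --- Hedged usage sketch (not part of the original file) ---
# Illustrates the intended use of the helpers above: configuration files are
# collected relative to the base configuration directory and can then be fed
# to `setup` or `get_model`. Assumes the repository layout with a sibling
# `configs` directory.
if __name__ == "__main__":
    for config_file in get_config_files():
        print(config_file)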
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from densepose.data.transform import ImageResizeTransform
class TestImageResizeTransform(unittest.TestCase):
def test_image_resize_1(self):
images_batch = torch.ones((3, 3, 100, 100), dtype=torch.uint8) * 100
transform = ImageResizeTransform()
images_transformed = transform(images_batch)
IMAGES_GT = torch.ones((3, 3, 800, 800), dtype=torch.float) * 100
self.assertEqual(images_transformed.size(), IMAGES_GT.size())
self.assertAlmostEqual(torch.abs(IMAGES_GT - images_transformed).max().item(), 0.0)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_image_resize_transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import tempfile
import unittest
from contextlib import ExitStack
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils import comm
from densepose.evaluation.tensor_storage import (
SingleProcessFileTensorStorage,
SingleProcessRamTensorStorage,
SizeData,
storage_gather,
)
class TestSingleProcessRamTensorStorage(unittest.TestCase):
def test_read_write_1(self):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
# generate data which corresponds to the schema
data_elts = []
torch.manual_seed(23)
for _i in range(3):
data_elt = {
"tf": torch.rand((112, 112), dtype=torch.float32),
"ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32),
}
data_elts.append(data_elt)
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
# write data to the storage
for i in range(3):
record_id = storage.put(data_elts[i])
self.assertEqual(record_id, i)
# read data from the storage
for i in range(3):
record = storage.get(i)
self.assertEqual(len(record), len(schema))
for field_name in schema:
self.assertTrue(field_name in record)
self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape)
self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype)
self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name]))
class TestSingleProcessFileTensorStorage(unittest.TestCase):
def test_read_write_1(self):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
# generate data which corresponds to the schema
data_elts = []
torch.manual_seed(23)
for _i in range(3):
data_elt = {
"tf": torch.rand((112, 112), dtype=torch.float32),
"ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32),
}
data_elts.append(data_elt)
# WARNING: opens the file several times! may not work on all platforms
with tempfile.NamedTemporaryFile() as hFile:
storage = SingleProcessFileTensorStorage(schema, hFile.name, "wb")
# write data to the storage
for i in range(3):
record_id = storage.put(data_elts[i])
self.assertEqual(record_id, i)
hFile.seek(0)
storage = SingleProcessFileTensorStorage(schema, hFile.name, "rb")
# read data from the storage
for i in range(3):
record = storage.get(i)
self.assertEqual(len(record), len(schema))
for field_name in schema:
self.assertTrue(field_name in record)
self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape)
self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype)
self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name]))
def _find_free_port():
"""
Copied from detectron2/engine/launch.py
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch(main_func, nprocs, args=()):
port = _find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
# dist_url = "env://"
mp.spawn(
distributed_worker, nprocs=nprocs, args=(main_func, nprocs, dist_url, args), daemon=False
)
def distributed_worker(local_rank, main_func, nprocs, dist_url, args):
dist.init_process_group(
backend="gloo", init_method=dist_url, world_size=nprocs, rank=local_rank
)
comm.synchronize()
assert comm._LOCAL_PROCESS_GROUP is None
pg = dist.new_group(list(range(nprocs)))
comm._LOCAL_PROCESS_GROUP = pg
main_func(*args)
def ram_read_write_worker():
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
world_size = comm.get_world_size()
rank = comm.get_rank()
data_elts = []
# prepare different number of tensors in different processes
for i in range(rank + 1):
data_elt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size),
}
data_elts.append(data_elt)
# write data to the single process storage
for i in range(rank + 1):
record_id = storage.put(data_elts[i])
assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}"
comm.synchronize()
# gather all data in process rank 0
multi_storage = storage_gather(storage)
if rank != 0:
return
# read and check data from the multiprocess storage
for j in range(world_size):
        for i in range(j + 1):
record = multi_storage.get(j, i)
record_gt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size),
}
assert len(record) == len(schema), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"expected {len(schema)} fields in the record, got {len(record)}"
)
for field_name in schema:
assert field_name in record, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name} not in the record"
)
assert record_gt[field_name].shape == record[field_name].shape, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected shape {record_gt[field_name].shape} "
f"got {record[field_name].shape}"
)
assert record_gt[field_name].dtype == record[field_name].dtype, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected dtype {record_gt[field_name].dtype} "
f"got {record[field_name].dtype}"
)
assert torch.allclose(record_gt[field_name], record[field_name]), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, tensors are not close enough:"
f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} "
f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} "
)
def file_read_write_worker(rank_to_fpath):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
world_size = comm.get_world_size()
rank = comm.get_rank()
storage = SingleProcessFileTensorStorage(schema, rank_to_fpath[rank], "wb")
data_elts = []
# prepare different number of tensors in different processes
for i in range(rank + 1):
data_elt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size),
}
data_elts.append(data_elt)
# write data to the single process storage
for i in range(rank + 1):
record_id = storage.put(data_elts[i])
assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}"
comm.synchronize()
# gather all data in process rank 0
multi_storage = storage_gather(storage)
if rank != 0:
return
# read and check data from the multiprocess storage
for j in range(world_size):
        for i in range(j + 1):
record = multi_storage.get(j, i)
record_gt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size),
}
assert len(record) == len(schema), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"expected {len(schema)} fields in the record, got {len(record)}"
)
for field_name in schema:
assert field_name in record, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name} not in the record"
)
assert record_gt[field_name].shape == record[field_name].shape, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected shape {record_gt[field_name].shape} "
f"got {record[field_name].shape}"
)
assert record_gt[field_name].dtype == record[field_name].dtype, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected dtype {record_gt[field_name].dtype} "
f"got {record[field_name].dtype}"
)
assert torch.allclose(record_gt[field_name], record[field_name]), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, tensors are not close enough:"
f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} "
f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} "
)
class TestMultiProcessRamTensorStorage(unittest.TestCase):
def test_read_write_1(self):
launch(ram_read_write_worker, 8)
class TestMultiProcessFileTensorStorage(unittest.TestCase):
def test_read_write_1(self):
with ExitStack() as stack:
# WARNING: opens the files several times! may not work on all platforms
rank_to_fpath = {
i: stack.enter_context(tempfile.NamedTemporaryFile()).name for i in range(8)
}
launch(file_read_write_worker, 8, (rank_to_fpath,))
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_tensor_storage.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from densepose.data.datasets.builtin import COCO_DATASETS, DENSEPOSE_ANNOTATIONS_DIR, LVIS_DATASETS
from densepose.data.datasets.coco import load_coco_json
from densepose.data.datasets.lvis import load_lvis_json
from densepose.data.utils import maybe_prepend_base_path
from densepose.structures import DensePoseDataRelative
class TestDatasetLoadedAnnotations(unittest.TestCase):
COCO_DATASET_DATA = {
"densepose_coco_2014_train": {"n_instances": 39210},
"densepose_coco_2014_minival": {"n_instances": 2243},
"densepose_coco_2014_minival_100": {"n_instances": 164},
"densepose_coco_2014_valminusminival": {"n_instances": 7297},
"densepose_coco_2014_train_cse": {"n_instances": 39210},
"densepose_coco_2014_minival_cse": {"n_instances": 2243},
"densepose_coco_2014_minival_100_cse": {"n_instances": 164},
"densepose_coco_2014_valminusminival_cse": {"n_instances": 7297},
"densepose_chimps": {"n_instances": 930},
"posetrack2017_train": {"n_instances": 8274},
"posetrack2017_val": {"n_instances": 4753},
"lvis_v05_train": {"n_instances": 5186},
"lvis_v05_val": {"n_instances": 1037},
}
LVIS_DATASET_DATA = {
"densepose_lvis_v1_train1": {"n_instances": 3394},
"densepose_lvis_v1_train2": {"n_instances": 1800},
"densepose_lvis_v1_val": {"n_instances": 1037},
"densepose_lvis_v1_val_animals_100": {"n_instances": 89},
}
def generic_coco_test(self, dataset_info):
if dataset_info.name not in self.COCO_DATASET_DATA:
return
n_inst = self.COCO_DATASET_DATA[dataset_info.name]["n_instances"]
self.generic_test(dataset_info, n_inst, load_coco_json)
def generic_lvis_test(self, dataset_info):
if dataset_info.name not in self.LVIS_DATASET_DATA:
return
n_inst = self.LVIS_DATASET_DATA[dataset_info.name]["n_instances"]
self.generic_test(dataset_info, n_inst, load_lvis_json)
def generic_test(self, dataset_info, n_inst, loader_fun):
datasets_root = DENSEPOSE_ANNOTATIONS_DIR
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_info.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_info.images_root)
image_annotation_dicts = loader_fun(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_info.name,
)
num_valid = sum(
1
for image_annotation_dict in image_annotation_dicts
for ann in image_annotation_dict["annotations"]
if DensePoseDataRelative.validate_annotation(ann)[0]
)
self.assertEqual(num_valid, n_inst)
def coco_test_fun(dataset_info):
return lambda self: self.generic_coco_test(dataset_info)
for dataset_info in COCO_DATASETS:
setattr(
TestDatasetLoadedAnnotations,
f"test_coco_builtin_loaded_annotations_{dataset_info.name}",
coco_test_fun(dataset_info),
)
def lvis_test_fun(dataset_info):
return lambda self: self.generic_lvis_test(dataset_info)
for dataset_info in LVIS_DATASETS:
setattr(
TestDatasetLoadedAnnotations,
f"test_lvis_builtin_loaded_annotations_{dataset_info.name}",
lvis_test_fun(dataset_info),
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_dataset_loaded_annotations.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Panoptic-DeepLab Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
)
from detectron2.projects.deeplab import build_lr_scheduler
from detectron2.projects.panoptic_deeplab import (
PanopticDeeplabDatasetMapper,
add_panoptic_deeplab_config,
)
from detectron2.solver import get_default_optimizer_params
from detectron2.solver.build import maybe_add_gradient_clipping
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
augs.append(T.RandomFlip())
return augs
class Trainer(DefaultTrainer):
"""
    We use the "DefaultTrainer" which contains pre-defined logic for the
    standard training workflow. It may not work for you, especially if you
    are working on a new research project. In that case you can use the cleaner
    "SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED:
return None
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["cityscapes_panoptic_seg", "coco_panoptic_seg"]:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_panoptic_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently does not work with multiple machines."
evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
if evaluator_type == "coco_panoptic_seg":
# `thing_classes` in COCO panoptic metadata includes both thing and
# stuff classes for visualization. COCOEvaluator requires metadata
# which only contains thing classes, thus we map the name of
# panoptic datasets to their corresponding instance datasets.
dataset_name_mapper = {
"coco_2017_val_panoptic": "coco_2017_val",
"coco_2017_val_100_panoptic": "coco_2017_val_100",
}
evaluator_list.append(
COCOEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder)
)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg))
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_optimizer(cls, cfg, model):
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
)
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
)
elif optimizer_type == "ADAM":
return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(params, cfg.SOLVER.BASE_LR)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/train_net.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
from detectron2.projects.deeplab import add_deeplab_config
def add_panoptic_deeplab_config(cfg):
"""
Add config for Panoptic-DeepLab.
"""
# Reuse DeepLab config.
add_deeplab_config(cfg)
# Target generation parameters.
cfg.INPUT.GAUSSIAN_SIGMA = 10
cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True
cfg.INPUT.SMALL_INSTANCE_AREA = 4096
cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3
cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False
# Optimizer type.
cfg.SOLVER.OPTIMIZER = "ADAM"
# Panoptic-DeepLab semantic segmentation head.
# We add an extra convolution before predictor.
cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256
cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2
# Panoptic-DeepLab instance segmentation head.
cfg.MODEL.INS_EMBED_HEAD = CN()
cfg.MODEL.INS_EMBED_HEAD.NAME = "PanopticDeepLabInsEmbedHead"
cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ["res2", "res3", "res5"]
cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ["res2", "res3"]
cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64]
cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256
cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18]
cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1
# We add an extra convolution before predictor.
cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32
cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128
cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4
cfg.MODEL.INS_EMBED_HEAD.NORM = "SyncBN"
cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0
cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01
# Panoptic-DeepLab post-processing setting.
cfg.MODEL.PANOPTIC_DEEPLAB = CN()
# Stuff area limit, ignore stuff region below this number.
cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048
cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1
cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7
cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200
# If set to False, Panoptic-DeepLab will not evaluate instance segmentation.
cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True
cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False
    # This is the padding parameter for images with various sizes. ASPP layers
    # require input images to be divisible by the average pooling size, and
    # `MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY` can be used to pad all images
    # to a fixed resolution (e.g. 640x640 for COCO) to avoid an image size that
    # is not divisible by the ASPP average pooling size.
cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY = -1
# Only evaluates network speed (ignores post-processing).
cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED = False
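# --- Hedged usage sketch (not part of the original file) ---
# Illustrates extending a default detectron2 config with the Panoptic-DeepLab
# options registered above; printing two of the new keys is only meant to show
# that the nodes exist after the call.
if __name__ == "__main__":
    from detectron2.config import get_cfg

    cfg = get_cfg()
    add_panoptic_deeplab_config(cfg)
    print(cfg.SOLVER.OPTIMIZER)  # "ADAM"
    print(cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL)  # 7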
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_panoptic_deeplab_config
from .dataset_mapper import PanopticDeeplabDatasetMapper
from .panoptic_seg import (
PanopticDeepLab,
INS_EMBED_BRANCHES_REGISTRY,
build_ins_embed_branch,
PanopticDeepLabSemSegHead,
PanopticDeepLabInsEmbedHead,
)
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import Callable, List, Union
import torch
from panopticapi.utils import rgb2id
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .target_generator import PanopticDeepLabTargetGenerator
__all__ = ["PanopticDeeplabDatasetMapper"]
class PanopticDeeplabDatasetMapper:
"""
The callable currently does the following:
    1. Reads the image from "file_name" and the label from "pan_seg_file_name"
    2. Applies random scale, crop and flip transforms to the image and label
    3. Converts the data to Tensor and generates training targets from the label
"""
@configurable
def __init__(
self,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
panoptic_target_generator: Callable,
):
"""
NOTE: this interface is experimental.
Args:
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
panoptic_target_generator: a callable that takes "panoptic_seg" and
"segments_info" to generate training targets for the model.
"""
# fmt: off
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
# fmt: on
logger = logging.getLogger(__name__)
logger.info("Augmentations used in training: " + str(augmentations))
self.panoptic_target_generator = panoptic_target_generator
@classmethod
def from_config(cls, cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN,
cfg.INPUT.MAX_SIZE_TRAIN,
cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
augs.append(T.RandomFlip())
# Assume this mapper is always applied to the training set.
dataset_names = cfg.DATASETS.TRAIN
meta = MetadataCatalog.get(dataset_names[0])
panoptic_target_generator = PanopticDeepLabTargetGenerator(
ignore_label=meta.ignore_label,
thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()),
sigma=cfg.INPUT.GAUSSIAN_SIGMA,
ignore_stuff_in_offset=cfg.INPUT.IGNORE_STUFF_IN_OFFSET,
small_instance_area=cfg.INPUT.SMALL_INSTANCE_AREA,
small_instance_weight=cfg.INPUT.SMALL_INSTANCE_WEIGHT,
ignore_crowd_in_semantic=cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC,
)
ret = {
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"panoptic_target_generator": panoptic_target_generator,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# Load image.
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# Panoptic label is encoded in RGB image.
pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
# Reuses semantic transform for panoptic labels.
aug_input = T.AugInput(image, sem_seg=pan_seg_gt)
_ = self.augmentations(aug_input)
image, pan_seg_gt = aug_input.image, aug_input.sem_seg
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
# Generates training targets for Panoptic-DeepLab.
targets = self.panoptic_target_generator(rgb2id(pan_seg_gt), dataset_dict["segments_info"])
dataset_dict.update(targets)
return dataset_dict
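# Minimal usage sketch: a typical training setup passes this mapper to
# detectron2's default train loader. `cfg` is assumed to be a config prepared
# with add_panoptic_deeplab_config and with the training datasets registered;
# `_example_build_train_loader` is a hypothetical helper name.
def _example_build_train_loader(cfg):
    from detectron2.data import build_detection_train_loader

    mapper = PanopticDeeplabDatasetMapper(cfg)  # built via from_config above
    return build_detection_train_loader(cfg, mapper=mapper)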
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Callable, Dict, List, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.layers import Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm
from detectron2.modeling import (
META_ARCH_REGISTRY,
SEM_SEG_HEADS_REGISTRY,
build_backbone,
build_sem_seg_head,
)
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.projects.deeplab import DeepLabV3PlusHead
from detectron2.projects.deeplab.loss import DeepLabCE
from detectron2.structures import BitMasks, ImageList, Instances
from detectron2.utils.registry import Registry
from .post_processing import get_panoptic_segmentation
__all__ = ["PanopticDeepLab", "INS_EMBED_BRANCHES_REGISTRY", "build_ins_embed_branch"]
INS_EMBED_BRANCHES_REGISTRY = Registry("INS_EMBED_BRANCHES")
INS_EMBED_BRANCHES_REGISTRY.__doc__ = """
Registry for instance embedding branches, which make instance embedding
predictions from feature maps.
"""
@META_ARCH_REGISTRY.register()
class PanopticDeepLab(nn.Module):
"""
Main class for panoptic segmentation architectures.
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape())
self.ins_embed_head = build_ins_embed_branch(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False)
self.meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
self.stuff_area = cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA
self.threshold = cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD
self.nms_kernel = cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL
self.top_k = cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE
self.predict_instances = cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES
self.use_depthwise_separable_conv = cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV
assert (
cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
== cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV
)
self.size_divisibility = cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY
self.benchmark_network_speed = cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "sem_seg": semantic segmentation ground truth
* "center": center points heatmap ground truth
* "offset": pixel offsets to center points ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "panoptic_seg", "sem_seg": see documentation
:doc:`/tutorials/models` for the standard output format
* "instances": available if ``predict_instances is True``. see documentation
:doc:`/tutorials/models` for the standard output format
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
# To avoid error in ASPP layer when input has different size.
size_divisibility = (
self.size_divisibility
if self.size_divisibility > 0
else self.backbone.size_divisibility
)
images = ImageList.from_tensors(images, size_divisibility)
features = self.backbone(images.tensor)
losses = {}
if "sem_seg" in batched_inputs[0]:
targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
targets = ImageList.from_tensors(
targets, size_divisibility, self.sem_seg_head.ignore_value
).tensor
if "sem_seg_weights" in batched_inputs[0]:
# The default D2 DatasetMapper may not contain "sem_seg_weights"
# Avoid error in testing when default DatasetMapper is used.
weights = [x["sem_seg_weights"].to(self.device) for x in batched_inputs]
weights = ImageList.from_tensors(weights, size_divisibility).tensor
else:
weights = None
else:
targets = None
weights = None
sem_seg_results, sem_seg_losses = self.sem_seg_head(features, targets, weights)
losses.update(sem_seg_losses)
if "center" in batched_inputs[0] and "offset" in batched_inputs[0]:
center_targets = [x["center"].to(self.device) for x in batched_inputs]
center_targets = ImageList.from_tensors(
center_targets, size_divisibility
).tensor.unsqueeze(1)
center_weights = [x["center_weights"].to(self.device) for x in batched_inputs]
center_weights = ImageList.from_tensors(center_weights, size_divisibility).tensor
offset_targets = [x["offset"].to(self.device) for x in batched_inputs]
offset_targets = ImageList.from_tensors(offset_targets, size_divisibility).tensor
offset_weights = [x["offset_weights"].to(self.device) for x in batched_inputs]
offset_weights = ImageList.from_tensors(offset_weights, size_divisibility).tensor
else:
center_targets = None
center_weights = None
offset_targets = None
offset_weights = None
center_results, offset_results, center_losses, offset_losses = self.ins_embed_head(
features, center_targets, center_weights, offset_targets, offset_weights
)
losses.update(center_losses)
losses.update(offset_losses)
if self.training:
return losses
if self.benchmark_network_speed:
return []
processed_results = []
for sem_seg_result, center_result, offset_result, input_per_image, image_size in zip(
sem_seg_results, center_results, offset_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height")
width = input_per_image.get("width")
r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
c = sem_seg_postprocess(center_result, image_size, height, width)
o = sem_seg_postprocess(offset_result, image_size, height, width)
# Post-processing to get panoptic segmentation.
panoptic_image, _ = get_panoptic_segmentation(
r.argmax(dim=0, keepdim=True),
c,
o,
thing_ids=self.meta.thing_dataset_id_to_contiguous_id.values(),
label_divisor=self.meta.label_divisor,
stuff_area=self.stuff_area,
void_label=-1,
threshold=self.threshold,
nms_kernel=self.nms_kernel,
top_k=self.top_k,
)
# For semantic segmentation evaluation.
processed_results.append({"sem_seg": r})
panoptic_image = panoptic_image.squeeze(0)
semantic_prob = F.softmax(r, dim=0)
# For panoptic segmentation evaluation.
processed_results[-1]["panoptic_seg"] = (panoptic_image, None)
# For instance segmentation evaluation.
if self.predict_instances:
instances = []
panoptic_image_cpu = panoptic_image.cpu().numpy()
for panoptic_label in np.unique(panoptic_image_cpu):
if panoptic_label == -1:
continue
pred_class = panoptic_label // self.meta.label_divisor
isthing = pred_class in list(
self.meta.thing_dataset_id_to_contiguous_id.values()
)
# Get instance segmentation results.
if isthing:
instance = Instances((height, width))
# Evaluation code takes continuous id starting from 0
instance.pred_classes = torch.tensor(
[pred_class], device=panoptic_image.device
)
mask = panoptic_image == panoptic_label
instance.pred_masks = mask.unsqueeze(0)
# Average semantic probability
sem_scores = semantic_prob[pred_class, ...]
sem_scores = torch.mean(sem_scores[mask])
# Center point probability
mask_indices = torch.nonzero(mask).float()
center_y, center_x = (
torch.mean(mask_indices[:, 0]),
torch.mean(mask_indices[:, 1]),
)
center_scores = c[0, int(center_y.item()), int(center_x.item())]
# Confidence score is semantic prob * center prob.
instance.scores = torch.tensor(
[sem_scores * center_scores], device=panoptic_image.device
)
# Get bounding boxes
instance.pred_boxes = BitMasks(instance.pred_masks).get_bounding_boxes()
instances.append(instance)
if len(instances) > 0:
processed_results[-1]["instances"] = Instances.cat(instances)
return processed_results
@SEM_SEG_HEADS_REGISTRY.register()
class PanopticDeepLabSemSegHead(DeepLabV3PlusHead):
"""
A semantic segmentation head described in :paper:`Panoptic-DeepLab`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
decoder_channels: List[int],
norm: Union[str, Callable],
head_channels: int,
loss_weight: float,
loss_type: str,
loss_top_k: float,
ignore_value: int,
num_classes: int,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "input_shape"
(each element in "input_shape" corresponds to one decoder stage).
norm (str or callable): normalization for all conv layers.
head_channels (int): the output channels of extra convolutions
between decoder and predictor.
loss_weight (float): loss weight.
loss_top_k (float): the fraction of hardest pixels kept for the
"hard_pixel_mining" loss.
loss_type, ignore_value, num_classes: the same as the base class.
"""
super().__init__(
input_shape,
decoder_channels=decoder_channels,
norm=norm,
ignore_value=ignore_value,
**kwargs,
)
assert self.decoder_only
self.loss_weight = loss_weight
use_bias = norm == ""
# `head` is additional transform before predictor
if self.use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field.
self.head = DepthwiseSeparableConv2d(
decoder_channels[0],
head_channels,
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
self.head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.head[0])
weight_init.c2_xavier_fill(self.head[1])
self.predictor = Conv2d(head_channels, num_classes, kernel_size=1)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=ignore_value)
elif loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=ignore_value, top_k_percent_pixels=loss_top_k)
else:
raise ValueError("Unexpected loss type: %s" % loss_type)
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret["head_channels"] = cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS
ret["loss_top_k"] = cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K
return ret
def forward(self, features, targets=None, weights=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
y = self.layers(features)
if self.training:
return None, self.losses(y, targets, weights)
else:
y = F.interpolate(
y, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return y, {}
def layers(self, features):
assert self.decoder_only
y = super().layers(features)
y = self.head(y)
y = self.predictor(y)
return y
def losses(self, predictions, targets, weights=None):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets, weights)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
def build_ins_embed_branch(cfg, input_shape):
"""
Build an instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.INS_EMBED_HEAD.NAME
return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape)
@INS_EMBED_BRANCHES_REGISTRY.register()
class PanopticDeepLabInsEmbedHead(DeepLabV3PlusHead):
"""
An instance embedding head described in :paper:`Panoptic-DeepLab`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
decoder_channels: List[int],
norm: Union[str, Callable],
head_channels: int,
center_loss_weight: float,
offset_loss_weight: float,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "input_shape"
(each element in "input_shape" corresponds to one decoder stage).
norm (str or callable): normalization for all conv layers.
head_channels (int): the output channels of extra convolutions
between decoder and predictor.
center_loss_weight (float): loss weight for center point prediction.
offset_loss_weight (float): loss weight for center offset prediction.
"""
super().__init__(input_shape, decoder_channels=decoder_channels, norm=norm, **kwargs)
assert self.decoder_only
self.center_loss_weight = center_loss_weight
self.offset_loss_weight = offset_loss_weight
use_bias = norm == ""
# center prediction
# `head` is additional transform before predictor
self.center_head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.center_head[0])
weight_init.c2_xavier_fill(self.center_head[1])
self.center_predictor = Conv2d(head_channels, 1, kernel_size=1)
nn.init.normal_(self.center_predictor.weight, 0, 0.001)
nn.init.constant_(self.center_predictor.bias, 0)
# offset prediction
# `head` is additional transform before predictor
if self.use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field.
self.offset_head = DepthwiseSeparableConv2d(
decoder_channels[0],
head_channels,
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
self.offset_head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.offset_head[0])
weight_init.c2_xavier_fill(self.offset_head[1])
self.offset_predictor = Conv2d(head_channels, 2, kernel_size=1)
nn.init.normal_(self.offset_predictor.weight, 0, 0.001)
nn.init.constant_(self.offset_predictor.bias, 0)
self.center_loss = nn.MSELoss(reduction="none")
self.offset_loss = nn.L1Loss(reduction="none")
@classmethod
def from_config(cls, cfg, input_shape):
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_size = cfg.INPUT.CROP.SIZE
else:
train_size = None
decoder_channels = [cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM] * (
len(cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES) - 1
) + [cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS]
ret = dict(
input_shape={
k: v for k, v in input_shape.items() if k in cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES
},
project_channels=cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS,
aspp_dilations=cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS,
aspp_dropout=cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT,
decoder_channels=decoder_channels,
common_stride=cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE,
norm=cfg.MODEL.INS_EMBED_HEAD.NORM,
train_size=train_size,
head_channels=cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS,
center_loss_weight=cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT,
offset_loss_weight=cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT,
use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV,
)
return ret
def forward(
self,
features,
center_targets=None,
center_weights=None,
offset_targets=None,
offset_weights=None,
):
"""
Returns:
In training, returns (None, None, dict of center losses, dict of offset losses)
In inference, returns (center predictions, offset predictions, {}, {})
"""
center, offset = self.layers(features)
if self.training:
return (
None,
None,
self.center_losses(center, center_targets, center_weights),
self.offset_losses(offset, offset_targets, offset_weights),
)
else:
center = F.interpolate(
center, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
offset = (
F.interpolate(
offset, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
* self.common_stride
)
return center, offset, {}, {}
def layers(self, features):
assert self.decoder_only
y = super().layers(features)
# center
center = self.center_head(y)
center = self.center_predictor(center)
# offset
offset = self.offset_head(y)
offset = self.offset_predictor(offset)
return center, offset
def center_losses(self, predictions, targets, weights):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.center_loss(predictions, targets) * weights
if weights.sum() > 0:
loss = loss.sum() / weights.sum()
else:
loss = loss.sum() * 0
losses = {"loss_center": loss * self.center_loss_weight}
return losses
def offset_losses(self, predictions, targets, weights):
predictions = (
F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
* self.common_stride
)
loss = self.offset_loss(predictions, targets) * weights
if weights.sum() > 0:
loss = loss.sum() / weights.sum()
else:
loss = loss.sum() * 0
losses = {"loss_offset": loss * self.offset_loss_weight}
return losses
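# Minimal inference sketch: `cfg` is assumed to be a fully prepared
# Panoptic-DeepLab config (datasets registered, weights loaded elsewhere);
# the helper name and tensor shapes are hypothetical.
def _example_inference(cfg):
    from detectron2.modeling import build_model

    model = build_model(cfg)  # builds PanopticDeepLab through META_ARCH_REGISTRY
    model.eval()
    dummy_input = [{"image": torch.zeros(3, 512, 1024), "height": 512, "width": 1024}]
    with torch.no_grad():
        outputs = model(dummy_input)  # list of dicts with "sem_seg" / "panoptic_seg"
    return outputs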
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/panoptic_seg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/model/post_processing/instance_post_processing.py # noqa
from collections import Counter
import torch
import torch.nn.functional as F
def find_instance_center(center_heatmap, threshold=0.1, nms_kernel=3, top_k=None):
"""
Find the center points from the center heatmap.
Args:
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
Returns:
A Tensor of shape [K, 2] where K is the number of center points. The
order of second dim is (y, x).
"""
# Thresholding, setting values below threshold to -1.
center_heatmap = F.threshold(center_heatmap, threshold, -1)
# NMS
nms_padding = (nms_kernel - 1) // 2
center_heatmap_max_pooled = F.max_pool2d(
center_heatmap, kernel_size=nms_kernel, stride=1, padding=nms_padding
)
center_heatmap[center_heatmap != center_heatmap_max_pooled] = -1
# Squeeze first two dimensions.
center_heatmap = center_heatmap.squeeze()
assert len(center_heatmap.size()) == 2, "Something is wrong with center heatmap dimension."
# Find non-zero elements.
if top_k is None:
return torch.nonzero(center_heatmap > 0)
else:
# find top k centers.
top_k_scores, _ = torch.topk(torch.flatten(center_heatmap), top_k)
return torch.nonzero(center_heatmap > top_k_scores[-1].clamp_(min=0))
def group_pixels(center_points, offsets):
"""
Gives each pixel in the image an instance id.
Args:
center_points: A Tensor of shape [K, 2] where K is the number of center points.
The order of second dim is (y, x).
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
Returns:
A Tensor of shape [1, H, W] with values in range [1, K], which represents
the center this pixel belongs to.
"""
height, width = offsets.size()[1:]
# Generates a coordinate map, where each location is the coordinate of
# that location.
y_coord, x_coord = torch.meshgrid(
torch.arange(height, dtype=offsets.dtype, device=offsets.device),
torch.arange(width, dtype=offsets.dtype, device=offsets.device),
)
coord = torch.cat((y_coord.unsqueeze(0), x_coord.unsqueeze(0)), dim=0)
center_loc = coord + offsets
center_loc = center_loc.flatten(1).T.unsqueeze_(0) # [1, H*W, 2]
center_points = center_points.unsqueeze(1) # [K, 1, 2]
# Distance: [K, H*W].
distance = torch.norm(center_points - center_loc, dim=-1)
# Finds center with minimum distance at each location, offset by 1, to
# reserve id=0 for stuff.
instance_id = torch.argmin(distance, dim=0).reshape((1, height, width)) + 1
return instance_id
def get_instance_segmentation(
sem_seg, center_heatmap, offsets, thing_seg, thing_ids, threshold=0.1, nms_kernel=3, top_k=None
):
"""
Post-processing for instance segmentation: produces a class-agnostic instance id for each pixel.
Args:
sem_seg: A Tensor of shape [1, H, W], predicted semantic label.
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
thing_seg: A Tensor of shape [1, H, W], predicted foreground (thing) mask.
thing_ids: A set of ids from contiguous category ids belonging
to thing categories.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
Returns:
A Tensor of shape [1, H, W], where value 0 represents stuff (not an instance)
and positive values represent different instances.
A Tensor of shape [1, K, 2] where K is the number of center points.
The order of second dim is (y, x).
"""
center_points = find_instance_center(
center_heatmap, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k
)
if center_points.size(0) == 0:
return torch.zeros_like(sem_seg), center_points.unsqueeze(0)
ins_seg = group_pixels(center_points, offsets)
return thing_seg * ins_seg, center_points.unsqueeze(0)
def merge_semantic_and_instance(
sem_seg, ins_seg, semantic_thing_seg, label_divisor, thing_ids, stuff_area, void_label
):
"""
Post-processing for panoptic segmentation, by merging semantic segmentation
label and class agnostic instance segmentation label.
Args:
sem_seg: A Tensor of shape [1, H, W], predicted category id for each pixel.
ins_seg: A Tensor of shape [1, H, W], predicted instance id for each pixel.
semantic_thing_seg: A Tensor of shape [1, H, W], predicted foreground mask.
label_divisor: An integer, used to convert panoptic id =
semantic id * label_divisor + instance_id.
thing_ids: Set, a set of ids from contiguous category ids belonging
to thing categories.
stuff_area: An integer, remove stuff whose area is less than stuff_area.
void_label: An integer, indicates the region has no confident prediction.
Returns:
A Tensor of shape [1, H, W].
"""
# In case thing mask does not align with semantic prediction.
pan_seg = torch.zeros_like(sem_seg) + void_label
is_thing = (ins_seg > 0) & (semantic_thing_seg > 0)
# Keep track of instance id for each class.
class_id_tracker = Counter()
# Paste thing by majority voting.
instance_ids = torch.unique(ins_seg)
for ins_id in instance_ids:
if ins_id == 0:
continue
# Make sure majority voting is only done within `semantic_thing_seg`.
thing_mask = (ins_seg == ins_id) & is_thing
if torch.nonzero(thing_mask).size(0) == 0:
continue
class_id, _ = torch.mode(sem_seg[thing_mask].view(-1))
class_id_tracker[class_id.item()] += 1
new_ins_id = class_id_tracker[class_id.item()]
pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
# Paste stuff to unoccupied area.
class_ids = torch.unique(sem_seg)
for class_id in class_ids:
if class_id.item() in thing_ids:
# thing class
continue
# Calculate stuff area.
stuff_mask = (sem_seg == class_id) & (ins_seg == 0)
if stuff_mask.sum().item() >= stuff_area:
pan_seg[stuff_mask] = class_id * label_divisor
return pan_seg
def get_panoptic_segmentation(
sem_seg,
center_heatmap,
offsets,
thing_ids,
label_divisor,
stuff_area,
void_label,
threshold=0.1,
nms_kernel=7,
top_k=200,
foreground_mask=None,
):
"""
Post-processing for panoptic segmentation.
Args:
sem_seg: A Tensor of shape [1, H, W] of predicted semantic label.
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
thing_ids: A set of ids from contiguous category ids belonging
to thing categories.
label_divisor: An integer, used to convert panoptic id =
semantic id * label_divisor + instance_id.
stuff_area: An integer, remove stuff whose area is less than stuff_area.
void_label: An integer, indicates the region has no confident prediction.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
foreground_mask: Optional, A Tensor of shape [1, H, W] of predicted
binary foreground mask. If not provided, it will be generated from
sem_seg.
Returns:
A Tensor of shape [1, H, W], int64.
"""
if sem_seg.dim() != 3 or sem_seg.size(0) != 1:
raise ValueError("Semantic prediction with un-supported shape: {}.".format(sem_seg.size()))
if center_heatmap.dim() != 3:
raise ValueError(
"Center prediction with un-supported dimension: {}.".format(center_heatmap.dim())
)
if offsets.dim() != 3:
raise ValueError("Offset prediction with un-supported dimension: {}.".format(offsets.dim()))
if foreground_mask is not None:
if foreground_mask.dim() != 3 or foreground_mask.size(0) != 1:
    raise ValueError(
        "Foreground prediction with un-supported shape: {}.".format(foreground_mask.size())
)
thing_seg = foreground_mask
else:
# inference from semantic segmentation
thing_seg = torch.zeros_like(sem_seg)
for thing_class in list(thing_ids):
thing_seg[sem_seg == thing_class] = 1
instance, center = get_instance_segmentation(
sem_seg,
center_heatmap,
offsets,
thing_seg,
thing_ids,
threshold=threshold,
nms_kernel=nms_kernel,
top_k=top_k,
)
panoptic = merge_semantic_and_instance(
sem_seg, instance, thing_seg, label_divisor, thing_ids, stuff_area, void_label
)
return panoptic, center
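# Minimal usage sketch: shapes follow the docstrings above; the helper name and
# all ids, sizes and values are hypothetical placeholders.
def _example_post_processing():
    sem_seg = torch.zeros(1, 64, 64, dtype=torch.long)  # every pixel predicted as class 0
    center_heatmap = torch.rand(1, 64, 64)              # fake center heatmap
    offsets = torch.zeros(2, 64, 64)                    # fake (y, x) offsets
    panoptic, centers = get_panoptic_segmentation(
        sem_seg,
        center_heatmap,
        offsets,
        thing_ids={0},
        label_divisor=1000,
        stuff_area=64,
        void_label=-1,
    )
    return panoptic, centers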
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/post_processing.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/aa934324b55a34ce95fea143aea1cb7a6dbe04bd/segmentation/data/transforms/target_transforms.py#L11 # noqa
import numpy as np
import torch
class PanopticDeepLabTargetGenerator(object):
"""
Generates training targets for Panoptic-DeepLab.
"""
def __init__(
self,
ignore_label,
thing_ids,
sigma=8,
ignore_stuff_in_offset=False,
small_instance_area=0,
small_instance_weight=1,
ignore_crowd_in_semantic=False,
):
"""
Args:
ignore_label: Integer, the ignore label for semantic segmentation.
thing_ids: Set, a set of ids from contiguous category ids belonging
to thing categories.
sigma: the sigma for Gaussian kernel.
ignore_stuff_in_offset: Boolean, whether to ignore stuff region when
training the offset branch.
small_instance_area: Integer, indicates largest area for small instances.
small_instance_weight: Integer, indicates semantic loss weights for
small instances.
ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in
semantic segmentation branch, crowd region is ignored in the original
TensorFlow implementation.
"""
self.ignore_label = ignore_label
self.thing_ids = set(thing_ids)
self.ignore_stuff_in_offset = ignore_stuff_in_offset
self.small_instance_area = small_instance_area
self.small_instance_weight = small_instance_weight
self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
# Generate the default Gaussian image for each center
self.sigma = sigma
size = 6 * sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * sigma + 1, 3 * sigma + 1
self.g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
def __call__(self, panoptic, segments_info):
"""Generates the training target.
reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py # noqa
reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 # noqa
Args:
panoptic: numpy.array, panoptic label, we assume it is already
converted from rgb image by panopticapi.utils.rgb2id.
segments_info (list[dict]): see detectron2 documentation of "Use Custom Datasets".
Returns:
A dictionary with fields:
- sem_seg: Tensor, semantic label, shape=(H, W).
- center: Tensor, center heatmap, shape=(H, W).
- center_points: List, center coordinates, with tuple
(y-coord, x-coord).
- offset: Tensor, offset, shape=(2, H, W), first dim is
(offset_y, offset_x).
- sem_seg_weights: Tensor, loss weight for semantic prediction,
shape=(H, W).
- center_weights: Tensor, ignore region of the center prediction,
  shape=(H, W), used as weights for center regression: 0 means
  ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
- offset_weights: Tensor, ignore region of the offset prediction,
  shape=(H, W), used as weights for offset regression: 0 means
  ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
"""
height, width = panoptic.shape[0], panoptic.shape[1]
semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label
center = np.zeros((height, width), dtype=np.float32)
center_pts = []
offset = np.zeros((2, height, width), dtype=np.float32)
y_coord, x_coord = np.meshgrid(
np.arange(height, dtype=np.float32), np.arange(width, dtype=np.float32), indexing="ij"
)
# Generate pixel-wise loss weights
semantic_weights = np.ones_like(panoptic, dtype=np.uint8)
# 0: ignore, 1: has instance
# three conditions for a region to be ignored for instance branches:
# (1) It is labeled as `ignore_label`
# (2) It is crowd region (iscrowd=1)
# (3) (Optional) It is stuff region (for offset branch)
center_weights = np.zeros_like(panoptic, dtype=np.uint8)
offset_weights = np.zeros_like(panoptic, dtype=np.uint8)
for seg in segments_info:
cat_id = seg["category_id"]
if not (self.ignore_crowd_in_semantic and seg["iscrowd"]):
semantic[panoptic == seg["id"]] = cat_id
if not seg["iscrowd"]:
# Ignored regions are not in `segments_info`.
# Handle crowd region.
center_weights[panoptic == seg["id"]] = 1
if not self.ignore_stuff_in_offset or cat_id in self.thing_ids:
offset_weights[panoptic == seg["id"]] = 1
if cat_id in self.thing_ids:
# find instance center
mask_index = np.where(panoptic == seg["id"])
if len(mask_index[0]) == 0:
# the instance is completely cropped
continue
# Find instance area
ins_area = len(mask_index[0])
if ins_area < self.small_instance_area:
semantic_weights[panoptic == seg["id"]] = self.small_instance_weight
center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1])
center_pts.append([center_y, center_x])
# generate center heatmap
y, x = int(round(center_y)), int(round(center_x))
sigma = self.sigma
# upper left
ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1))
# bottom right
br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2))
# start and end indices in default Gaussian image
gaussian_x0, gaussian_x1 = max(0, -ul[0]), min(br[0], width) - ul[0]
gaussian_y0, gaussian_y1 = max(0, -ul[1]), min(br[1], height) - ul[1]
# start and end indices in center heatmap image
center_x0, center_x1 = max(0, ul[0]), min(br[0], width)
center_y0, center_y1 = max(0, ul[1]), min(br[1], height)
center[center_y0:center_y1, center_x0:center_x1] = np.maximum(
center[center_y0:center_y1, center_x0:center_x1],
self.g[gaussian_y0:gaussian_y1, gaussian_x0:gaussian_x1],
)
# generate offset (2, h, w) -> (y-dir, x-dir)
offset[0][mask_index] = center_y - y_coord[mask_index]
offset[1][mask_index] = center_x - x_coord[mask_index]
center_weights = center_weights[None]
offset_weights = offset_weights[None]
return dict(
sem_seg=torch.as_tensor(semantic.astype("long")),
center=torch.as_tensor(center.astype(np.float32)),
center_points=center_pts,
offset=torch.as_tensor(offset.astype(np.float32)),
sem_seg_weights=torch.as_tensor(semantic_weights.astype(np.float32)),
center_weights=torch.as_tensor(center_weights.astype(np.float32)),
offset_weights=torch.as_tensor(offset_weights.astype(np.float32)),
)
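# Minimal usage sketch: a tiny synthetic panoptic map with a single "thing"
# segment; the helper name, ids, labels and sizes are hypothetical placeholders.
def _example_generate_targets():
    generator = PanopticDeepLabTargetGenerator(ignore_label=255, thing_ids=[1], sigma=8)
    panoptic = np.zeros((32, 32), dtype=np.int32)
    panoptic[8:24, 8:24] = 1001  # one instance of category 1 (id = 1 * 1000 + 1)
    segments_info = [{"id": 1001, "category_id": 1, "iscrowd": 0}]
    targets = generator(panoptic, segments_info)
    return targets["center"].shape, targets["offset"].shape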
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Point supervision Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.utils.logger import setup_logger
from point_sup import PointSupDatasetMapper, add_point_sup_config
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if cfg.INPUT.POINT_SUP:
mapper = PointSupDatasetMapper(cfg, is_train=True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_pointrend_config(cfg)
add_point_sup_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
# Setup logger for "point_sup" module
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup")
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
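# Illustrative launch command; the config path and GPU count are hypothetical:
#
#   python train_net.py --config-file configs/example_point_sup.yaml \
#       --num-gpus 8 INPUT.POINT_SUP True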
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/train_net.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import numpy as np
import os
import sys
import pycocotools.mask as mask_utils
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
def get_point_annotations(input_filename, output_filename, num_points_per_instance):
with PathManager.open(input_filename, "r") as f:
coco_json = json.load(f)
coco_annos = coco_json.pop("annotations")
coco_points_json = copy.deepcopy(coco_json)
imgs = {}
for img in coco_json["images"]:
imgs[img["id"]] = img
new_annos = []
for ann in coco_annos:
# convert mask
t = imgs[ann["image_id"]]
h, w = t["height"], t["width"]
segm = ann.pop("segmentation")
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask_utils.frPyObjects(segm, h, w)
rle = mask_utils.merge(rles)
elif type(segm["counts"]) == list:
# uncompressed RLE
rle = mask_utils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
mask = mask_utils.decode(rle)
new_ann = copy.deepcopy(ann)
# sample points in image coordinates
box = ann["bbox"]
point_coords_wrt_image = np.random.rand(num_points_per_instance, 2)
point_coords_wrt_image[:, 0] = point_coords_wrt_image[:, 0] * box[2]
point_coords_wrt_image[:, 1] = point_coords_wrt_image[:, 1] * box[3]
point_coords_wrt_image[:, 0] += box[0]
point_coords_wrt_image[:, 1] += box[1]
# round to integer coordinates
point_coords_wrt_image = np.floor(point_coords_wrt_image).astype(int)
# get labels
assert (point_coords_wrt_image >= 0).all(), (point_coords_wrt_image, mask.shape)
assert (point_coords_wrt_image[:, 0] < w).all(), (point_coords_wrt_image, mask.shape)
assert (point_coords_wrt_image[:, 1] < h).all(), (point_coords_wrt_image, mask.shape)
point_labels = mask[point_coords_wrt_image[:, 1], point_coords_wrt_image[:, 0]]
# store new annotations
new_ann["point_coords"] = point_coords_wrt_image.tolist()
new_ann["point_labels"] = point_labels.tolist()
new_annos.append(new_ann)
coco_points_json["annotations"] = new_annos
with PathManager.open(output_filename, "w") as f:
json.dump(coco_points_json, f)
print("{} is modified and stored in {}.".format(input_filename, output_filename))
if __name__ == "__main__":
"""
Generate point-based supervision for COCO dataset.
Usage:
python tools/prepare_coco_point_annotations_without_masks.py \
NUM_POINTS_PER_INSTANCE NUM_VERSIONS_WITH_DIFFERENT_SEED
Example to generate point-based COCO dataset with 10 points per instance:
python tools/prepare_coco_point_annotations_without_masks.py 10
"""
# Fix random seed
seed_all_rng(12345)
assert len(sys.argv) >= 2, "Please provide number of points to sample per instance"
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco/annotations")
num_points_per_instance = int(sys.argv[1])
if len(sys.argv) == 3:
repeat = int(sys.argv[2])
else:
repeat = 1
s = "instances_train2017"
for version in range(repeat):
print(
"Start sampling {} points per instance for annotations {}.".format(
num_points_per_instance, s
)
)
get_point_annotations(
os.path.join(dataset_dir, "{}.json".format(s)),
os.path.join(
dataset_dir,
"{}_n{}_v{}_without_masks.json".format(s, num_points_per_instance, version + 1),
),
num_points_per_instance,
)
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/tools/prepare_coco_point_annotations_without_masks.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import Any, List
from detectron2.modeling import ROI_MASK_HEAD_REGISTRY
from detectron2.modeling.roi_heads.mask_head import MaskRCNNConvUpsampleHead, mask_rcnn_inference
from detectron2.projects.point_rend import ImplicitPointRendMaskHead
from detectron2.projects.point_rend.point_features import point_sample
from detectron2.projects.point_rend.point_head import roi_mask_point_loss
from detectron2.structures import Instances
from .point_utils import get_point_coords_from_point_annotation
__all__ = [
"ImplicitPointRendPointSupHead",
"MaskRCNNConvUpsamplePointSupHead",
]
@ROI_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsamplePointSupHead(MaskRCNNConvUpsampleHead):
"""
A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
Predictions are made with a final 1x1 conv layer.
The difference with `MaskRCNNConvUpsampleHead` is that this head is trained
with point supervision. Please use the `MaskRCNNConvUpsampleHead` if you want
to train the model with mask supervision.
"""
def forward(self, x, instances: List[Instances]) -> Any:
"""
Args:
x: input region feature(s) provided by :class:`ROIHeads`.
instances (list[Instances]): contains the boxes & labels corresponding
to the input features.
Exact format is up to its caller to decide.
Typically, this is the foreground instances in training, with
"proposal_boxes" field and other gt annotations.
In inference, it contains boxes that are already predicted.
Returns:
A dict of losses in training. The predicted "instances" in inference.
"""
x = self.layers(x)
if self.training:
N, C, H, W = x.shape
assert H == W
proposal_boxes = [x.proposal_boxes for x in instances]
assert N == sum(len(x) for x in proposal_boxes)
if N == 0:
return {"loss_mask": x.sum() * 0}
# Training with point supervision
# Sanity check: annotation should not contain gt_masks
assert not instances[0].has("gt_masks")
point_coords, point_labels = get_point_coords_from_point_annotation(instances)
mask_logits = point_sample(
x,
point_coords,
align_corners=False,
)
return {"loss_mask": roi_mask_point_loss(mask_logits, instances, point_labels)}
else:
mask_rcnn_inference(x, instances)
return instances
@ROI_MASK_HEAD_REGISTRY.register()
class ImplicitPointRendPointSupHead(ImplicitPointRendMaskHead):
def _uniform_sample_train_points(self, instances):
assert self.training
assert not instances[0].has("gt_masks")
point_coords, point_labels = get_point_coords_from_point_annotation(instances)
return point_coords, point_labels
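# Minimal configuration sketch: either head is selected by name through the
# standard ROI mask-head config key; the helper name is hypothetical and
# add_point_sup_config(cfg) is assumed to have been called.
def _example_select_point_sup_head(cfg):
    cfg.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsamplePointSupHead"
    cfg.INPUT.POINT_SUP = True
    return cfg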
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/mask_head.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
def add_point_sup_config(cfg):
"""
Add config for point supervision.
"""
# Use point annotation
cfg.INPUT.POINT_SUP = False
# Sample only a subset of the points in each iteration.
# Default: 0, use all available points.
cfg.INPUT.SAMPLE_POINTS = 0
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import register_point_annotations
from .config import add_point_sup_config
from .dataset_mapper import PointSupDatasetMapper
from .mask_head import MaskRCNNConvUpsamplePointSupHead
from .point_utils import get_point_coords_from_point_annotation
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
# fmt: off
from detectron2.data.detection_utils import \
annotations_to_instances as base_annotations_to_instances
from detectron2.data.detection_utils import \
transform_instance_annotations as base_transform_instance_annotations
# fmt: on
def annotations_to_instances(annos, image_size, sample_points=0):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
sample_points (int): subsample points at each iteration
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes",
"gt_point_coords", "gt_point_labels", if they can be obtained from `annos`.
This is the format that builtin models with point supervision expect.
"""
target = base_annotations_to_instances(annos, image_size)
assert "point_coords" in annos[0]
assert "point_labels" in annos[0]
assert "segmentation" not in annos[0], "Please remove mask annotation"
if len(annos) and "point_labels" in annos[0]:
point_coords = []
point_labels = []
for i, _ in enumerate(annos):
# Already in the image coordinate system
point_coords_wrt_image = np.array(annos[i]["point_coords"])
point_labels_wrt_image = np.array(annos[i]["point_labels"])
if sample_points > 0:
random_indices = np.random.choice(
point_coords_wrt_image.shape[0],
sample_points,
replace=point_coords_wrt_image.shape[0] < sample_points,
).astype(int)
point_coords_wrt_image = point_coords_wrt_image[random_indices]
point_labels_wrt_image = point_labels_wrt_image[random_indices]
assert point_coords_wrt_image.shape[0] == point_labels_wrt_image.size
point_coords.append(point_coords_wrt_image)
point_labels.append(point_labels_wrt_image)
point_coords = torch.stack([torch.from_numpy(x) for x in point_coords])
point_labels = torch.stack([torch.from_numpy(x) for x in point_labels])
target.gt_point_coords = point_coords
target.gt_point_labels = point_labels
return target
def transform_instance_annotations(annotation, transforms, image_size):
"""
Apply transforms to box, and point annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for points.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList or list[Transform]):
image_size (tuple): the height, width of the transformed image
Returns:
dict:
the same input dict with fields "bbox", "point_coords", "point_labels"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
annotation = base_transform_instance_annotations(annotation, transforms, image_size)
assert "segmentation" not in annotation
assert "point_coords" in annotation
assert "point_labels" in annotation
point_coords = annotation["point_coords"]
point_labels = np.array(annotation["point_labels"]).astype(np.float)
point_coords = transforms.apply_coords(point_coords)
# Set all out-of-boundary points to "unlabeled"
inside = (point_coords >= np.array([0, 0])) & (point_coords <= np.array(image_size[::-1]))
inside = inside.all(axis=1)
point_labels[~inside] = -1
annotation["point_coords"] = point_coords
annotation["point_labels"] = point_labels
return annotation
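# Minimal usage sketch: a single toy annotation with two labeled points; the
# helper name, bbox and point values are hypothetical placeholders.
def _example_annotations_to_instances():
    from detectron2.structures import BoxMode

    annos = [
        {
            "bbox": [10.0, 10.0, 20.0, 20.0],
            "bbox_mode": BoxMode.XYWH_ABS,
            "category_id": 0,
            "point_coords": [[12.0, 15.0], [40.0, 40.0]],
            "point_labels": [1, 0],  # 1: on the object, 0: background
        }
    ]
    return annotations_to_instances(annos, image_size=(64, 64))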
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/detection_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from typing import List, Union
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.config import configurable
from .detection_utils import annotations_to_instances, transform_instance_annotations
__all__ = [
"PointSupDatasetMapper",
]
class PointSupDatasetMapper:
"""
The callable currently does the following:
1. Read the image from "file_name"
2. Applies transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
# Extra data augmentation for point supervision
sample_points: int = 0,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
sample_points: subsample points at each iteration
"""
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.sample_points = sample_points
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
logger.info(f"Point Augmentations used in {mode}: sample {sample_points} points")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
raise ValueError("Crop augmentation not supported to point supervision.")
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"sample_points": cfg.INPUT.SAMPLE_POINTS,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# Maps points from the closed interval [0, image_size - 1] on discrete
# image coordinates to the half-open interval [0, image_size) on continuous image
# coordinates. We use the continuous-discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
for ann in dataset_dict["annotations"]:
point_coords_wrt_image = np.array(ann["point_coords"]).astype(np.float)
point_coords_wrt_image = point_coords_wrt_image + 0.5
ann["point_coords"] = point_coords_wrt_image
annos = [
# also need to transform point coordinates
transform_instance_annotations(
obj,
transforms,
image_shape,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances(
annos,
image_shape,
sample_points=self.sample_points,
)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.layers import cat
def get_point_coords_from_point_annotation(instances):
"""
Load point coords and their corresponding labels from point annotation.
Args:
instances (list[Instances]): A list of N Instances, where N is the number of images
in the batch. The ground-truth labels (class, box, point coordinates,
point labels, ...) associated with each instance are stored in its fields.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
point_labels (Tensor): A tensor of shape (N, P) that contains the labels of P
sampled points. `point_labels` takes 3 possible values:
- 0: the point belongs to background
- 1: the point belongs to the object
- -1: the point is ignored during training
"""
point_coords_list = []
point_labels_list = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
point_coords = instances_per_image.gt_point_coords.to(torch.float32)
point_labels = instances_per_image.gt_point_labels.to(torch.float32).clone()
proposal_boxes_per_image = instances_per_image.proposal_boxes.tensor
# Convert point coordinate system, ground truth points are in image coord.
point_coords_wrt_box = get_point_coords_wrt_box(proposal_boxes_per_image, point_coords)
# Ignore points that are outside predicted boxes.
point_ignores = (
(point_coords_wrt_box[:, :, 0] < 0)
| (point_coords_wrt_box[:, :, 0] > 1)
| (point_coords_wrt_box[:, :, 1] < 0)
| (point_coords_wrt_box[:, :, 1] > 1)
)
point_labels[point_ignores] = -1
point_coords_list.append(point_coords_wrt_box)
point_labels_list.append(point_labels)
return (
cat(point_coords_list, dim=0),
cat(point_labels_list, dim=0),
)
def get_point_coords_wrt_box(boxes_coords, point_coords):
"""
Convert image-level absolute coordinates to box-normalized [0, 1] x [0, 1] point coordinates.
Args:
boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding box
    coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
    image-level absolute coordinates of P sampled points.
Returns:
point_coords_wrt_box (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
"""
with torch.no_grad():
point_coords_wrt_box = point_coords.clone()
point_coords_wrt_box[:, :, 0] -= boxes_coords[:, None, 0]
point_coords_wrt_box[:, :, 1] -= boxes_coords[:, None, 1]
point_coords_wrt_box[:, :, 0] = point_coords_wrt_box[:, :, 0] / (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_box[:, :, 1] = point_coords_wrt_box[:, :, 1] / (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
return point_coords_wrt_box
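# Minimal usage sketch: one box and two points in absolute (x, y) image
# coordinates; the helper name and all values are hypothetical placeholders.
def _example_point_coords_wrt_box():
    boxes = torch.tensor([[10.0, 10.0, 30.0, 30.0]])     # (R=1, 4), XYXY format
    points = torch.tensor([[[20.0, 20.0], [5.0, 5.0]]])  # (R=1, P=2, 2)
    # The first point maps to (0.5, 0.5); the second falls outside [0, 1] x [0, 1].
    return get_point_coords_wrt_box(boxes, points)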
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/point_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.builtin import _get_builtin_metadata
from detectron2.data.datasets.coco import load_coco_json
logger = logging.getLogger(__name__)
# COCO dataset
def register_coco_instances_with_points(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance segmentation with point annotation.
The point annotation json does not have "segmentation" field, instead,
it has "point_coords" and "point_labels" fields.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_json(json_file, image_root, name, ["point_coords", "point_labels"])
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
# point annotations without masks
"coco_2017_train_points_n10_v1_without_masks": (
"coco/train2017",
"coco/annotations/instances_train2017_n10_v1_without_masks.json",
),
}
def register_all_coco_train_points(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances_with_points(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_train_points(_root)
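# Editor's note: an illustrative sketch, not part of the original file. The dataset name
# and paths below are hypothetical placeholders for a custom point-annotated dataset;
# substitute your own annotation json and image directory.
def _example_register_custom_points_dataset():
    register_coco_instances_with_points(
        "my_points_train",                      # hypothetical dataset name
        {},                                     # extra metadata (optional)
        "datasets/my_points/annotations.json",  # hypothetical annotation file
        "datasets/my_points/images",            # hypothetical image root
    )
    # After registration the dataset can be referenced by name, e.g. in cfg.DATASETS.TRAIN.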
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/register_point_annotations.py
|
#!/usr/bin/env python
import sys
import torch
from fvcore.nn.precise_bn import update_bn_stats
from torch import nn
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.evaluation import inference_on_dataset
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
logger = setup_logger()
setup_logger(name="fvcore")
class CycleBatchNormList(nn.ModuleList):
"""
    A hacky way to implement domain-specific BatchNorm:
    it assumes a fixed number of domains that are always
    called in a fixed order.
"""
def __init__(self, length, channels):
        super().__init__([nn.BatchNorm2d(channels, affine=False) for _ in range(length)])
# shared affine, domain-specific BN
self.weight = nn.Parameter(torch.ones(channels))
self.bias = nn.Parameter(torch.zeros(channels))
self._pos = 0
def forward(self, x):
ret = self[self._pos](x)
self._pos = (self._pos + 1) % len(self)
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
return ret * w + b
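# Editor's note: a small sketch, not part of the original script, showing how the list
# cycles through its per-domain BN layers in call order (0 -> 1 -> 2 -> 0 -> ...).
def _example_cycle_bn():
    bn_list = CycleBatchNormList(length=3, channels=8)
    x = torch.randn(2, 8, 4, 4)
    for _ in range(4):
        _ = bn_list(x)
    # After 4 calls the internal pointer has wrapped around once: 0 -> 1 -> 2 -> 0 -> 1.
    return bn_list._pos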
if __name__ == "__main__":
checkpoint = sys.argv[1]
cfg = LazyConfig.load_rel("./configs/retinanet_SyncBNhead.py")
model = cfg.model
model.head.norm = lambda c: CycleBatchNormList(len(model.head_in_features), c)
model = instantiate(model)
model.cuda()
DetectionCheckpointer(model).load(checkpoint)
cfg.dataloader.train.total_batch_size = 8
logger.info("Running PreciseBN ...")
with EventStorage(), torch.no_grad():
update_bn_stats(model, instantiate(cfg.dataloader.train), 500)
logger.info("Running evaluation ...")
inference_on_dataset(
model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
)
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/retinanet-eval-domain-specific.py
|
import math
import torch
import torch.distributed as dist
from detectron2.modeling.roi_heads import FastRCNNConvFCHead, MaskRCNNConvUpsampleHead
from detectron2.utils import comm
from fvcore.nn.distributed import differentiable_all_gather
def concat_all_gather(input):
bs_int = input.shape[0]
size_list = comm.all_gather(bs_int)
max_size = max(size_list)
max_shape = (max_size,) + input.shape[1:]
padded_input = input.new_zeros(max_shape)
padded_input[:bs_int] = input
all_inputs = differentiable_all_gather(padded_input)
inputs = [x[:sz] for sz, x in zip(size_list, all_inputs)]
return inputs, size_list
def batch_shuffle(x):
# gather from all gpus
batch_size_this = x.shape[0]
all_xs, batch_size_all = concat_all_gather(x)
all_xs_concat = torch.cat(all_xs, dim=0)
total_bs = sum(batch_size_all)
rank = dist.get_rank()
assert batch_size_all[rank] == batch_size_this
idx_range = (sum(batch_size_all[:rank]), sum(batch_size_all[: rank + 1]))
# random shuffle index
idx_shuffle = torch.randperm(total_bs, device=x.device)
# broadcast to all gpus
dist.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
splits = torch.split(idx_shuffle, math.ceil(total_bs / dist.get_world_size()))
if len(splits) > rank:
idx_this = splits[rank]
else:
idx_this = idx_shuffle.new_zeros([0])
return all_xs_concat[idx_this], idx_unshuffle[idx_range[0] : idx_range[1]]
def batch_unshuffle(x, idx_unshuffle):
all_x, _ = concat_all_gather(x)
x_gather = torch.cat(all_x, dim=0)
return x_gather[idx_unshuffle]
def wrap_shuffle(module_type, method):
def new_method(self, x):
if self.training:
x, idx = batch_shuffle(x)
x = getattr(module_type, method)(self, x)
if self.training:
x = batch_unshuffle(x, idx)
return x
return type(module_type.__name__ + "WithShuffle", (module_type,), {method: new_method})
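# Editor's note: an illustrative sketch, not part of the original config. ToyHead is a
# made-up module; wrap_shuffle only wraps the named method, and the shuffle itself runs
# only in training mode (it needs an initialized torch.distributed process group), so
# this sketch exercises the wrapper in eval mode.
def _example_wrap_shuffle():
    from torch import nn
    class ToyHead(nn.Module):
        def forward(self, x):
            return x * 2
    ShuffledToyHead = wrap_shuffle(ToyHead, "forward")
    assert ShuffledToyHead.__name__ == "ToyHeadWithShuffle"
    head = ShuffledToyHead().eval()  # eval mode: no cross-GPU shuffle is performed
    return head(torch.ones(1))  # tensor([2.])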
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head._target_ = wrap_shuffle(FastRCNNConvFCHead, "forward")
model.roi_heads.mask_head._target_ = wrap_shuffle(MaskRCNNConvUpsampleHead, "layers")
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_shuffle.py
|
from detectron2.model_zoo import get_config
model = get_config("common/models/mask_rcnn_fpn.py").model
model.backbone.bottom_up.freeze_at = 2
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "BN"
# 4conv1fc head
model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
model.roi_heads.box_head.fc_dims = [1024]
dataloader = get_config("common/data/coco.py").dataloader
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x
optimizer = get_config("common/optim.py").SGD
train = get_config("common/train.py").train
train.max_iter = 270000 # 3x for batchsize = 16
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead.py
|
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "SyncBN"
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py
|
from torch.nn import BatchNorm2d
from torch.nn import functional as F
class BatchNormBatchStat(BatchNorm2d):
"""
BN that uses batch stat in inference
"""
def forward(self, input):
if self.training:
return super().forward(input)
return F.batch_norm(input, None, None, self.weight, self.bias, True, 1.0, self.eps)
# After training with the base config, it's sufficient to load its model with
# this config only for inference -- because the training-time behavior is identical.
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = BatchNormBatchStat
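# Editor's note: a minimal sketch, not part of the original config, checking that
# BatchNormBatchStat keeps normalizing with per-batch statistics even in eval mode,
# so each output channel is (approximately) zero-mean.
def _example_batch_stat_bn():
    import torch
    bn = BatchNormBatchStat(4).eval()
    x = torch.randn(8, 4, 16, 16) * 3 + 5  # input with non-trivial mean and std
    y = bn(x)
    assert y.mean(dim=(0, 2, 3)).abs().max() < 1e-3
    return y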
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_batch_stats.py
|
from typing import List
import torch
from torch import Tensor, nn
from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
def apply_sequential(inputs, modules):
for mod in modules:
if isinstance(mod, (nn.BatchNorm2d, nn.SyncBatchNorm)):
# for BN layer, normalize all inputs together
shapes = [i.shape for i in inputs]
spatial_sizes = [s[2] * s[3] for s in shapes]
x = [i.flatten(2) for i in inputs]
x = torch.cat(x, dim=2).unsqueeze(3)
x = mod(x).split(spatial_sizes, dim=2)
inputs = [i.view(s) for s, i in zip(shapes, x)]
else:
inputs = [mod(i) for i in inputs]
return inputs
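# Editor's note: an illustrative sketch, not part of the original config, showing that
# apply_sequential concatenates all feature levels along one spatial axis so a single
# BN layer normalizes with statistics shared across levels, while per-level spatial
# shapes are restored afterwards.
def _example_apply_sequential():
    feats = [torch.randn(2, 8, 32, 32), torch.randn(2, 8, 16, 16)]
    modules = [nn.Conv2d(8, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU()]
    outs = apply_sequential(feats, modules)
    assert [o.shape for o in outs] == [f.shape for f in feats]
    return outs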
class RetinaNetHead_SharedTrainingBN(RetinaNetHead):
def forward(self, features: List[Tensor]):
logits = apply_sequential(features, list(self.cls_subnet) + [self.cls_score])
bbox_reg = apply_sequential(features, list(self.bbox_subnet) + [self.bbox_pred])
return logits, bbox_reg
from .retinanet_SyncBNhead import model, dataloader, lr_multiplier, optimizer, train
model.head._target_ = RetinaNetHead_SharedTrainingBN
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead_SharedTraining.py
|
from detectron2.model_zoo import get_config
model = get_config("common/models/retinanet.py").model
model.backbone.bottom_up.freeze_at = 2
model.head.norm = "SyncBN"
dataloader = get_config("common/data/coco.py").dataloader
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x
optimizer = get_config("common/optim.py").SGD
train = get_config("common/train.py").train
optimizer.lr = 0.01
train.max_iter = 270000 # 3x for batchsize = 16
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "tensormask", "layers", "csrc")
main_source = os.path.join(extensions_dir, "vision.cpp")
sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
os.path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
        # It would be better if PyTorch did this by default.
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"tensormask._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="tensormask",
version="0.1",
author="FAIR",
packages=find_packages(exclude=("configs", "tests")),
python_requires=">=3.6",
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
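# Editor's note, not part of the original setup script: the extension is typically built
# with an editable install run from this project directory, e.g. `pip install -e .`.
# Setting the environment variable FORCE_CUDA=1 (read in get_extensions() above) forces
# the CUDA kernels to be compiled even when no GPU is visible at build time.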
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
TensorMask Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from tensormask import add_tensormask_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, output_dir=output_folder)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tensormask_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
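# Editor's note, not part of the original script: typical invocations, using only the
# arguments this script reads via default_argument_parser() above. The config path is a
# placeholder; point it at one of the TensorMask configs shipped with this project.
#
#   ./train_net.py --config-file configs/<tensormask_config>.yaml --num-gpus 8
#   ./train_net.py --config-file configs/<tensormask_config>.yaml --eval-only \
#       MODEL.WEIGHTS /path/to/checkpoint.pth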
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tests/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch.autograd import gradcheck
from tensormask.layers.swap_align2nat import SwapAlign2Nat
class SwapAlign2NatTest(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_swap_align2nat_gradcheck_cuda(self):
dtype = torch.float64
device = torch.device("cuda")
m = SwapAlign2Nat(2).to(dtype=dtype, device=device)
x = torch.rand(2, 4, 10, 10, dtype=dtype, device=device, requires_grad=True)
self.assertTrue(gradcheck(m, x), "gradcheck failed for SwapAlign2Nat CUDA")
def _swap_align2nat(self, tensor, lambda_val):
"""
        Basic helper that runs SwapAlign2Nat on a single input tensor and returns a numpy array.
"""
op = SwapAlign2Nat(lambda_val, pad_val=0.0)
input = torch.from_numpy(tensor[None, :, :, :].astype("float32"))
output = op.forward(input.cuda()).cpu().numpy()
return output[0]
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tests/test_swap_align2nat.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_tensormask_config(cfg):
"""
Add config for TensorMask.
"""
cfg.MODEL.TENSOR_MASK = CN()
# Anchor parameters
cfg.MODEL.TENSOR_MASK.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6", "p7"]
# Convolutions to use in the towers
cfg.MODEL.TENSOR_MASK.NUM_CONVS = 4
# Number of foreground classes.
cfg.MODEL.TENSOR_MASK.NUM_CLASSES = 80
# Channel size for the classification tower
cfg.MODEL.TENSOR_MASK.CLS_CHANNELS = 256
cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.05
# Only the top (1000 * #levels) candidate boxes across all levels are
# considered jointly during test (to improve speed)
cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST = 6000
cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST = 0.5
# Box parameters
# Channel size for the box tower
cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS = 128
# Weights on (dx, dy, dw, dh)
cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS = (1.5, 1.5, 0.75, 0.75)
# Loss parameters
cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA = 3.0
cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA = 0.3
# Mask parameters
# Channel size for the mask tower
cfg.MODEL.TENSOR_MASK.MASK_CHANNELS = 128
# Mask loss weight
cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT = 2.0
# weight on positive pixels within the mask
cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT = 1.5
# Whether to predict in the aligned representation
cfg.MODEL.TENSOR_MASK.ALIGNED_ON = False
# Whether to use the bipyramid architecture
cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON = False
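# Editor's note: a minimal sketch, not part of the original file, showing how this helper
# extends detectron2's default config before a yaml config file is merged in.
def _example_build_tensormask_cfg():
    from detectron2.config import get_cfg
    cfg = get_cfg()
    add_tensormask_config(cfg)
    # The new keys now exist and can be overridden like any other option.
    cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.1
    return cfg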
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import math
from typing import List
import torch
import torch.nn.functional as F
from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss
from torch import nn
from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K
from detectron2.structures import Boxes, ImageList, Instances
from tensormask.layers import SwapAlign2Nat
__all__ = ["TensorMask"]
def permute_all_cls_and_box_to_N_HWA_K_and_concat(pred_logits, pred_anchor_deltas, num_classes=80):
"""
Rearrange the tensor layout from the network output, i.e.:
list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)
to per-image predictions, i.e.:
Tensor: of shape (N x sum(Hi x Wi x A), K)
"""
# for each feature level, permute the outputs to make them be in the
# same format as the labels.
pred_logits_flattened = [permute_to_N_HWA_K(x, num_classes) for x in pred_logits]
pred_anchor_deltas_flattened = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
pred_logits = cat(pred_logits_flattened, dim=1).view(-1, num_classes)
pred_anchor_deltas = cat(pred_anchor_deltas_flattened, dim=1).view(-1, 4)
return pred_logits, pred_anchor_deltas
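# Editor's note: a shape-only sketch, not part of the original file, using two toy
# feature levels with N=2 images, A=2 anchors and K=3 classes.
def _example_permute_and_concat_shapes():
    N, A, K = 2, 2, 3
    pred_logits = [torch.zeros(N, A * K, 8, 8), torch.zeros(N, A * K, 4, 4)]
    pred_deltas = [torch.zeros(N, A * 4, 8, 8), torch.zeros(N, A * 4, 4, 4)]
    logits, deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(
        pred_logits, pred_deltas, num_classes=K
    )
    # R = N * sum(Hi * Wi * A) = 2 * (8 * 8 * 2 + 4 * 4 * 2) = 320
    assert logits.shape == (320, K) and deltas.shape == (320, 4)
    return logits, deltas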
def _assignment_rule(
gt_boxes,
anchor_boxes,
unit_lengths,
min_anchor_size,
scale_thresh=2.0,
spatial_thresh=1.0,
uniqueness_on=True,
):
"""
Given two lists of boxes of N ground truth boxes and M anchor boxes,
compute the assignment between the two, following the assignment rules in
https://arxiv.org/abs/1903.12174.
The box order must be (xmin, ymin, xmax, ymax), so please make sure to convert
to BoxMode.XYXY_ABS before calling this function.
Args:
gt_boxes, anchor_boxes (Boxes): two Boxes. Contains N & M boxes/anchors, respectively.
unit_lengths (Tensor): Contains the unit lengths of M anchor boxes.
min_anchor_size (float): Minimum size of the anchor, in pixels
scale_thresh (float): The `scale` threshold: the maximum size of the anchor
should not be greater than scale_thresh x max(h, w) of
the ground truth box.
spatial_thresh (float): The `spatial` threshold: the l2 distance between the
center of the anchor and the ground truth box should not
be greater than spatial_thresh x u where u is the unit length.
Returns:
matches (Tensor[int64]): a vector of length M, where matches[i] is a matched
ground-truth index in [0, N)
        match_labels (Tensor[int8]): a vector of length M, where match_labels[i] indicates
            whether anchor i is labeled as foreground (1), background (0) or ignored (-1)
"""
gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor
N = gt_boxes.shape[0]
M = anchor_boxes.shape[0]
    if N == 0 or M == 0:
        # Per the docstring above, both outputs have length M (one entry per anchor).
        return (
            gt_boxes.new_full((M,), 0, dtype=torch.int64),
            gt_boxes.new_full((M,), -1, dtype=torch.int8),
        )
# Containment rule
lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2]) # [N,M,2]
rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:]) # [N,M,2]
union = cat([lt, rb], dim=2) # [N,M,4]
dummy_gt_boxes = torch.zeros_like(gt_boxes)
anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :] # [N,M,4]
contain_matrix = torch.all(union == anchor, dim=2) # [N,M]
# Centrality rule, scale
gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0] # [N]
gt_size_upper = gt_size_lower * scale_thresh # [N]
# Fall back for small objects
gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size
# Due to sampling of locations, the anchor sizes are deducted with sampling strides
anchor_size = (
torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths
) # [M]
size_diff_upper = gt_size_upper[:, None] - anchor_size # [N,M]
scale_matrix = size_diff_upper >= 0 # [N,M]
# Centrality rule, spatial
gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2 # [N,2]
anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2 # [M,2]
offset_center = gt_center[:, None, :] - anchor_center[:, :] # [N,M,2]
offset_center /= unit_lengths[:, None] # [N,M,2]
spatial_square = spatial_thresh * spatial_thresh
spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square
assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int()
# assign_matrix is N (gt) x M (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = assign_matrix.max(dim=0)
match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
match_labels[matched_vals == 0] = 0
match_labels[matched_vals == 1] = 1
# find all the elements that match to ground truths multiple times
not_unique_idxs = assign_matrix.sum(dim=0) > 1
if uniqueness_on:
match_labels[not_unique_idxs] = 0
else:
match_labels[not_unique_idxs] = -1
return matches, match_labels
# TODO make the paste_mask function in d2 core support mask list
def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5):
"""
Paste a list of masks that are of various resolutions (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Args:
masks (list(Tensor)): A list of Tensor of shape (1, Hmask_i, Wmask_i).
Values are in [0, 1]. The list length, Bimg, is the
number of detected object instances in the image.
boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond
to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
        img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
            number of detected object instances and Himage, Wimage are the image height
            and width. img_masks[i] is a binary mask for object instance i.
"""
if len(masks) == 0:
return torch.empty((0, 1) + image_shape, dtype=torch.uint8)
# Loop over masks groups. Each group has the same mask prediction size.
img_masks = []
ind_masks = []
mask_sizes = torch.tensor([m.shape[-1] for m in masks])
unique_sizes = torch.unique(mask_sizes)
for msize in unique_sizes.tolist():
cur_ind = torch.where(mask_sizes == msize)[0]
ind_masks.append(cur_ind)
cur_masks = cat([masks[i] for i in cur_ind])
cur_boxes = boxes[cur_ind]
img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold))
img_masks = cat(img_masks)
ind_masks = cat(ind_masks)
img_masks_out = torch.empty_like(img_masks)
img_masks_out[ind_masks, :, :] = img_masks
return img_masks_out
def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5):
"""
Post-process the output boxes for TensorMask.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will postprocess the raw outputs of TensorMask
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place. Note that it does not contain the field
`pred_masks`, which is provided by another input `result_masks`.
result_mask_info (list[Tensor], Boxes): a pair of two items for mask related results.
The first item is a list of #detection tensors, each is the predicted masks.
The second item is the anchors corresponding to the predicted masks.
output_height, output_width: the desired output resolution.
Returns:
Instances: the postprocessed output from the model, based on the output resolution
"""
scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
results = Instances((output_height, output_width), **results.get_fields())
output_boxes = results.pred_boxes
output_boxes.tensor[:, 0::2] *= scale_x
output_boxes.tensor[:, 1::2] *= scale_y
output_boxes.clip(results.image_size)
inds_nonempty = output_boxes.nonempty()
results = results[inds_nonempty]
result_masks, result_anchors = result_mask_info
if result_masks:
result_anchors.tensor[:, 0::2] *= scale_x
result_anchors.tensor[:, 1::2] *= scale_y
result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i]
results.pred_masks = _paste_mask_lists_in_image(
result_masks,
result_anchors[inds_nonempty],
results.image_size,
threshold=mask_threshold,
)
return results
class TensorMaskAnchorGenerator(DefaultAnchorGenerator):
"""
For a set of image sizes and feature maps, computes a set of anchors for TensorMask.
It also computes the unit lengths and indexes for each anchor box.
"""
def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes):
anchors = []
unit_lengths = []
indexes = []
for lvl, (size, stride, base_anchors) in enumerate(
zip(grid_sizes, self.strides, self.cell_anchors)
):
grid_height, grid_width = size
device = base_anchors.device
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2)
# Stack anchors in shapes of (HWA, 4)
cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4)
anchors.append(cur_anchor)
unit_lengths.append(
torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device)
)
# create mask indexes using mesh grid
shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device)
shifts_i = torch.zeros((1,), dtype=torch.int64, device=device)
shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device)
shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device)
shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device)
grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a)
indexes.append(torch.stack(grids, dim=5).view(-1, 5))
return anchors, unit_lengths, indexes
def forward(self, features):
"""
Returns:
list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes.
The Boxes contains anchors of this image on the specific feature level.
list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
The tensor contains strides, or unit lengths for the anchors.
list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
The Tensor contains indexes for the anchors, with the last dimension meaning
                (L, N, H, W, A), where L is level, N is image (not set yet), H is height,
W is width, and A is anchor.
"""
num_images = len(features[0])
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes(
grid_sizes
)
# Convert anchors from Tensor to Boxes
anchors_per_im = [Boxes(x) for x in anchors_list]
# TODO it can be simplified to not return duplicated information for
# each image, just like detectron2's own AnchorGenerator
anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)]
unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)]
indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)]
return anchors, unit_lengths, indexes
@META_ARCH_REGISTRY.register()
class TensorMask(nn.Module):
"""
TensorMask model. Creates FPN backbone, anchors and a head for classification
and box regression. Calculates and applies proper losses to class, box, and
masks.
"""
def __init__(self, cfg):
super().__init__()
# fmt: off
self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES
self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES)
# Loss parameters:
self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA
# Inference parameters:
self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST
self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE
# Mask parameters:
self.mask_on = cfg.MODEL.MASK_ON
self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT
self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT,
dtype=torch.float32)
self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
# fmt: on
# build the backbone
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
feature_strides = [x.stride for x in feature_shapes]
# build anchors
self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes)
self.num_anchors = self.anchor_generator.num_cell_anchors[0]
anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0]
self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level]
self.min_anchor_size = min(anchors_min_level) - feature_strides[0]
# head of the TensorMask
self.head = TensorMaskHead(
cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes
)
# box transform
self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS)
self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False)
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DetectionTransform` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
image: Tensor, image in (C, H, W) format.
instances: Instances
Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
losses (dict[str: Tensor]): mapping from a named loss to a tensor
storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
features = [features[f] for f in self.in_features]
# apply the TensorMask head
pred_logits, pred_deltas, pred_masks = self.head(features)
        # generate anchors from the feature maps (they depend only on feature shapes, not image content)
anchors, unit_lengths, indexes = self.anchor_generator(features)
if self.training:
# get ground truths for class labels and box targets, it will label each anchor
gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth(
anchors, unit_lengths, indexes, gt_instances
)
# compute the loss
return self.losses(
gt_class_info,
gt_delta_info,
gt_mask_info,
num_fg,
pred_logits,
pred_deltas,
pred_masks,
)
else:
# do inference to get the output
results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images)
processed_results = []
for results_im, input_im, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_im.get("height", image_size[0])
width = input_im.get("width", image_size[1])
# this is to do post-processing with the image size
result_box, result_mask = results_im
r = _postprocess(result_box, result_mask, height, width)
processed_results.append({"instances": r})
return processed_results
def losses(
self,
gt_class_info,
gt_delta_info,
gt_mask_info,
num_fg,
pred_logits,
pred_deltas,
pred_masks,
):
"""
Args:
For `gt_class_info`, `gt_delta_info`, `gt_mask_info` and `num_fg` parameters, see
:meth:`TensorMask.get_ground_truth`.
For `pred_logits`, `pred_deltas` and `pred_masks`, see
:meth:`TensorMaskHead.forward`.
Returns:
losses (dict[str: Tensor]): mapping from a named loss to a scalar tensor
storing the loss. Used during training only. The potential dict keys are:
"loss_cls", "loss_box_reg" and "loss_mask".
"""
gt_classes_target, gt_valid_inds = gt_class_info
gt_deltas, gt_fg_inds = gt_delta_info
gt_masks, gt_mask_inds = gt_mask_info
loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device)
# classification and regression
pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(
pred_logits, pred_deltas, self.num_classes
)
loss_cls = (
sigmoid_focal_loss_star_jit(
pred_logits[gt_valid_inds],
gt_classes_target[gt_valid_inds],
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
/ loss_normalizer
)
if num_fg == 0:
loss_box_reg = pred_deltas.sum() * 0
else:
loss_box_reg = (
smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum")
/ loss_normalizer
)
losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
# mask prediction
if self.mask_on:
loss_mask = 0
for lvl in range(self.num_levels):
cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
for anc in range(self.num_anchors):
cur_gt_mask_inds = gt_mask_inds[lvl][anc]
if cur_gt_mask_inds is None:
loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0
else:
cur_mask_size = self.mask_sizes[anc] * cur_level_factor
# TODO maybe there are numerical issues when mask sizes are large
cur_size_divider = torch.tensor(
self.mask_loss_weight / (cur_mask_size ** 2),
dtype=torch.float32,
device=self.device,
)
cur_pred_masks = pred_masks[lvl][anc][
cur_gt_mask_inds[:, 0], # N
:, # V x U
cur_gt_mask_inds[:, 1], # H
cur_gt_mask_inds[:, 2], # W
]
loss_mask += F.binary_cross_entropy_with_logits(
cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), # V, U
gt_masks[lvl][anc].to(dtype=torch.float32),
reduction="sum",
weight=cur_size_divider,
pos_weight=self.mask_pos_weight,
)
losses["loss_mask"] = loss_mask / loss_normalizer
return losses
@torch.no_grad()
def get_ground_truth(self, anchors, unit_lengths, indexes, targets):
"""
Args:
anchors (list[list[Boxes]]): a list of N=#image elements. Each is a
list of #feature level Boxes. The Boxes contains anchors of
this image on the specific feature level.
unit_lengths (list[list[Tensor]]): a list of N=#image elements. Each is a
list of #feature level Tensor. The tensor contains unit lengths for anchors of
this image on the specific feature level.
indexes (list[list[Tensor]]): a list of N=#image elements. Each is a
list of #feature level Tensor. The tensor contains the 5D index of
                each anchor, the last dimension meaning (L, N, H, W, A), where L
                is level, N is image, H is height, W is width, and A is anchor.
targets (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
Returns:
gt_class_info (Tensor, Tensor): A pair of two tensors for classification.
                The first one is a one-hot float tensor of shape (R, #classes) storing the
                ground-truth labels for each anchor. R is the total number of anchors in the batch.
                The second one is a boolean tensor of shape (R,) indicating which
                anchors are valid for loss computation and which are not.
gt_delta_info (Tensor, Tensor): A pair of two tensors for boxes.
The first one, of shape (F, 4). F=#foreground anchors.
The last dimension represents ground-truth box2box transform
targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box.
Only foreground anchors have values in this tensor. Could be `None` if F=0.
                The second one, of shape (R,), is a boolean tensor indicating which anchors
are foreground ones used for box regression. Could be `None` if F=0.
gt_mask_info (list[list[Tensor]], list[list[Tensor]]): A pair of two lists for masks.
The first one is a list of P=#feature level elements. Each is a
list of A=#anchor tensors. Each tensor contains the ground truth
masks of the same size and for the same feature level. Could be `None`.
The second one is a list of P=#feature level elements. Each is a
list of A=#anchor tensors. Each tensor contains the location of the ground truth
masks of the same size and for the same feature level. The second dimension means
(N, H, W), where N is image, H is height, and W is width. Could be `None`.
num_fg (int): F=#foreground anchors, used later for loss normalization.
"""
gt_classes = []
gt_deltas = []
gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
anchors = [Boxes.cat(anchors_i) for anchors_i in anchors]
unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths]
indexes = [cat(indexes_i) for indexes_i in indexes]
num_fg = 0
for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate(
zip(anchors, unit_lengths, indexes, targets)
):
# Initialize all
gt_classes_i = torch.full_like(
unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device
)
# Ground truth classes
has_gt = len(targets_im) > 0
if has_gt:
# Compute the pairwise matrix
gt_matched_inds, anchor_labels = _assignment_rule(
targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size
)
# Find the foreground instances
fg_inds = anchor_labels == 1
fg_anchors = anchors_im[fg_inds]
num_fg += len(fg_anchors)
# Find the ground truths for foreground instances
gt_fg_matched_inds = gt_matched_inds[fg_inds]
# Assign labels for foreground instances
gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds]
# Anchors with label -1 are ignored, others are left as negative
gt_classes_i[anchor_labels == -1] = -1
# Boxes
# Ground truth box regression, only for foregrounds
matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes
# Compute box regression offsets for foregrounds only
gt_deltas_i = self.box2box_transform.get_deltas(
fg_anchors.tensor, matched_gt_boxes.tensor
)
gt_deltas.append(gt_deltas_i)
# Masks
if self.mask_on:
# Compute masks for each level and each anchor
matched_indexes = indexes_im[fg_inds, :]
for lvl in range(self.num_levels):
ids_lvl = matched_indexes[:, 0] == lvl
if torch.any(ids_lvl):
cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
for anc in range(self.num_anchors):
ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc)
if torch.any(ids_lvl_anchor):
gt_masks[lvl][anc].append(
targets_im[
gt_fg_matched_inds[ids_lvl_anchor]
].gt_masks.crop_and_resize(
fg_anchors[ids_lvl_anchor].tensor,
self.mask_sizes[anc] * cur_level_factor,
)
)
# Select (N, H, W) dimensions
gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4]
# Set the image index to the current image
gt_mask_inds_lvl_anc[:, 0] = i
gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc)
gt_classes.append(gt_classes_i)
# Classes and boxes
gt_classes = cat(gt_classes)
gt_valid_inds = gt_classes >= 0
gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes)
gt_classes_target = torch.zeros(
(gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device
)
gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1
gt_deltas = cat(gt_deltas) if gt_deltas else None
# Masks
gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks]
gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds]
return (
(gt_classes_target, gt_valid_inds),
(gt_deltas, gt_fg_inds),
(gt_masks, gt_mask_inds),
num_fg,
)
def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images):
"""
Arguments:
pred_logits, pred_deltas, pred_masks: Same as the output of:
meth:`TensorMaskHead.forward`
anchors, indexes: Same as the input of meth:`TensorMask.get_ground_truth`
images (ImageList): the input images
Returns:
results (List[Instances]): a list of #images elements.
"""
assert len(anchors) == len(images)
results = []
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas]
pred_logits = cat(pred_logits, dim=1)
pred_deltas = cat(pred_deltas, dim=1)
for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)):
# Get the size of the current image
image_size = images.image_sizes[img_idx]
logits_im = pred_logits[img_idx]
deltas_im = pred_deltas[img_idx]
if self.mask_on:
masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks]
else:
masks_im = [None] * self.num_levels
results_im = self.inference_single_image(
logits_im,
deltas_im,
masks_im,
Boxes.cat(anchors_im),
cat(indexes_im),
tuple(image_size),
)
results.append(results_im)
return results
def inference_single_image(
self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Arguments:
pred_logits (list[Tensor]): list of #feature levels. Each entry contains
tensor of size (AxHxW, K)
pred_deltas (list[Tensor]): Same shape as 'pred_logits' except that K becomes 4.
pred_masks (list[list[Tensor]]): List of #feature levels, each is a list of #anchors.
Each entry contains tensor of size (M_i*M_i, H, W). `None` if mask_on=False.
anchors (list[Boxes]): list of #feature levels. Each entry contains
a Boxes object, which contains all the anchors for that
image in that feature level.
image_size (tuple(H, W)): a tuple of the image height and width.
Returns:
Same as `inference`, but for only one image.
"""
pred_logits = pred_logits.flatten().sigmoid_()
# We get top locations across all levels to accelerate the inference speed,
# which does not seem to affect the accuracy.
# First select values above the threshold
logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0]
# Then get the top values
num_topk = min(self.topk_candidates, logits_top_idxs.shape[0])
pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True)
# Keep top k scoring values
pred_prob = pred_prob[:num_topk]
# Keep top k values
top_idxs = logits_top_idxs[topk_idxs[:num_topk]]
# class index
cls_idxs = top_idxs % self.num_classes
# HWA index
top_idxs //= self.num_classes
# predict boxes
pred_boxes = self.box2box_transform.apply_deltas(
pred_deltas[top_idxs], anchors[top_idxs].tensor
)
# apply nms
keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold)
# pick the top ones
keep = keep[: self.detections_im]
results = Instances(image_size)
results.pred_boxes = Boxes(pred_boxes[keep])
results.scores = pred_prob[keep]
results.pred_classes = cls_idxs[keep]
# deal with masks
result_masks, result_anchors = [], None
if self.mask_on:
# index and anchors, useful for masks
top_indexes = indexes[top_idxs]
top_anchors = anchors[top_idxs]
result_indexes = top_indexes[keep]
result_anchors = top_anchors[keep]
# Get masks and do sigmoid
for lvl, _, h, w, anc in result_indexes.tolist():
cur_size = self.mask_sizes[anc] * (2 ** lvl if self.bipyramid_on else 1)
result_masks.append(
torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size))
)
return results, (result_masks, result_anchors)
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
class TensorMaskHead(nn.Module):
def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]):
"""
TensorMask head.
"""
super().__init__()
# fmt: off
self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
in_channels = input_shape[0].channels
num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS
num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS
# box parameters
bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS
# mask parameters
self.mask_on = cfg.MODEL.MASK_ON
self.mask_sizes = mask_sizes
mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS
self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON
self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
# fmt: on
# class subnet
cls_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
cls_subnet.append(
nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = cls_channels
cls_subnet.append(nn.ReLU())
self.cls_subnet = nn.Sequential(*cls_subnet)
self.cls_score = nn.Conv2d(
cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
)
modules_list = [self.cls_subnet, self.cls_score]
# box subnet
bbox_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
bbox_subnet.append(
nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = bbox_channels
bbox_subnet.append(nn.ReLU())
self.bbox_subnet = nn.Sequential(*bbox_subnet)
self.bbox_pred = nn.Conv2d(
cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
modules_list.extend([self.bbox_subnet, self.bbox_pred])
# mask subnet
if self.mask_on:
mask_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
mask_subnet.append(
nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = mask_channels
mask_subnet.append(nn.ReLU())
self.mask_subnet = nn.Sequential(*mask_subnet)
modules_list.append(self.mask_subnet)
for mask_size in self.mask_sizes:
cur_mask_module = "mask_pred_%02d" % mask_size
self.add_module(
cur_mask_module,
nn.Conv2d(
cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0
),
)
modules_list.append(getattr(self, cur_mask_module))
if self.align_on:
if self.bipyramid_on:
for lvl in range(num_levels):
cur_mask_module = "align2nat_%02d" % lvl
lambda_val = 2 ** lvl
setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val))
# Also the fusing layer, stay at the same channel size
mask_fuse = [
nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
]
self.mask_fuse = nn.Sequential(*mask_fuse)
modules_list.append(self.mask_fuse)
else:
self.align2nat = SwapAlign2Nat(1)
# Initialization
for modules in modules_list:
for layer in modules.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
torch.nn.init.constant_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - 0.01) / 0.01))
torch.nn.init.constant_(self.cls_score.bias, bias_value)
def forward(self, features):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list corresponds to a different feature level.
Returns:
pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
The tensor predicts the classification probability
at each spatial position for each of the A anchors and K object
classes.
pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
The tensor predicts 4-vector (dx,dy,dw,dh) box
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth box.
pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of
                A (=#anchors) tensors of shape (N, M_{i,a} x M_{i,a}, Hi, Wi).
                Each tensor predicts a dense set of M_{i,a} x M_{i,a} masks at every location.
"""
pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features]
pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features]
pred_masks = None
if self.mask_on:
mask_feats = [self.mask_subnet(x) for x in features]
if self.bipyramid_on:
mask_feat_high_res = mask_feats[0]
H, W = mask_feat_high_res.shape[-2:]
mask_feats_up = []
for lvl, mask_feat in enumerate(mask_feats):
lambda_val = 2.0 ** lvl
mask_feat_up = mask_feat
if lvl > 0:
mask_feat_up = F.interpolate(
mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False
)
mask_feats_up.append(
self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res)
)
mask_feats = mask_feats_up
pred_masks = []
for lvl, mask_feat in enumerate(mask_feats):
cur_masks = []
for mask_size in self.mask_sizes:
cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size)
cur_mask = cur_mask_module(mask_feat)
if self.align_on:
if self.bipyramid_on:
cur_mask_module = getattr(self, "align2nat_%02d" % lvl)
cur_mask = cur_mask_module(cur_mask)
else:
cur_mask = self.align2nat(cur_mask)
cur_masks.append(cur_mask)
pred_masks.append(cur_masks)
return pred_logits, pred_deltas, pred_masks
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/arch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_tensormask_config
from .arch import TensorMask
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from tensormask import _C
class _SwapAlign2Nat(Function):
@staticmethod
def forward(ctx, X, lambda_val, pad_val):
ctx.lambda_val = lambda_val
ctx.input_shape = X.size()
Y = _C.swap_align2nat_forward(X, lambda_val, pad_val)
return Y
@staticmethod
@once_differentiable
def backward(ctx, gY):
lambda_val = ctx.lambda_val
bs, ch, h, w = ctx.input_shape
gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w)
return gX, None, None
swap_align2nat = _SwapAlign2Nat.apply
class SwapAlign2Nat(nn.Module):
"""
The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174.
Given an input tensor that predicts masks of shape (N, C=VxU, H, W),
    applying the op returns masks of shape (N, V'xU', H', W') where
the unit lengths of (V, U) and (H, W) are swapped, and the mask representation
is transformed from aligned to natural.
Args:
lambda_val (int): the relative unit length ratio between (V, U) and (H, W),
as we always have larger unit lengths for (V, U) than (H, W),
lambda_val is always >= 1.
pad_val (float): padding value for the values falling outside of the input
                    tensor; the default is -6 because sigmoid(-6) is ~0, indicating
                    that there are no masks outside of the tensor.
"""
def __init__(self, lambda_val, pad_val=-6.0):
super(SwapAlign2Nat, self).__init__()
self.lambda_val = lambda_val
self.pad_val = pad_val
def forward(self, X):
return swap_align2nat(X, self.lambda_val, self.pad_val)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "lambda_val=" + str(self.lambda_val)
tmpstr += ", pad_val=" + str(self.pad_val)
tmpstr += ")"
return tmpstr
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/layers/swap_align2nat.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .swap_align2nat import SwapAlign2Nat, swap_align2nat
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/layers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
DeepLab Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import CityscapesSemSegEvaluator, DatasetEvaluators, SemSegEvaluator
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
)
)
augs.append(T.RandomFlip())
return augs
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "sem_seg":
return SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
        It calls the DeepLab :func:`build_lr_scheduler` imported above instead of the default detectron2 one.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List
import torch
from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler):
"""
Poly learning rate schedule used to train DeepLab.
Paper: DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs.
Reference: https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/utils/train_utils.py#L337 # noqa
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
power: float = 0.9,
constant_ending: float = 0.0,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.power = power
self.constant_ending = constant_ending
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
if self.constant_ending > 0 and warmup_factor == 1.0:
# Constant ending lr.
if (
math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
< self.constant_ending
):
return [base_lr * self.constant_ending for base_lr in self.base_lrs]
return [
base_lr * warmup_factor * math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
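# Editor's note: a minimal sketch, not part of the original file. The optimizer and the
# small iteration counts are made up; they only illustrate the warmup ramp followed by
# the polynomial decay toward zero at max_iters.
def _example_warmup_poly_lr():
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.01)
    scheduler = WarmupPolyLR(optimizer, max_iters=100, warmup_iters=10)
    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step()
        lrs.append(optimizer.param_groups[0]["lr"])
    return lrs  # ramps up over the first 10 iterations, then decays polynomially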
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/lr_scheduler.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
def add_deeplab_config(cfg):
"""
Add config for DeepLab.
"""
# We retry random cropping until no single category in semantic segmentation GT occupies more
    # than a `SINGLE_CATEGORY_MAX_AREA` fraction of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
# Used for `poly` learning rate schedule.
cfg.SOLVER.POLY_LR_POWER = 0.9
cfg.SOLVER.POLY_LR_CONSTANT_ENDING = 0.0
# Loss type, choose from `cross_entropy`, `hard_pixel_mining`.
cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE = "hard_pixel_mining"
# DeepLab settings
cfg.MODEL.SEM_SEG_HEAD.PROJECT_FEATURES = ["res2"]
cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS = [48]
cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256
cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS = [6, 12, 18]
cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT = 0.1
cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV = False
# Backbone new configs
cfg.MODEL.RESNETS.RES4_DILATION = 1
cfg.MODEL.RESNETS.RES5_MULTI_GRID = [1, 2, 4]
# ResNet stem type from: `basic`, `deeplab`
cfg.MODEL.RESNETS.STEM_TYPE = "deeplab"
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .build_solver import build_lr_scheduler
from .config import add_deeplab_config
from .resnet import build_resnet_deeplab_backbone
from .semantic_seg import DeepLabV3Head, DeepLabV3PlusHead
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
class DeepLabCE(nn.Module):
"""
Hard pixel mining with cross entropy loss, for semantic segmentation.
This is used in TensorFlow DeepLab frameworks.
Paper: DeeperLab: Single-Shot Image Parser
Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 # noqa
Arguments:
ignore_label: Integer, label to ignore.
top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its
value < 1.0, only compute the loss for the top k percent pixels
(e.g., the top 20% pixels). This is useful for hard pixel mining.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None):
super(DeepLabCE, self).__init__()
self.top_k_percent_pixels = top_k_percent_pixels
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(
weight=weight, ignore_index=ignore_label, reduction="none"
)
def forward(self, logits, labels, weights=None):
if weights is None:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
else:
# Apply per-pixel loss weights.
pixel_losses = self.criterion(logits, labels) * weights
pixel_losses = pixel_losses.contiguous().view(-1)
if self.top_k_percent_pixels == 1.0:
return pixel_losses.mean()
top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel())
pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels)
return pixel_losses.mean()
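# Editor's note: a minimal usage sketch, not part of the original file. It evaluates the
# hard-pixel-mining loss on random logits and labels; shapes follow the usual
# nn.CrossEntropyLoss convention of (N, C, H, W) logits and (N, H, W) integer labels.
if __name__ == "__main__":
    criterion = DeepLabCE(ignore_label=255, top_k_percent_pixels=0.2)
    logits = torch.randn(2, 19, 64, 64)        # e.g. 19 Cityscapes classes
    labels = torch.randint(0, 19, (2, 64, 64))
    print(criterion(logits, labels))           # mean over the top-20% hardest pixels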
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling import BACKBONE_REGISTRY
from detectron2.modeling.backbone.resnet import (
BasicStem,
BottleneckBlock,
DeformBottleneckBlock,
ResNet,
)
class DeepLabStem(CNNBlockBase):
"""
The DeepLab ResNet stem (layers before the first residual block).
"""
def __init__(self, in_channels=3, out_channels=128, norm="BN"):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(
in_channels,
out_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
self.conv2 = Conv2d(
out_channels // 2,
out_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
self.conv3 = Conv2d(
out_channels // 2,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
weight_init.c2_msra_fill(self.conv2)
weight_init.c2_msra_fill(self.conv3)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
x = self.conv2(x)
x = F.relu_(x)
x = self.conv3(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
@BACKBONE_REGISTRY.register()
def build_resnet_deeplab_backbone(cfg, input_shape):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETS.NORM
if cfg.MODEL.RESNETS.STEM_TYPE == "basic":
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
elif cfg.MODEL.RESNETS.STEM_TYPE == "deeplab":
stem = DeepLabStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
else:
raise ValueError("Unknown stem type: {}".format(cfg.MODEL.RESNETS.STEM_TYPE))
# fmt: off
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.RESNETS.OUT_FEATURES
depth = cfg.MODEL.RESNETS.DEPTH
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
res4_dilation = cfg.MODEL.RESNETS.RES4_DILATION
res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
res5_multi_grid = cfg.MODEL.RESNETS.RES5_MULTI_GRID
# fmt: on
assert res4_dilation in {1, 2}, "res4_dilation cannot be {}.".format(res4_dilation)
assert res5_dilation in {1, 2, 4}, "res5_dilation cannot be {}.".format(res5_dilation)
if res4_dilation == 2:
# Always dilate res5 if res4 is dilated.
assert res5_dilation == 4
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
# Avoid creating variables without gradients
# It consumes extra memory and may cause allreduce to fail
out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
if stage_idx == 4:
dilation = res4_dilation
elif stage_idx == 5:
dilation = res5_dilation
else:
dilation = 1
first_stride = 1 if idx == 0 or dilation > 1 else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"out_channels": out_channels,
"norm": norm,
}
stage_kargs["bottleneck_channels"] = bottleneck_channels
stage_kargs["stride_in_1x1"] = stride_in_1x1
stage_kargs["dilation"] = dilation
stage_kargs["num_groups"] = num_groups
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
if stage_idx == 5:
stage_kargs.pop("dilation")
stage_kargs["dilation_per_block"] = [dilation * mg for mg in res5_multi_grid]
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features).freeze(freeze_at)
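# Editor's note: a usage sketch, not part of the original file. It builds the DeepLab
# ResNet backbone from the default config extended with add_deeplab_config (assumed to be
# importable as detectron2.projects.deeplab). With the defaults (DEPTH=50,
# OUT_FEATURES=["res4"]) the backbone returns a stride-16 "res4" feature map.
if __name__ == "__main__":
    import torch
    from detectron2.config import get_cfg
    from detectron2.layers import ShapeSpec
    from detectron2.projects.deeplab import add_deeplab_config
    cfg = get_cfg()
    add_deeplab_config(cfg)
    backbone = build_resnet_deeplab_backbone(cfg, ShapeSpec(channels=3))
    feats = backbone(torch.randn(1, 3, 224, 224))
    print({k: tuple(v.shape) for k, v in feats.items()})  # {'res4': (1, 1024, 14, 14)}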
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from detectron2.config import CfgNode
from detectron2.solver import build_lr_scheduler as build_d2_lr_scheduler
from .lr_scheduler import WarmupPolyLR
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupPolyLR":
return WarmupPolyLR(
optimizer,
cfg.SOLVER.MAX_ITER,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
power=cfg.SOLVER.POLY_LR_POWER,
constant_ending=cfg.SOLVER.POLY_LR_CONSTANT_ENDING,
)
else:
return build_d2_lr_scheduler(cfg, optimizer)
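# Editor's note: a usage sketch, not part of the original file. Selecting "WarmupPolyLR"
# in the config dispatches to the scheduler above; add_deeplab_config (assumed importable
# as detectron2.projects.deeplab) supplies the POLY_LR_* keys it reads.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    from detectron2.projects.deeplab import add_deeplab_config
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupPolyLR"
    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.01)
    scheduler = build_lr_scheduler(cfg, optimizer)
    print(type(scheduler).__name__)  # WarmupPolyLR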
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/build_solver.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import ASPP, Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .loss import DeepLabCE
@SEM_SEG_HEADS_REGISTRY.register()
class DeepLabV3PlusHead(nn.Module):
"""
A semantic segmentation head described in :paper:`DeepLabV3+`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
project_channels: List[int],
aspp_dilations: List[int],
aspp_dropout: float,
decoder_channels: List[int],
common_stride: int,
norm: Union[str, Callable],
train_size: Optional[Tuple],
loss_weight: float = 1.0,
loss_type: str = "cross_entropy",
ignore_value: int = -1,
num_classes: Optional[int] = None,
use_depthwise_separable_conv: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shape of the input features. They will be ordered by stride
and the last one (with largest stride) is used as the input to the
                decoder (i.e. the ASPP module); the rest are low-level features for
                the intermediate levels of the decoder.
project_channels (list[int]): a list of low-level feature channels.
The length should be len(in_features) - 1.
aspp_dilations (list(int)): a list of 3 dilations in ASPP.
aspp_dropout (float): apply dropout on the output of ASPP.
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "in_features"
(each element in "in_features" corresponds to one decoder stage).
common_stride (int): output stride of decoder.
norm (str or callable): normalization for all conv layers.
train_size (tuple): (height, width) of training images.
loss_weight (float): loss weight.
            loss_type (str): type of loss function, 2 options:
(1) "cross_entropy" is the standard cross entropy loss.
(2) "hard_pixel_mining" is the loss in DeepLab that samples
top k% hardest pixels.
ignore_value (int): category to be ignored during training.
num_classes (int): number of classes, if set to None, the decoder
will not construct a predictor.
use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
in ASPP and decoder.
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
# fmt: off
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
in_channels = [x[1].channels for x in input_shape]
in_strides = [x[1].stride for x in input_shape]
aspp_channels = decoder_channels[-1]
self.ignore_value = ignore_value
self.common_stride = common_stride # output stride
self.loss_weight = loss_weight
self.loss_type = loss_type
self.decoder_only = num_classes is None
self.use_depthwise_separable_conv = use_depthwise_separable_conv
# fmt: on
assert (
len(project_channels) == len(self.in_features) - 1
), "Expected {} project_channels, got {}".format(
len(self.in_features) - 1, len(project_channels)
)
assert len(decoder_channels) == len(
self.in_features
), "Expected {} decoder_channels, got {}".format(
len(self.in_features), len(decoder_channels)
)
self.decoder = nn.ModuleDict()
use_bias = norm == ""
for idx, in_channel in enumerate(in_channels):
decoder_stage = nn.ModuleDict()
if idx == len(self.in_features) - 1:
# ASPP module
if train_size is not None:
train_h, train_w = train_size
encoder_stride = in_strides[-1]
if train_h % encoder_stride or train_w % encoder_stride:
raise ValueError("Crop size need to be divisible by encoder stride.")
pool_h = train_h // encoder_stride
pool_w = train_w // encoder_stride
pool_kernel_size = (pool_h, pool_w)
else:
pool_kernel_size = None
project_conv = ASPP(
in_channel,
aspp_channels,
aspp_dilations,
norm=norm,
activation=F.relu,
pool_kernel_size=pool_kernel_size,
dropout=aspp_dropout,
use_depthwise_separable_conv=use_depthwise_separable_conv,
)
fuse_conv = None
else:
project_conv = Conv2d(
in_channel,
project_channels[idx],
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, project_channels[idx]),
activation=F.relu,
)
weight_init.c2_xavier_fill(project_conv)
if use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field,
# proposed in :paper:`Panoptic-DeepLab`.
fuse_conv = DepthwiseSeparableConv2d(
project_channels[idx] + decoder_channels[idx + 1],
decoder_channels[idx],
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
fuse_conv = nn.Sequential(
Conv2d(
project_channels[idx] + decoder_channels[idx + 1],
decoder_channels[idx],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[idx]),
activation=F.relu,
),
Conv2d(
decoder_channels[idx],
decoder_channels[idx],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[idx]),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(fuse_conv[0])
weight_init.c2_xavier_fill(fuse_conv[1])
decoder_stage["project_conv"] = project_conv
decoder_stage["fuse_conv"] = fuse_conv
self.decoder[self.in_features[idx]] = decoder_stage
if not self.decoder_only:
self.predictor = Conv2d(
decoder_channels[0], num_classes, kernel_size=1, stride=1, padding=0
)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if self.loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value)
elif self.loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
else:
raise ValueError("Unexpected loss type: %s" % self.loss_type)
@classmethod
def from_config(cls, cfg, input_shape):
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_size = cfg.INPUT.CROP.SIZE
else:
train_size = None
decoder_channels = [cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM] * (
len(cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES) - 1
) + [cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS]
ret = dict(
input_shape={
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
project_channels=cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS,
aspp_dilations=cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS,
aspp_dropout=cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT,
decoder_channels=decoder_channels,
common_stride=cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE,
norm=cfg.MODEL.SEM_SEG_HEAD.NORM,
train_size=train_size,
loss_weight=cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
loss_type=cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE,
ignore_value=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV,
)
return ret
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
y = self.layers(features)
if self.decoder_only:
# Output from self.layers() only contains decoder feature.
return y
if self.training:
return None, self.losses(y, targets)
else:
y = F.interpolate(
y, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return y, {}
def layers(self, features):
# Reverse feature maps into top-down order (from low to high resolution)
for f in self.in_features[::-1]:
x = features[f]
proj_x = self.decoder[f]["project_conv"](x)
if self.decoder[f]["fuse_conv"] is None:
                # This is the ASPP module
y = proj_x
else:
# Upsample y
y = F.interpolate(y, size=proj_x.size()[2:], mode="bilinear", align_corners=False)
y = torch.cat([proj_x, y], dim=1)
y = self.decoder[f]["fuse_conv"](y)
if not self.decoder_only:
y = self.predictor(y)
return y
def losses(self, predictions, targets):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
@SEM_SEG_HEADS_REGISTRY.register()
class DeepLabV3Head(nn.Module):
"""
A semantic segmentation head described in :paper:`DeepLabV3`.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
# fmt: off
self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
in_channels = [input_shape[f].channels for f in self.in_features]
aspp_channels = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS
aspp_dilations = cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS
self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE # output stride
norm = cfg.MODEL.SEM_SEG_HEAD.NORM
self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
self.loss_type = cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE
train_crop_size = cfg.INPUT.CROP.SIZE
aspp_dropout = cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT
use_depthwise_separable_conv = cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
# fmt: on
assert len(self.in_features) == 1
assert len(in_channels) == 1
# ASPP module
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_crop_h, train_crop_w = train_crop_size
if train_crop_h % self.common_stride or train_crop_w % self.common_stride:
raise ValueError("Crop size need to be divisible by output stride.")
pool_h = train_crop_h // self.common_stride
pool_w = train_crop_w // self.common_stride
pool_kernel_size = (pool_h, pool_w)
else:
pool_kernel_size = None
self.aspp = ASPP(
in_channels[0],
aspp_channels,
aspp_dilations,
norm=norm,
activation=F.relu,
pool_kernel_size=pool_kernel_size,
dropout=aspp_dropout,
use_depthwise_separable_conv=use_depthwise_separable_conv,
)
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if self.loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value)
elif self.loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
else:
raise ValueError("Unexpected loss type: %s" % self.loss_type)
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
x = features[self.in_features[0]]
x = self.aspp(x)
x = self.predictor(x)
if self.training:
return None, self.losses(x, targets)
else:
x = F.interpolate(
x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return x, {}
def losses(self, predictions, targets):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
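# Editor's note: a usage sketch, not part of the original file. It runs DeepLabV3Head in
# inference mode on dummy "res5" features. The config tweaks below (a single input
# feature and CONVS_DIM matched to ASPP_CHANNELS) are assumptions made so the head is
# self-consistent, not values copied from a released config; add_deeplab_config is
# assumed importable as detectron2.projects.deeplab.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    from detectron2.projects.deeplab import add_deeplab_config
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["res5"]
    cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS
    head = DeepLabV3Head(cfg, {"res5": ShapeSpec(channels=2048, stride=32)}).eval()
    with torch.no_grad():
        logits, _ = head({"res5": torch.randn(1, 2048, 32, 32)})
    print(tuple(logits.shape))  # (1, NUM_CLASSES, 32 * COMMON_STRIDE, 32 * COMMON_STRIDE)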
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/semantic_seg.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
PointRend Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.point_rend import ColorAugSSDTransform, add_pointrend_config
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
)
)
if cfg.INPUT.COLOR_AUG_SSD:
augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
augs.append(T.RandomFlip())
return augs
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if evaluator_type == "coco":
return COCOEvaluator(dataset_name, output_dir=output_folder)
if evaluator_type == "sem_seg":
return SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_pointrend_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
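# Editor's usage note, not part of the original file: this script follows the standard
# detectron2 launcher pattern, so a typical invocation looks like the command below,
# where <pointrend_config>.yaml is a placeholder for one of the YAML files shipped under
# this project's configs/ directory:
#   python train_net.py --config-file configs/<pointrend_config>.yaml --num-gpus 8
# Evaluation-only runs add `--eval-only MODEL.WEIGHTS /path/to/checkpoint.pth`.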
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import BitMasks, Boxes
"""
Shape shorthand in this module:
    N: minibatch dimension size, i.e. the number of RoIs for instance segmentation or the
        number of images for semantic segmentation.
R: number of ROIs, combined over all images, in the minibatch
P: number of points
"""
def _as_tensor(x):
"""
An equivalent of `torch.as_tensor`, but works under tracing.
"""
if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
return torch.stack(x)
return torch.as_tensor(x)
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
    Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside
    the [0, 1] x [0, 1] square.
Args:
        input (Tensor): A tensor of shape (N, C, H, W) that contains a feature map on an H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
            interpolation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
def generate_regular_grid_point_coords(R, side_size, device):
"""
Generate regular square grid of points in [0, 1] x [0, 1] coordinate space.
Args:
R (int): The number of grids to sample, one for each region.
side_size (int): The side size of the regular grid.
device (torch.device): Desired device of returned tensor.
Returns:
(Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates
for the regular grids.
"""
aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)
r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)
return r.view(1, -1, 2).expand(R, -1, -1)
def get_uncertain_point_coords_with_randomness(
coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio
):
"""
    Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties
        are calculated for each point using the 'uncertainty_func' function, which takes a point's
        logit prediction as input.
See PointRend paper for details.
Args:
coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for
class-specific or class-agnostic prediction.
uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that
contains logit predictions for P points and returns their uncertainties as a Tensor of
shape (N, 1, P).
num_points (int): The number of points P to sample.
oversample_ratio (int): Oversampling parameter.
        importance_sample_ratio (float): Ratio of points that are sampled via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
"""
assert oversample_ratio >= 1
assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0
num_boxes = coarse_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device)
point_logits = point_sample(coarse_logits, point_coords, align_corners=False)
# It is crucial to calculate uncertainty based on the sampled prediction value for the points.
# Calculating uncertainties of the coarse predictions first and sampling them for points leads
# to incorrect results.
# To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between
# two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value.
# However, if we calculate uncertainties for the coarse predictions first,
# both will have -1 uncertainty, and the sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
num_boxes, num_uncertain_points, 2
)
if num_random_points > 0:
point_coords = cat(
[
point_coords,
torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device),
],
dim=1,
)
return point_coords
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords
def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):
"""
Get features from feature maps in `features_list` that correspond to specific point coordinates
inside each bounding box from `boxes`.
Args:
features_list (list[Tensor]): A list of feature map tensors to get features from.
feature_scales (list[float]): A list of scales for tensors in `features_list`.
boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all
together.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled
            from all feature maps in `features_list` for P sampled points for all R boxes in `boxes`.
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level
coordinates of P points.
"""
cat_boxes = Boxes.cat(boxes)
num_boxes = [b.tensor.size(0) for b in boxes]
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)
point_features = []
for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):
point_features_per_image = []
for idx_feature, feature_map in enumerate(features_list):
h, w = feature_map.shape[-2:]
scale = _as_tensor([w, h]) / feature_scales[idx_feature]
point_coords_scaled = point_coords_wrt_image_per_image / scale.to(feature_map.device)
point_features_per_image.append(
point_sample(
feature_map[idx_img].unsqueeze(0),
point_coords_scaled.unsqueeze(0),
align_corners=False,
)
.squeeze(0)
.transpose(1, 0)
)
point_features.append(cat(point_features_per_image, dim=1))
return cat(point_features, dim=0), point_coords_wrt_image
def get_point_coords_wrt_image(boxes_coords, point_coords):
"""
    Convert box-normalized [0, 1] x [0, 1] point coordinates to image-level coordinates.
Args:
        boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding box
            coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains
            image-level coordinates of P sampled points.
"""
with torch.no_grad():
point_coords_wrt_image = point_coords.clone()
point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]
point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]
return point_coords_wrt_image
def sample_point_labels(instances, point_coords):
"""
Sample point labels from ground truth mask given point_coords.
Args:
instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. So, the i-th element of the list contains R_i objects and R_1 + ... + R_N is
equal to R. The ground-truth gt_masks in each instance will be used to compute labels.
        point_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of
instances and P is the number of points for each instance. The coordinates are in
the absolute image pixel coordinate space, i.e. [0, H] x [0, W].
Returns:
Tensor: A tensor of shape (R, P) that contains the labels of P sampled points.
"""
with torch.no_grad():
gt_mask_logits = []
point_coords_splits = torch.split(
point_coords, [len(instances_per_image) for instances_per_image in instances]
)
for i, instances_per_image in enumerate(instances):
if len(instances_per_image) == 0:
continue
assert isinstance(
instances_per_image.gt_masks, BitMasks
), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'."
gt_bit_masks = instances_per_image.gt_masks.tensor
h, w = instances_per_image.gt_masks.image_size
scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device)
points_coord_grid_sample_format = point_coords_splits[i] / scale
gt_mask_logits.append(
point_sample(
gt_bit_masks.to(torch.float32).unsqueeze(1),
points_coord_grid_sample_format,
align_corners=False,
).squeeze(1)
)
point_labels = cat(gt_mask_logits)
return point_labels
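# Editor's note: a minimal usage sketch, not part of the original file. It samples
# per-point features from a feature map with point_sample, using a regular grid of
# normalized coordinates from generate_regular_grid_point_coords.
if __name__ == "__main__":
    feature_map = torch.randn(2, 16, 32, 32)                                # (N, C, H, W)
    coords = generate_regular_grid_point_coords(2, 7, feature_map.device)   # (2, 49, 2) in [0, 1]
    sampled = point_sample(feature_map, coords, align_corners=False)
    print(tuple(sampled.shape))  # (2, 16, 49)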
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/point_features.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
import numpy as np
from typing import Dict, List, Tuple
import fvcore.nn.weight_init as weight_init
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, cat, interpolate
from detectron2.modeling import ROI_MASK_HEAD_REGISTRY
from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference, mask_rcnn_loss
from detectron2.structures import Boxes
from .point_features import (
generate_regular_grid_point_coords,
get_point_coords_wrt_image,
get_uncertain_point_coords_on_grid,
get_uncertain_point_coords_with_randomness,
point_sample,
point_sample_fine_grained_features,
sample_point_labels,
)
from .point_head import build_point_head, roi_mask_point_loss
def calculate_uncertainty(logits, classes):
"""
    We estimate uncertainty as the L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
        classes (list): A list of length R that contains either the predicted or ground-truth class
            for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
return -(torch.abs(gt_class_logits))
class ConvFCHead(nn.Module):
"""
A mask head with fully connected layers. Given pooled features it first reduces channels and
spatial dimensions with conv layers and then uses FC layers to predict coarse masks analogously
to the standard box head.
"""
_version = 2
@configurable
def __init__(
self, input_shape: ShapeSpec, *, conv_dim: int, fc_dims: List[int], output_shape: Tuple[int]
):
"""
Args:
conv_dim: the output dimension of the conv layers
fc_dims: a list of N>0 integers representing the output dimensions of N FC layers
output_shape: shape of the output mask prediction
"""
super().__init__()
# fmt: off
input_channels = input_shape.channels
input_h = input_shape.height
input_w = input_shape.width
self.output_shape = output_shape
# fmt: on
self.conv_layers = []
if input_channels > conv_dim:
self.reduce_channel_dim_conv = Conv2d(
input_channels,
conv_dim,
kernel_size=1,
stride=1,
padding=0,
bias=True,
activation=F.relu,
)
self.conv_layers.append(self.reduce_channel_dim_conv)
self.reduce_spatial_dim_conv = Conv2d(
conv_dim, conv_dim, kernel_size=2, stride=2, padding=0, bias=True, activation=F.relu
)
self.conv_layers.append(self.reduce_spatial_dim_conv)
input_dim = conv_dim * input_h * input_w
input_dim //= 4
self.fcs = []
for k, fc_dim in enumerate(fc_dims):
fc = nn.Linear(input_dim, fc_dim)
self.add_module("fc{}".format(k + 1), fc)
self.fcs.append(fc)
input_dim = fc_dim
output_dim = int(np.prod(self.output_shape))
self.prediction = nn.Linear(fc_dims[-1], output_dim)
# use normal distribution initialization for mask prediction layer
nn.init.normal_(self.prediction.weight, std=0.001)
nn.init.constant_(self.prediction.bias, 0)
for layer in self.conv_layers:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
@classmethod
def from_config(cls, cfg, input_shape):
output_shape = (
cfg.MODEL.ROI_HEADS.NUM_CLASSES,
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION,
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION,
)
fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM
num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC
ret = dict(
input_shape=input_shape,
conv_dim=cfg.MODEL.ROI_MASK_HEAD.CONV_DIM,
fc_dims=[fc_dim] * num_fc,
output_shape=output_shape,
)
return ret
def forward(self, x):
N = x.shape[0]
for layer in self.conv_layers:
x = layer(x)
x = torch.flatten(x, start_dim=1)
for layer in self.fcs:
x = F.relu(layer(x))
output_shape = [N] + list(self.output_shape)
return self.prediction(x).view(*output_shape)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
logger = logging.getLogger(__name__)
logger.warning(
"Weight format of PointRend models have changed! "
"Applying automatic conversion now ..."
)
for k in list(state_dict.keys()):
newk = k
if k.startswith(prefix + "coarse_mask_fc"):
newk = k.replace(prefix + "coarse_mask_fc", prefix + "fc")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
@ROI_MASK_HEAD_REGISTRY.register()
class PointRendMaskHead(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()}
# point head
self._init_point_head(cfg, input_shape)
# coarse mask head
self.roi_pooler_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES
self.roi_pooler_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()}
in_channels = np.sum([input_shape[f].channels for f in self.roi_pooler_in_features])
self._init_roi_head(
cfg,
ShapeSpec(
channels=in_channels,
width=self.roi_pooler_size,
height=self.roi_pooler_size,
),
)
def _init_roi_head(self, cfg, input_shape):
self.coarse_head = ConvFCHead(cfg, input_shape)
def _init_point_head(self, cfg, input_shape):
# fmt: off
self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON
if not self.mask_point_on:
return
assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
        # next three parameters are used in the adaptive subdivision inference procedure
self.mask_point_subdivision_init_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION
self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
# An optimization to skip unused subdivision steps: if after subdivision, all pixels on
# the mask will be selected and recomputed anyway, we should just double our init_resolution
while (
4 * self.mask_point_subdivision_init_resolution ** 2
<= self.mask_point_subdivision_num_points
):
self.mask_point_subdivision_init_resolution *= 2
self.mask_point_subdivision_steps -= 1
def forward(self, features, instances):
"""
Args:
features (dict[str, Tensor]): a dict of image-level features
instances (list[Instances]): proposals in training; detected
instances in inference
"""
if self.training:
proposal_boxes = [x.proposal_boxes for x in instances]
coarse_mask = self.coarse_head(self._roi_pooler(features, proposal_boxes))
losses = {"loss_mask": mask_rcnn_loss(coarse_mask, instances)}
if not self.mask_point_on:
return losses
point_coords, point_labels = self._sample_train_points(coarse_mask, instances)
point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords)
point_logits = self._get_point_logits(
point_fine_grained_features, point_coords, coarse_mask
)
losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels)
return losses
else:
pred_boxes = [x.pred_boxes for x in instances]
coarse_mask = self.coarse_head(self._roi_pooler(features, pred_boxes))
return self._subdivision_inference(features, coarse_mask, instances)
def _roi_pooler(self, features: List[Tensor], boxes: List[Boxes]):
"""
        Extract per-box features. This is similar to RoIAlign(sampling_ratio=1) except:
        1. It's implemented by point_sample
        2. It pools features across all levels and concatenates them, while RoIAlign
           typically selects one level for every box. However, in the config we only use
           one level (p2), so there is no difference.
Returns:
Tensor of shape (R, C, pooler_size, pooler_size) where R is the total number of boxes
"""
features_list = [features[k] for k in self.roi_pooler_in_features]
features_scales = [self._feature_scales[k] for k in self.roi_pooler_in_features]
num_boxes = sum(x.tensor.size(0) for x in boxes)
output_size = self.roi_pooler_size
point_coords = generate_regular_grid_point_coords(num_boxes, output_size, boxes[0].device)
# For regular grids of points, this function is equivalent to `len(features_list)' calls
        # of `ROIAlign` (with `SAMPLING_RATIO=1`), and concatenating the results.
roi_features, _ = point_sample_fine_grained_features(
features_list, features_scales, boxes, point_coords
)
return roi_features.view(num_boxes, roi_features.shape[1], output_size, output_size)
def _sample_train_points(self, coarse_mask, instances):
assert self.training
gt_classes = cat([x.gt_classes for x in instances])
with torch.no_grad():
# sample point_coords
point_coords = get_uncertain_point_coords_with_randomness(
coarse_mask,
lambda logits: calculate_uncertainty(logits, gt_classes),
self.mask_point_train_num_points,
self.mask_point_oversample_ratio,
self.mask_point_importance_sample_ratio,
)
# sample point_labels
proposal_boxes = [x.proposal_boxes for x in instances]
cat_boxes = Boxes.cat(proposal_boxes)
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
point_labels = sample_point_labels(instances, point_coords_wrt_image)
return point_coords, point_labels
def _point_pooler(self, features, proposal_boxes, point_coords):
point_features_list = [features[k] for k in self.mask_point_in_features]
point_features_scales = [self._feature_scales[k] for k in self.mask_point_in_features]
# sample image-level features
point_fine_grained_features, _ = point_sample_fine_grained_features(
point_features_list, point_features_scales, proposal_boxes, point_coords
)
return point_fine_grained_features
def _get_point_logits(self, point_fine_grained_features, point_coords, coarse_mask):
coarse_features = point_sample(coarse_mask, point_coords, align_corners=False)
point_logits = self.point_head(point_fine_grained_features, coarse_features)
return point_logits
def _subdivision_inference(self, features, mask_representations, instances):
assert not self.training
pred_boxes = [x.pred_boxes for x in instances]
pred_classes = cat([x.pred_classes for x in instances])
mask_logits = None
# +1 here to include an initial step to generate the coarsest mask
# prediction with init_resolution, when mask_logits is None.
# We compute initial mask by sampling on a regular grid. coarse_mask
# can be used as initial mask as well, but it's typically very low-res
# so it will be completely overwritten during subdivision anyway.
for _ in range(self.mask_point_subdivision_steps + 1):
if mask_logits is None:
point_coords = generate_regular_grid_point_coords(
pred_classes.size(0),
self.mask_point_subdivision_init_resolution,
pred_boxes[0].device,
)
else:
mask_logits = interpolate(
mask_logits, scale_factor=2, mode="bilinear", align_corners=False
)
uncertainty_map = calculate_uncertainty(mask_logits, pred_classes)
point_indices, point_coords = get_uncertain_point_coords_on_grid(
uncertainty_map, self.mask_point_subdivision_num_points
)
# Run the point head for every point in point_coords
fine_grained_features = self._point_pooler(features, pred_boxes, point_coords)
point_logits = self._get_point_logits(
fine_grained_features, point_coords, mask_representations
)
if mask_logits is None:
# Create initial mask_logits using point_logits on this regular grid
R, C, _ = point_logits.shape
mask_logits = point_logits.reshape(
R,
C,
self.mask_point_subdivision_init_resolution,
self.mask_point_subdivision_init_resolution,
)
                # The subdivision code will fail with an empty list of boxes
if len(pred_classes) == 0:
mask_rcnn_inference(mask_logits, instances)
return instances
else:
# Put point predictions to the right places on the upsampled grid.
R, C, H, W = mask_logits.shape
point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
mask_logits = (
mask_logits.reshape(R, C, H * W)
.scatter_(2, point_indices, point_logits)
.view(R, C, H, W)
)
mask_rcnn_inference(mask_logits, instances)
return instances
@ROI_MASK_HEAD_REGISTRY.register()
class ImplicitPointRendMaskHead(PointRendMaskHead):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__(cfg, input_shape)
def _init_roi_head(self, cfg, input_shape):
assert hasattr(self, "num_params"), "Please initialize point_head first!"
self.parameter_head = ConvFCHead(cfg, input_shape, output_shape=(self.num_params,))
self.regularizer = cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER
def _init_point_head(self, cfg, input_shape):
# fmt: off
self.mask_point_on = True # always on
assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
        # next two parameters are used in the adaptive subdivision inference procedure
self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
self.num_params = self.point_head.num_params
# inference parameters
self.mask_point_subdivision_init_resolution = int(
math.sqrt(self.mask_point_subdivision_num_points)
)
assert (
self.mask_point_subdivision_init_resolution
* self.mask_point_subdivision_init_resolution
== self.mask_point_subdivision_num_points
)
def forward(self, features, instances):
"""
Args:
features (dict[str, Tensor]): a dict of image-level features
instances (list[Instances]): proposals in training; detected
instances in inference
"""
if self.training:
proposal_boxes = [x.proposal_boxes for x in instances]
parameters = self.parameter_head(self._roi_pooler(features, proposal_boxes))
losses = {"loss_l2": self.regularizer * (parameters ** 2).mean()}
point_coords, point_labels = self._uniform_sample_train_points(instances)
point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords)
point_logits = self._get_point_logits(
point_fine_grained_features, point_coords, parameters
)
losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels)
return losses
else:
pred_boxes = [x.pred_boxes for x in instances]
parameters = self.parameter_head(self._roi_pooler(features, pred_boxes))
return self._subdivision_inference(features, parameters, instances)
def _uniform_sample_train_points(self, instances):
assert self.training
proposal_boxes = [x.proposal_boxes for x in instances]
cat_boxes = Boxes.cat(proposal_boxes)
# uniform sample
point_coords = torch.rand(
len(cat_boxes), self.mask_point_train_num_points, 2, device=cat_boxes.tensor.device
)
# sample point_labels
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
point_labels = sample_point_labels(instances, point_coords_wrt_image)
return point_coords, point_labels
def _get_point_logits(self, fine_grained_features, point_coords, parameters):
return self.point_head(fine_grained_features, point_coords, parameters)
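# Editor's note: a minimal usage sketch, not part of the original file. It shows how
# calculate_uncertainty turns class-specific mask logits into per-location uncertainty
# scores (the negative absolute logit of the selected class).
if __name__ == "__main__":
    logits = torch.randn(4, 80, 14, 14)        # R=4 predicted masks, C=80 classes
    classes = torch.randint(0, 80, (4,))
    scores = calculate_uncertainty(logits, classes)
    print(tuple(scores.shape))  # (4, 1, 14, 14); larger values mean more uncertain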
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/mask_head.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_pointrend_config(cfg):
"""
Add config for PointRend.
"""
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
    # Color augmentation from the SSD paper for the semantic segmentation model during training.
cfg.INPUT.COLOR_AUG_SSD = False
# Names of the input feature maps to be used by a coarse mask head.
cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",)
cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
# The side size of a coarse mask head prediction.
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
# True if point head is used.
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False
cfg.MODEL.POINT_HEAD = CN()
cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
# Names of the input feature maps to be used by a mask point head.
cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",)
# Number of points sampled during training for a mask point head.
cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
# original paper.
cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
    # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
# the original paper.
cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
# Number of subdivision steps during inference.
cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
# Maximum number of points selected at each subdivision step (N).
cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
cfg.MODEL.POINT_HEAD.FC_DIM = 256
cfg.MODEL.POINT_HEAD.NUM_FC = 3
cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
    # If True, then coarse prediction features are used as input for each layer in PointRend's MLP.
cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead"
"""
Add config for Implicit PointRend.
"""
cfg.MODEL.IMPLICIT_POINTREND = CN()
cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED = True
cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED = True
cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER = 0.00001
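# Editor's note: a minimal usage sketch, not part of the original file. It extends the
# default detectron2 config with the PointRend keys added above and reads a few back.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    cfg = get_cfg()
    add_pointrend_config(cfg)
    print(cfg.MODEL.POINT_HEAD.NAME)              # "StandardPointHead"
    print(cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS)  # 196
    print(cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON)  # False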
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_pointrend_config
from .mask_head import PointRendMaskHead, ImplicitPointRendMaskHead
from .semantic_seg import PointRendSemSegHead
from .color_augmentation import ColorAugSSDTransform
from . import roi_heads as _ # only registration
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
POINT_HEAD_REGISTRY = Registry("POINT_HEAD")
POINT_HEAD_REGISTRY.__doc__ = """
Registry for point heads, which make predictions for a given set of per-point features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def roi_mask_point_loss(mask_logits, instances, point_labels):
"""
Compute the point-based loss for instance segmentation mask predictions
given point-wise mask prediction and its corresponding point-wise labels.
Args:
mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images, C is the
number of foreground classes, and P is the number of points sampled for each mask.
The values are logits.
instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, the
            i-th element of the list contains R_i objects and R_1 + ... + R_N is equal to R.
The ground-truth labels (class, box, mask, ...) associated with each instance are stored
in fields.
point_labels (Tensor): A tensor of shape (R, P), where R is the total number of
predicted masks and P is the number of points for each mask.
Labels with value of -1 will be ignored.
Returns:
point_loss (Tensor): A scalar tensor containing the loss.
"""
with torch.no_grad():
cls_agnostic_mask = mask_logits.size(1) == 1
total_num_masks = mask_logits.size(0)
gt_classes = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
if not cls_agnostic_mask:
gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
gt_classes.append(gt_classes_per_image)
gt_mask_logits = point_labels
point_ignores = point_labels == -1
if gt_mask_logits.shape[0] == 0:
return mask_logits.sum() * 0
assert gt_mask_logits.numel() > 0, gt_mask_logits.shape
if cls_agnostic_mask:
mask_logits = mask_logits[:, 0]
else:
indices = torch.arange(total_num_masks)
gt_classes = cat(gt_classes, dim=0)
mask_logits = mask_logits[indices, gt_classes]
# Log the training accuracy (using gt classes and 0.0 threshold for the logits)
mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8)
mask_accurate = mask_accurate[~point_ignores]
mask_accuracy = mask_accurate.nonzero().size(0) / mask_accurate.numel()
get_event_storage().put_scalar("point/accuracy", mask_accuracy)
point_loss = F.binary_cross_entropy_with_logits(
mask_logits, gt_mask_logits.to(dtype=torch.float32), weight=~point_ignores, reduction="mean"
)
return point_loss
@POINT_HEAD_REGISTRY.register()
class StandardPointHead(nn.Module):
"""
A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head
takes both fine-grained and coarse prediction features as its input.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
            fc_dim: the output dimension of each FC layer
num_fc: the number of FC layers
coarse_pred_each_layer: if True, coarse prediction features are concatenated to each
layer's input
"""
super(StandardPointHead, self).__init__()
# fmt: off
num_classes = cfg.MODEL.POINT_HEAD.NUM_CLASSES
fc_dim = cfg.MODEL.POINT_HEAD.FC_DIM
num_fc = cfg.MODEL.POINT_HEAD.NUM_FC
cls_agnostic_mask = cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK
self.coarse_pred_each_layer = cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER
input_channels = input_shape.channels
# fmt: on
fc_dim_in = input_channels + num_classes
self.fc_layers = []
for k in range(num_fc):
fc = nn.Conv1d(fc_dim_in, fc_dim, kernel_size=1, stride=1, padding=0, bias=True)
self.add_module("fc{}".format(k + 1), fc)
self.fc_layers.append(fc)
fc_dim_in = fc_dim
fc_dim_in += num_classes if self.coarse_pred_each_layer else 0
num_mask_classes = 1 if cls_agnostic_mask else num_classes
self.predictor = nn.Conv1d(fc_dim_in, num_mask_classes, kernel_size=1, stride=1, padding=0)
for layer in self.fc_layers:
weight_init.c2_msra_fill(layer)
# use normal distribution initialization for mask prediction layer
nn.init.normal_(self.predictor.weight, std=0.001)
if self.predictor.bias is not None:
nn.init.constant_(self.predictor.bias, 0)
def forward(self, fine_grained_features, coarse_features):
x = torch.cat((fine_grained_features, coarse_features), dim=1)
for layer in self.fc_layers:
x = F.relu(layer(x))
if self.coarse_pred_each_layer:
x = cat((x, coarse_features), dim=1)
return self.predictor(x)
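# Editor's note (kept as a comment because the module continues below; not part of the
# original file): with the PointRend defaults (FC_DIM=256, NUM_CLASSES=80,
# COARSE_PRED_EACH_LAYER=True), StandardPointHead maps per-point features plus coarse
# predictions to per-point logits, e.g.:
#   from detectron2.config import get_cfg
#   from detectron2.layers import ShapeSpec
#   cfg = get_cfg(); add_pointrend_config(cfg)  # add_pointrend_config from this project's config.py
#   head = StandardPointHead(cfg, ShapeSpec(channels=256, width=1, height=1))
#   fine = torch.randn(8, 256, 196)    # (R, C, P) fine-grained point features
#   coarse = torch.randn(8, 80, 196)   # (R, num_classes, P) coarse predictions
#   print(head(fine, coarse).shape)    # torch.Size([8, 80, 196])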
@POINT_HEAD_REGISTRY.register()
class ImplicitPointHead(nn.Module):
"""
A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head
takes both fine-grained features and instance-wise MLP parameters as its input.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
            channels: the output dimension of each FC layer
num_layers: the number of FC layers (including the final prediction layer)
image_feature_enabled: if True, fine-grained image-level features are used
positional_encoding_enabled: if True, positional encoding is used
"""
super(ImplicitPointHead, self).__init__()
# fmt: off
self.num_layers = cfg.MODEL.POINT_HEAD.NUM_FC + 1
self.channels = cfg.MODEL.POINT_HEAD.FC_DIM
self.image_feature_enabled = cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED
self.positional_encoding_enabled = cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED
self.num_classes = (
cfg.MODEL.POINT_HEAD.NUM_CLASSES if not cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK else 1
)
self.in_channels = input_shape.channels
# fmt: on
if not self.image_feature_enabled:
self.in_channels = 0
if self.positional_encoding_enabled:
self.in_channels += 256
self.register_buffer("positional_encoding_gaussian_matrix", torch.randn((2, 128)))
assert self.in_channels > 0
num_weight_params, num_bias_params = [], []
assert self.num_layers >= 2
for l in range(self.num_layers):
if l == 0:
# input layer
num_weight_params.append(self.in_channels * self.channels)
num_bias_params.append(self.channels)
elif l == self.num_layers - 1:
# output layer
num_weight_params.append(self.channels * self.num_classes)
num_bias_params.append(self.num_classes)
else:
# intermediate layer
num_weight_params.append(self.channels * self.channels)
num_bias_params.append(self.channels)
self.num_weight_params = num_weight_params
self.num_bias_params = num_bias_params
self.num_params = sum(num_weight_params) + sum(num_bias_params)
def forward(self, fine_grained_features, point_coords, parameters):
# features: [R, channels, K]
# point_coords: [R, K, 2]
num_instances = fine_grained_features.size(0)
num_points = fine_grained_features.size(2)
if num_instances == 0:
return torch.zeros((0, 1, num_points), device=fine_grained_features.device)
if self.positional_encoding_enabled:
# locations: [R*K, 2]
locations = 2 * point_coords.reshape(num_instances * num_points, 2) - 1
locations = locations @ self.positional_encoding_gaussian_matrix.to(locations.device)
locations = 2 * np.pi * locations
locations = torch.cat([torch.sin(locations), torch.cos(locations)], dim=1)
# locations: [R, C, K]
locations = locations.reshape(num_instances, num_points, 256).permute(0, 2, 1)
if not self.image_feature_enabled:
fine_grained_features = locations
else:
fine_grained_features = torch.cat([locations, fine_grained_features], dim=1)
# features [R, C, K]
mask_feat = fine_grained_features.reshape(num_instances, self.in_channels, num_points)
weights, biases = self._parse_params(
parameters,
self.in_channels,
self.channels,
self.num_classes,
self.num_weight_params,
self.num_bias_params,
)
point_logits = self._dynamic_mlp(mask_feat, weights, biases, num_instances)
point_logits = point_logits.reshape(-1, self.num_classes, num_points)
return point_logits
@staticmethod
def _dynamic_mlp(features, weights, biases, num_instances):
assert features.dim() == 3, features.dim()
n_layers = len(weights)
x = features
for i, (w, b) in enumerate(zip(weights, biases)):
x = torch.einsum("nck,ndc->ndk", x, w) + b
if i < n_layers - 1:
x = F.relu(x)
return x
@staticmethod
def _parse_params(
pred_params,
in_channels,
channels,
num_classes,
num_weight_params,
num_bias_params,
):
assert pred_params.dim() == 2
assert len(num_weight_params) == len(num_bias_params)
assert pred_params.size(1) == sum(num_weight_params) + sum(num_bias_params)
num_instances = pred_params.size(0)
num_layers = len(num_weight_params)
params_splits = list(
torch.split_with_sizes(pred_params, num_weight_params + num_bias_params, dim=1)
)
weight_splits = params_splits[:num_layers]
bias_splits = params_splits[num_layers:]
for l in range(num_layers):
if l == 0:
# input layer
weight_splits[l] = weight_splits[l].reshape(num_instances, channels, in_channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1)
elif l < num_layers - 1:
# intermediate layer
weight_splits[l] = weight_splits[l].reshape(num_instances, channels, channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1)
else:
# output layer
weight_splits[l] = weight_splits[l].reshape(num_instances, num_classes, channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, num_classes, 1)
return weight_splits, bias_splits
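def _dynamic_point_mlp_sketch():
    """Illustrative sketch (not part of the original file): run the two static helpers
    above on toy sizes to show how a flat per-instance parameter vector becomes a small
    per-instance MLP that is applied to every sampled point. All sizes are made up."""
    in_channels, channels, num_classes = 3, 4, 2
    num_weight_params = [in_channels * channels, channels * num_classes]
    num_bias_params = [channels, num_classes]
    num_instances, num_points = 5, 7
    params = torch.randn(num_instances, sum(num_weight_params) + sum(num_bias_params))
    weights, biases = ImplicitPointHead._parse_params(
        params, in_channels, channels, num_classes, num_weight_params, num_bias_params
    )
    features = torch.randn(num_instances, in_channels, num_points)
    # einsum("nck,ndc->ndk") multiplies instance n's features by instance n's weight matrix,
    # so each instance gets its own MLP while all points are processed in one batched call.
    logits = ImplicitPointHead._dynamic_mlp(features, weights, biases, num_instances)
    assert logits.shape == (num_instances, num_classes, num_points)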
def build_point_head(cfg, input_channels):
"""
Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`.
"""
head_name = cfg.MODEL.POINT_HEAD.NAME
return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
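def _point_head_registry_sketch():
    """Illustrative sketch (not part of the original file): build_point_head resolves the
    head class by name through the registry; both heads defined above are registered under
    their class names."""
    assert POINT_HEAD_REGISTRY.get("StandardPointHead") is StandardPointHead
    assert POINT_HEAD_REGISTRY.get("ImplicitPointHead") is ImplicitPointHead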
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/point_head.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
@ROI_HEADS_REGISTRY.register()
class PointRendROIHeads(StandardROIHeads):
"""
Identical to StandardROIHeads, except for some weights conversion code to
handle old models.
"""
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
logger = logging.getLogger(__name__)
logger.warning(
"Weight format of PointRend models have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
for k in list(state_dict.keys()):
newk = k
if k.startswith(prefix + "mask_point_head"):
newk = k.replace(prefix + "mask_point_head", prefix + "mask_head.point_head")
if k.startswith(prefix + "mask_coarse_head"):
newk = k.replace(prefix + "mask_coarse_head", prefix + "mask_head.coarse_head")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
@classmethod
def _init_mask_head(cls, cfg, input_shape):
if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.NAME != "PointRendMaskHead":
logger = logging.getLogger(__name__)
logger.warning(
"Config of PointRend models have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
assert cfg.MODEL.ROI_MASK_HEAD.NAME == "CoarseMaskHead"
cfg.defrost()
cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ""
cfg.freeze()
return super()._init_mask_head(cfg, input_shape)
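def _rename_state_dict_keys_sketch():
    """Illustrative sketch (not part of the original file): the key-renaming pattern used
    in _load_from_state_dict above, applied to a toy state dict. Key names are made up."""
    state_dict = {
        "roi_heads.mask_point_head.fc1.weight": "w1",
        "backbone.stem.conv1.weight": "w2",
    }
    prefix = "roi_heads."
    for k in list(state_dict.keys()):
        newk = k.replace(prefix + "mask_point_head", prefix + "mask_head.point_head")
        if newk != k:
            state_dict[newk] = state_dict.pop(k)
    assert "roi_heads.mask_head.point_head.fc1.weight" in state_dict
    assert "backbone.stem.conv1.weight" in state_dict  # untouched keys are kept as-is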
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/roi_heads.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import random
import cv2
from fvcore.transforms.transform import Transform
class ColorAugSSDTransform(Transform):
"""
A color related data augmentation used in Single Shot Multibox Detector (SSD).
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Implementation based on:
https://github.com/weiliu89/caffe/blob
/4817bf8b4200b35ada8ed0dc378dceaf38c539e4
/src/caffe/util/im_transforms.cpp
https://github.com/chainer/chainercv/blob
/7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv
/links/model/ssd/transforms.py
"""
def __init__(
self,
img_format,
brightness_delta=32,
contrast_low=0.5,
contrast_high=1.5,
saturation_low=0.5,
saturation_high=1.5,
hue_delta=18,
):
super().__init__()
assert img_format in ["BGR", "RGB"]
self.is_rgb = img_format == "RGB"
del img_format
self._set_attributes(locals())
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def apply_image(self, img, interp=None):
if self.is_rgb:
img = img[:, :, [2, 1, 0]]
img = self.brightness(img)
if random.randrange(2):
img = self.contrast(img)
img = self.saturation(img)
img = self.hue(img)
else:
img = self.saturation(img)
img = self.hue(img)
img = self.contrast(img)
if self.is_rgb:
img = img[:, :, [2, 1, 0]]
return img
def convert(self, img, alpha=1, beta=0):
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
if random.randrange(2):
return self.convert(
img, beta=random.uniform(-self.brightness_delta, self.brightness_delta)
)
return img
def contrast(self, img):
if random.randrange(2):
return self.convert(img, alpha=random.uniform(self.contrast_low, self.contrast_high))
return img
def saturation(self, img):
if random.randrange(2):
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img[:, :, 1] = self.convert(
img[:, :, 1], alpha=random.uniform(self.saturation_low, self.saturation_high)
)
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
return img
def hue(self, img):
if random.randrange(2):
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img[:, :, 0] = (
img[:, :, 0].astype(int) + random.randint(-self.hue_delta, self.hue_delta)
) % 180
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
return img
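def _color_aug_usage_sketch():
    """Illustrative usage sketch (not part of the original file): apply the transform to a
    random uint8 BGR image. Only the image is changed; coordinates and segmentations pass
    through untouched. Image size and format are made up."""
    aug = ColorAugSSDTransform(img_format="BGR")
    img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    out = aug.apply_image(img)
    assert out.shape == img.shape and out.dtype == np.uint8
    coords = np.array([[1.0, 2.0]])
    assert (aug.apply_coords(coords) == coords).all()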
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/color_augmentation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Dict
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .point_features import (
get_uncertain_point_coords_on_grid,
get_uncertain_point_coords_with_randomness,
point_sample,
)
from .point_head import build_point_head
def calculate_uncertainty(sem_seg_logits):
"""
For each location of the prediction `sem_seg_logits` we estimate uncertainty as the
difference between the top first and top second predicted logits.
Args:
sem_seg_logits (Tensor): A tensor of shape (N, C, ...), where N is the minibatch size and
C is the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (N, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
top2_scores = torch.topk(sem_seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
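def _uncertainty_sketch():
    """Illustrative sketch (not part of the original file): the score is (top2 - top1),
    i.e. the negative logit margin, so locations where the two best classes are nearly
    tied get the highest (least negative) score. Values below are made up."""
    logits = torch.tensor([[[2.0], [1.9], [-3.0]]])  # N=1, C=3, a single location
    assert torch.allclose(calculate_uncertainty(logits), torch.tensor(-0.1))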
@SEM_SEG_HEADS_REGISTRY.register()
class PointRendSemSegHead(nn.Module):
"""
A semantic segmentation head that combines a head set in `POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME`
and a point head set in `MODEL.POINT_HEAD.NAME`.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
self.coarse_sem_seg_head = SEM_SEG_HEADS_REGISTRY.get(
cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME
)(cfg, input_shape)
self._init_point_head(cfg, input_shape)
def _init_point_head(self, cfg, input_shape: Dict[str, ShapeSpec]):
# fmt: off
assert cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
feature_channels = {k: v.channels for k, v in input_shape.items()}
self.in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
self.oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
self.importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
self.subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([feature_channels[f] for f in self.in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
def forward(self, features, targets=None):
coarse_sem_seg_logits = self.coarse_sem_seg_head.layers(features)
if self.training:
losses = self.coarse_sem_seg_head.losses(coarse_sem_seg_logits, targets)
with torch.no_grad():
point_coords = get_uncertain_point_coords_with_randomness(
coarse_sem_seg_logits,
calculate_uncertainty,
self.train_num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
coarse_features = point_sample(coarse_sem_seg_logits, point_coords, align_corners=False)
fine_grained_features = cat(
[
point_sample(features[in_feature], point_coords, align_corners=False)
for in_feature in self.in_features
],
dim=1,
)
point_logits = self.point_head(fine_grained_features, coarse_features)
point_targets = (
point_sample(
targets.unsqueeze(1).to(torch.float),
point_coords,
mode="nearest",
align_corners=False,
)
.squeeze(1)
.to(torch.long)
)
losses["loss_sem_seg_point"] = F.cross_entropy(
point_logits, point_targets, reduction="mean", ignore_index=self.ignore_value
)
return None, losses
else:
sem_seg_logits = coarse_sem_seg_logits.clone()
for _ in range(self.subdivision_steps):
sem_seg_logits = F.interpolate(
sem_seg_logits, scale_factor=2, mode="bilinear", align_corners=False
)
uncertainty_map = calculate_uncertainty(sem_seg_logits)
point_indices, point_coords = get_uncertain_point_coords_on_grid(
uncertainty_map, self.subdivision_num_points
)
fine_grained_features = cat(
[
point_sample(features[in_feature], point_coords, align_corners=False)
for in_feature in self.in_features
]
)
coarse_features = point_sample(
coarse_sem_seg_logits, point_coords, align_corners=False
)
point_logits = self.point_head(fine_grained_features, coarse_features)
# Put the semantic segmentation point predictions in the right places on the upsampled grid.
N, C, H, W = sem_seg_logits.shape
point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
sem_seg_logits = (
sem_seg_logits.reshape(N, C, H * W)
.scatter_(2, point_indices, point_logits)
.view(N, C, H, W)
)
return sem_seg_logits, {}
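def _scatter_point_logits_sketch():
    """Illustrative sketch (not part of the original file): the scatter step above writes
    the refined per-point logits back into the flattened, upsampled logit grid. Sizes and
    indices below are made up."""
    N, C, H, W = 1, 2, 2, 2
    grid = torch.zeros(N, C, H * W)
    point_indices = torch.tensor([[0, 3]]).unsqueeze(1).expand(-1, C, -1)  # [N, C, P]
    point_logits = torch.tensor([[[1.0, 2.0], [3.0, 4.0]]])                # [N, C, P]
    grid.scatter_(2, point_indices, point_logits)
    refined = grid.view(N, C, H, W)
    assert refined[0, 0, 0, 0] == 1.0 and refined[0, 1, 1, 1] == 4.0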
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/semantic_seg.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
import os
from collections import defaultdict
# This mapping is extracted from the official LVIS mapping:
# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
COCO_SYNSET_CATEGORIES = [
{"synset": "person.n.01", "coco_cat_id": 1},
{"synset": "bicycle.n.01", "coco_cat_id": 2},
{"synset": "car.n.01", "coco_cat_id": 3},
{"synset": "motorcycle.n.01", "coco_cat_id": 4},
{"synset": "airplane.n.01", "coco_cat_id": 5},
{"synset": "bus.n.01", "coco_cat_id": 6},
{"synset": "train.n.01", "coco_cat_id": 7},
{"synset": "truck.n.01", "coco_cat_id": 8},
{"synset": "boat.n.01", "coco_cat_id": 9},
{"synset": "traffic_light.n.01", "coco_cat_id": 10},
{"synset": "fireplug.n.01", "coco_cat_id": 11},
{"synset": "stop_sign.n.01", "coco_cat_id": 13},
{"synset": "parking_meter.n.01", "coco_cat_id": 14},
{"synset": "bench.n.01", "coco_cat_id": 15},
{"synset": "bird.n.01", "coco_cat_id": 16},
{"synset": "cat.n.01", "coco_cat_id": 17},
{"synset": "dog.n.01", "coco_cat_id": 18},
{"synset": "horse.n.01", "coco_cat_id": 19},
{"synset": "sheep.n.01", "coco_cat_id": 20},
{"synset": "beef.n.01", "coco_cat_id": 21},
{"synset": "elephant.n.01", "coco_cat_id": 22},
{"synset": "bear.n.01", "coco_cat_id": 23},
{"synset": "zebra.n.01", "coco_cat_id": 24},
{"synset": "giraffe.n.01", "coco_cat_id": 25},
{"synset": "backpack.n.01", "coco_cat_id": 27},
{"synset": "umbrella.n.01", "coco_cat_id": 28},
{"synset": "bag.n.04", "coco_cat_id": 31},
{"synset": "necktie.n.01", "coco_cat_id": 32},
{"synset": "bag.n.06", "coco_cat_id": 33},
{"synset": "frisbee.n.01", "coco_cat_id": 34},
{"synset": "ski.n.01", "coco_cat_id": 35},
{"synset": "snowboard.n.01", "coco_cat_id": 36},
{"synset": "ball.n.06", "coco_cat_id": 37},
{"synset": "kite.n.03", "coco_cat_id": 38},
{"synset": "baseball_bat.n.01", "coco_cat_id": 39},
{"synset": "baseball_glove.n.01", "coco_cat_id": 40},
{"synset": "skateboard.n.01", "coco_cat_id": 41},
{"synset": "surfboard.n.01", "coco_cat_id": 42},
{"synset": "tennis_racket.n.01", "coco_cat_id": 43},
{"synset": "bottle.n.01", "coco_cat_id": 44},
{"synset": "wineglass.n.01", "coco_cat_id": 46},
{"synset": "cup.n.01", "coco_cat_id": 47},
{"synset": "fork.n.01", "coco_cat_id": 48},
{"synset": "knife.n.01", "coco_cat_id": 49},
{"synset": "spoon.n.01", "coco_cat_id": 50},
{"synset": "bowl.n.03", "coco_cat_id": 51},
{"synset": "banana.n.02", "coco_cat_id": 52},
{"synset": "apple.n.01", "coco_cat_id": 53},
{"synset": "sandwich.n.01", "coco_cat_id": 54},
{"synset": "orange.n.01", "coco_cat_id": 55},
{"synset": "broccoli.n.01", "coco_cat_id": 56},
{"synset": "carrot.n.01", "coco_cat_id": 57},
{"synset": "frank.n.02", "coco_cat_id": 58},
{"synset": "pizza.n.01", "coco_cat_id": 59},
{"synset": "doughnut.n.02", "coco_cat_id": 60},
{"synset": "cake.n.03", "coco_cat_id": 61},
{"synset": "chair.n.01", "coco_cat_id": 62},
{"synset": "sofa.n.01", "coco_cat_id": 63},
{"synset": "pot.n.04", "coco_cat_id": 64},
{"synset": "bed.n.01", "coco_cat_id": 65},
{"synset": "dining_table.n.01", "coco_cat_id": 67},
{"synset": "toilet.n.02", "coco_cat_id": 70},
{"synset": "television_receiver.n.01", "coco_cat_id": 72},
{"synset": "laptop.n.01", "coco_cat_id": 73},
{"synset": "mouse.n.04", "coco_cat_id": 74},
{"synset": "remote_control.n.01", "coco_cat_id": 75},
{"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
{"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
{"synset": "microwave.n.02", "coco_cat_id": 78},
{"synset": "oven.n.01", "coco_cat_id": 79},
{"synset": "toaster.n.02", "coco_cat_id": 80},
{"synset": "sink.n.01", "coco_cat_id": 81},
{"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
{"synset": "book.n.01", "coco_cat_id": 84},
{"synset": "clock.n.01", "coco_cat_id": 85},
{"synset": "vase.n.01", "coco_cat_id": 86},
{"synset": "scissors.n.01", "coco_cat_id": 87},
{"synset": "teddy.n.01", "coco_cat_id": 88},
{"synset": "hand_blower.n.01", "coco_cat_id": 89},
{"synset": "toothbrush.n.01", "coco_cat_id": 90},
]
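def _coco_synset_table_sketch():
    """Illustrative spot-check (not part of the original script): the table above maps each
    COCO category's WordNet synset to its COCO category id; cocofy_lvis below builds its
    lookup directly from it."""
    synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
    assert len(synset_to_coco_cat_id) == len(COCO_SYNSET_CATEGORIES) == 80
    assert synset_to_coco_cat_id["person.n.01"] == 1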
def cocofy_lvis(input_filename, output_filename):
"""
Filter LVIS instance segmentation annotations to remove all categories that are not included in
COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
the output json are the non-contiguous COCO dataset ids.
Args:
input_filename (str): path to the LVIS json file.
output_filename (str): path to the COCOfied json file.
"""
with open(input_filename, "r") as f:
lvis_json = json.load(f)
lvis_annos = lvis_json.pop("annotations")
cocofied_lvis = copy.deepcopy(lvis_json)
lvis_json["annotations"] = lvis_annos
# Mapping from lvis cat id to coco cat id via synset
lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
# Synsets that we will keep in the dataset
synsets_to_keep = set(synset_to_coco_cat_id.keys())
coco_cat_id_with_instances = defaultdict(int)
new_annos = []
ann_id = 1
for ann in lvis_annos:
lvis_cat_id = ann["category_id"]
synset = lvis_cat_id_to_synset[lvis_cat_id]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
new_ann = copy.deepcopy(ann)
new_ann["category_id"] = coco_cat_id
new_ann["id"] = ann_id
ann_id += 1
new_annos.append(new_ann)
coco_cat_id_with_instances[coco_cat_id] += 1
cocofied_lvis["annotations"] = new_annos
for image in cocofied_lvis["images"]:
for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
new_category_list = []
for lvis_cat_id in image[key]:
synset = lvis_cat_id_to_synset[lvis_cat_id]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
new_category_list.append(coco_cat_id)
coco_cat_id_with_instances[coco_cat_id] += 1
image[key] = new_category_list
coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
new_categories = []
for cat in lvis_json["categories"]:
synset = cat["synset"]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
if coco_cat_id not in coco_cat_id_with_instances:
continue
new_cat = copy.deepcopy(cat)
new_cat["id"] = coco_cat_id
new_categories.append(new_cat)
cocofied_lvis["categories"] = new_categories
with open(output_filename, "w") as f:
json.dump(cocofied_lvis, f)
print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
if __name__ == "__main__":
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
print("Start COCOfing {}.".format(s))
cocofy_lvis(
os.path.join(dataset_dir, "{}.json".format(s)),
os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
)
|
banmo-main
|
third_party/detectron2_old/datasets/prepare_cocofied_lvis.py
|