python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
DENSEPOSE_MASK_KEY = "dp_masks"
DENSEPOSE_IUV_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V"]
DENSEPOSE_CSE_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_vertex", "ref_model"]
DENSEPOSE_ALL_POSSIBLE_KEYS = set(
DENSEPOSE_IUV_KEYS_WITHOUT_MASK + DENSEPOSE_CSE_KEYS_WITHOUT_MASK + [DENSEPOSE_MASK_KEY]
)
DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
@dataclass
class CocoDatasetInfo:
name: str
images_root: str
annotations_fpath: str
DATASETS = [
CocoDatasetInfo(
name="densepose_coco_2014_train",
images_root="coco/train2014",
annotations_fpath="coco/annotations/densepose_train2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014_100.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_train_cse",
images_root="coco/train2014",
annotations_fpath="coco_cse/densepose_train2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_100_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_valminusminival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_chimps",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_densepose.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_train",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_train.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_val",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_val.json",
),
CocoDatasetInfo(
name="posetrack2017_train",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_train2017.json",
),
CocoDatasetInfo(
name="posetrack2017_val",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_val2017.json",
),
CocoDatasetInfo(
name="lvis_v05_train",
images_root="coco/train2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_train.json",
),
CocoDatasetInfo(
name="lvis_v05_val",
images_root="coco/val2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_val.json",
),
]
BASE_DATASETS = [
CocoDatasetInfo(
name="base_coco_2017_train",
images_root="coco/train2017",
annotations_fpath="coco/annotations/instances_train2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val_100",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017_100.json",
),
]
def get_metadata(base_path: Optional[str]) -> Dict[str, Any]:
"""
Returns metadata associated with COCO DensePose datasets
Args:
base_path: Optional[str]
Base path used to load metadata from
Returns:
Dict[str, Any]
Metadata in the form of a dictionary
"""
meta = {
"densepose_transform_src": maybe_prepend_base_path(base_path, "UV_symmetry_transforms.mat"),
"densepose_smpl_subdiv": maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"),
"densepose_smpl_subdiv_transform": maybe_prepend_base_path(
base_path,
"SMPL_SUBDIV_TRANSFORM.mat",
),
}
return meta
def _load_coco_annotations(json_file: str):
"""
Load COCO annotations from a JSON file
Args:
json_file: str
Path to the file to load annotations from
Returns:
Instance of `pycocotools.coco.COCO` that provides access to annotations
data
"""
from pycocotools.coco import COCO
logger = logging.getLogger(__name__)
timer = Timer()
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return coco_api
def _add_categories_metadata(dataset_name: str, categories: List[Dict[str, Any]]):
meta = MetadataCatalog.get(dataset_name)
meta.categories = {c["id"]: c["name"] for c in categories}
logger = logging.getLogger(__name__)
logger.info("Dataset {} categories: {}".format(dataset_name, meta.categories))
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
if "minival" in json_file:
# Skip validation on COCO2014 valminusminival and minival annotations
# The ratio of buggy annotations there is tiny and does not affect accuracy
# Therefore we explicitly white-list them
return
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
ann_keys = ["iscrowd", "category_id"]
dataset_dicts = []
contains_video_frame_info = False
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
if "frame_id" in img_dict:
record["frame_id"] = img_dict["frame_id"]
record["video_id"] = img_dict.get("vid_id", None)
contains_video_frame_info = True
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
assert ann_dict.get("ignore", 0) == 0
obj = {key: ann_dict[key] for key in ann_keys if key in ann_dict}
_maybe_add_bbox(obj, ann_dict)
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if contains_video_frame_info:
create_video_frame_mapping(dataset_name, dataset_dicts)
return dataset_dicts
def get_contiguous_id_to_category_id_map(metadata):
cat_id_2_cont_id = metadata.thing_dataset_id_to_contiguous_id
cont_id_2_cat_id = {}
for cat_id, cont_id in cat_id_2_cont_id.items():
if cont_id in cont_id_2_cat_id:
continue
cont_id_2_cat_id[cont_id] = cat_id
return cont_id_2_cat_id
def maybe_filter_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
# filter categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
cats.append(cat)
coco_api.dataset["categories"] = cats
# filter annotations, if multiple categories are mapped to a single
# contiguous ID, use only one category ID and map all annotations to that category ID
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
ann["category_id"] = cont_id_2_cat_id[cont_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def maybe_filter_and_map_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
category_id_map = meta.thing_dataset_id_to_contiguous_id
# map categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in category_id_map:
continue
cat["id"] = category_id_map[cat_id]
cats.append(cat)
coco_api.dataset["categories"] = cats
# map annotation categories
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in category_id_map:
continue
ann["category_id"] = category_id_map[cat_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def create_video_frame_mapping(dataset_name, dataset_dicts):
mapping = defaultdict(dict)
for d in dataset_dicts:
video_id = d.get("video_id")
if video_id is None:
continue
mapping[video_id].update({d["frame_id"]: d["file_name"]})
MetadataCatalog.get(dataset_name).set(video_frame_mapping=mapping)
def load_coco_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in COCO instances format.
Replaces `detectron2.data.datasets.coco.load_coco_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
Path to the JSON file with annotations in COCO instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
"""
coco_api = _load_coco_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name, coco_api.loadCats(coco_api.getCatIds()))
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
"""
Registers provided COCO DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_coco_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
):
"""
Registers provided COCO DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset infos
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
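# Minimal usage sketch for the registration helpers above. The dataset name and
# paths are hypothetical placeholders; `DatasetCatalog.get` only succeeds if the
# annotation JSON and image folder actually exist under `datasets_root`.
def _example_register_custom_coco_dataset():
    my_dataset = CocoDatasetInfo(
        name="densepose_my_dataset_train",
        images_root="my_dataset/images",
        annotations_fpath="my_dataset/annotations/densepose_train.json",
    )
    register_dataset(my_dataset, datasets_root="datasets")
    # Registration is lazy: the JSON is parsed only when the catalog entry is queried
    records = DatasetCatalog.get(my_dataset.name)
    meta = MetadataCatalog.get(my_dataset.name)
    return records, meta.json_file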
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/datasets/coco.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import builtin # ensure the builtin datasets are registered
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import get_lvis_instances_meta
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
from .coco import (
DENSEPOSE_ALL_POSSIBLE_KEYS,
DENSEPOSE_METADATA_URL_PREFIX,
CocoDatasetInfo,
get_metadata,
)
DATASETS = [
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_animals_100",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_val_animals_100_v2.json",
),
]
def _load_lvis_annotations(json_file: str):
"""
Load COCO annotations from a JSON file
Args:
json_file: str
Path to the file to load annotations from
Returns:
Instance of `pycocotools.coco.COCO` that provides access to annotations
data
"""
from lvis import LVIS
json_file = PathManager.get_local_path(json_file)
logger = logging.getLogger(__name__)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return lvis_api
def _add_categories_metadata(dataset_name: str):
metadict = get_lvis_instances_meta(dataset_name)
categories = metadict["thing_classes"]
metadata = MetadataCatalog.get(dataset_name)
metadata.categories = {i + 1: categories[i] for i in range(len(categories))}
logger = logging.getLogger(__name__)
logger.info(f"Dataset {dataset_name} has {len(categories)} categories")
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
dataset_dicts = []
def get_file_name(img_root, img_dict):
# Determine the path including the split folder ("train2017", "val2017", "test2017") from
# the coco_url field. Example:
# 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
return os.path.join(img_root + split_folder, file_name)
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = get_file_name(image_root, img_dict)
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
obj = {}
_maybe_add_bbox(obj, ann_dict)
obj["iscrowd"] = ann_dict.get("iscrowd", 0)
obj["category_id"] = ann_dict["category_id"]
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in LVIS instances format.
Replaces `detectron2.data.datasets.lvis.load_lvis_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
Path to the JSON file with annotations in LVIS instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
"""
lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name)
# sort indices for reproducible results
img_ids = sorted(lvis_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in LVIS format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
"""
Registers provided LVIS DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_lvis_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
evaluator_type="lvis",
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX),
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
):
"""
Registers provided LVIS DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset infos
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
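# Minimal sketch of how image paths are resolved for the LVIS entries above: the
# split folder is taken from the `coco_url` field and concatenated with the
# `images_root` prefix (e.g. "coco_" prepended with the datasets root). The URL
# below is an illustrative value only.
def _example_resolve_lvis_file_name():
    img_root = "datasets/coco_"  # images_root "coco_" after maybe_prepend_base_path
    img_dict = {"coco_url": "http://images.cocodataset.org/train2017/000000155379.jpg"}
    split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
    # yields "datasets/coco_train2017/000000155379.jpg"
    return os.path.join(img_root + split_folder, file_name)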
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/datasets/lvis.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from enum import Enum
class DatasetType(Enum):
"""
Dataset type, mostly used for datasets that contain data to bootstrap models on
"""
VIDEO_LIST = "video_list"
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/datasets/dataset_type.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .chimpnsee import register_dataset as register_chimpnsee_dataset
from .coco import BASE_DATASETS as BASE_COCO_DATASETS
from .coco import DATASETS as COCO_DATASETS
from .coco import register_datasets as register_coco_datasets
from .lvis import DATASETS as LVIS_DATASETS
from .lvis import register_datasets as register_lvis_datasets
DEFAULT_DATASETS_ROOT = "datasets"
register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT)
register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT)
register_lvis_datasets(LVIS_DATASETS, DEFAULT_DATASETS_ROOT)
register_chimpnsee_dataset(DEFAULT_DATASETS_ROOT) # pyre-ignore[19]
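# Minimal sketch: importing this module registers every dataset listed above under
# DEFAULT_DATASETS_ROOT, so catalog entries can be queried by name. Reading the
# actual records still requires the data to be present on disk.
def _example_query_registered_dataset():
    from detectron2.data import MetadataCatalog
    meta = MetadataCatalog.get("densepose_coco_2014_train")
    return meta.json_file, meta.image_root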
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/datasets/builtin.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from collections import UserDict
from dataclasses import dataclass
from typing import Iterable, Optional
from ..utils import maybe_prepend_base_path
@dataclass
class MeshInfo:
name: str
data: str
geodists: Optional[str] = None
symmetry: Optional[str] = None
texcoords: Optional[str] = None
class _MeshCatalog(UserDict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mesh_ids = {}
self.mesh_names = {}
self.max_mesh_id = -1
def __setitem__(self, key, value):
if key in self:
logger = logging.getLogger(__name__)
logger.warning(
f"Overwriting mesh catalog entry '{key}': old value {self[key]}"
f", new value {value}"
)
mesh_id = self.mesh_ids[key]
else:
self.max_mesh_id += 1
mesh_id = self.max_mesh_id
super().__setitem__(key, value)
self.mesh_ids[key] = mesh_id
self.mesh_names[mesh_id] = key
def get_mesh_id(self, shape_name: str) -> int:
return self.mesh_ids[shape_name]
def get_mesh_name(self, mesh_id: int) -> str:
return self.mesh_names[mesh_id]
MeshCatalog = _MeshCatalog()
def register_mesh(mesh_info: MeshInfo, base_path: Optional[str]):
geodists, symmetry, texcoords = mesh_info.geodists, mesh_info.symmetry, mesh_info.texcoords
if geodists:
geodists = maybe_prepend_base_path(base_path, geodists)
if symmetry:
symmetry = maybe_prepend_base_path(base_path, symmetry)
if texcoords:
texcoords = maybe_prepend_base_path(base_path, texcoords)
MeshCatalog[mesh_info.name] = MeshInfo(
name=mesh_info.name,
data=maybe_prepend_base_path(base_path, mesh_info.data),
geodists=geodists,
symmetry=symmetry,
texcoords=texcoords,
)
def register_meshes(mesh_infos: Iterable[MeshInfo], base_path: Optional[str]):
for mesh_info in mesh_infos:
register_mesh(mesh_info, base_path)
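# Minimal usage sketch for the mesh catalog above. The mesh name, file name and
# base path are hypothetical placeholders; registration only records paths, the
# mesh data itself is not loaded here.
def _example_register_and_query_mesh():
    register_mesh(
        MeshInfo(name="my_mesh_100", data="my_mesh_100.pkl"),
        base_path="https://example.com/meshes/",
    )
    mesh_id = MeshCatalog.get_mesh_id("my_mesh_100")  # integer id assigned at registration
    info = MeshCatalog["my_mesh_100"]                 # MeshInfo with resolved paths
    return mesh_id, info.data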
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/meshes/catalog.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import builtin
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/meshes/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .catalog import MeshInfo, register_meshes
DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/"
MESHES = [
MeshInfo(
name="smpl_27554",
data="smpl_27554.pkl",
geodists="geodists/geodists_smpl_27554.pkl",
symmetry="symmetry/symmetry_smpl_27554.pkl",
texcoords="texcoords/texcoords_smpl_27554.pkl",
),
MeshInfo(
name="chimp_5029",
data="chimp_5029.pkl",
geodists="geodists/geodists_chimp_5029.pkl",
symmetry="symmetry/symmetry_chimp_5029.pkl",
texcoords="texcoords/texcoords_chimp_5029.pkl",
),
MeshInfo(
name="cat_5001",
data="cat_5001.pkl",
geodists="geodists/geodists_cat_5001.pkl",
symmetry="symmetry/symmetry_cat_5001.pkl",
texcoords="texcoords/texcoords_cat_5001.pkl",
),
MeshInfo(
name="cat_7466",
data="cat_7466.pkl",
geodists="geodists/geodists_cat_7466.pkl",
symmetry="symmetry/symmetry_cat_7466.pkl",
texcoords="texcoords/texcoords_cat_7466.pkl",
),
MeshInfo(
name="sheep_5004",
data="sheep_5004.pkl",
geodists="geodists/geodists_sheep_5004.pkl",
symmetry="symmetry/symmetry_sheep_5004.pkl",
texcoords="texcoords/texcoords_sheep_5004.pkl",
),
MeshInfo(
name="zebra_5002",
data="zebra_5002.pkl",
geodists="geodists/geodists_zebra_5002.pkl",
symmetry="symmetry/symmetry_zebra_5002.pkl",
texcoords="texcoords/texcoords_zebra_5002.pkl",
),
MeshInfo(
name="horse_5004",
data="horse_5004.pkl",
geodists="geodists/geodists_horse_5004.pkl",
symmetry="symmetry/symmetry_horse_5004.pkl",
texcoords="texcoords/texcoords_zebra_5002.pkl",
),
MeshInfo(
name="giraffe_5002",
data="giraffe_5002.pkl",
geodists="geodists/geodists_giraffe_5002.pkl",
symmetry="symmetry/symmetry_giraffe_5002.pkl",
texcoords="texcoords/texcoords_giraffe_5002.pkl",
),
MeshInfo(
name="elephant_5002",
data="elephant_5002.pkl",
geodists="geodists/geodists_elephant_5002.pkl",
symmetry="symmetry/symmetry_elephant_5002.pkl",
texcoords="texcoords/texcoords_elephant_5002.pkl",
),
MeshInfo(
name="dog_5002",
data="dog_5002.pkl",
geodists="geodists/geodists_dog_5002.pkl",
symmetry="symmetry/symmetry_dog_5002.pkl",
texcoords="texcoords/texcoords_dog_5002.pkl",
),
MeshInfo(
name="dog_7466",
data="dog_7466.pkl",
geodists="geodists/geodists_dog_7466.pkl",
symmetry="symmetry/symmetry_dog_7466.pkl",
texcoords="texcoords/texcoords_dog_7466.pkl",
),
MeshInfo(
name="cow_5002",
data="cow_5002.pkl",
geodists="geodists/geodists_cow_5002.pkl",
symmetry="symmetry/symmetry_cow_5002.pkl",
texcoords="texcoords/texcoords_cow_5002.pkl",
),
MeshInfo(
name="bear_4936",
data="bear_4936.pkl",
geodists="geodists/geodists_bear_4936.pkl",
symmetry="symmetry/symmetry_bear_4936.pkl",
texcoords="texcoords/texcoords_bear_4936.pkl",
),
]
register_meshes(MESHES, DENSEPOSE_MESHES_DIR)
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/meshes/builtin.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.structures import BitMasks, Instances
from densepose.converters import ToMaskConverter
class MaskFromDensePoseSampler:
"""
Produce mask GT from DensePose predictions
This sampler simply converts DensePose predictions to BitMasks
that contain a bool tensor of the size of the input image
"""
def __call__(self, instances: Instances) -> BitMasks:
"""
Converts predicted data from `instances` into the GT mask data
Args:
instances (Instances): predicted results, expected to have `pred_densepose` field
Returns:
Boolean Tensor of the size of the input image that has non-zero
values at pixels that are estimated to belong to the detected object
"""
return ToMaskConverter.convert(
instances.pred_densepose, instances.pred_boxes, instances.image_size
)
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/mask_from_densepose.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional
from detectron2.structures import Instances
ModelOutput = Dict[str, Any]
SampledData = Dict[str, Any]
@dataclass
class _Sampler:
"""
Sampler registry entry that contains:
- src (str): source field to sample from (deleted after sampling)
- dst (Optional[str]): destination field to sample to, if not None
- func (Optional[Callable: Any -> Any]): function that performs sampling,
if None, reference copy is performed
"""
src: str
dst: Optional[str]
func: Optional[Callable[[Any], Any]]
class PredictionToGroundTruthSampler:
"""
Sampler implementation that converts predictions to GT using registered
samplers for different fields of `Instances`.
"""
def __init__(self, dataset_name: str = ""):
self.dataset_name = dataset_name
self._samplers = {}
self.register_sampler("pred_boxes", "gt_boxes", None)
self.register_sampler("pred_classes", "gt_classes", None)
# delete scores
self.register_sampler("scores")
def __call__(self, model_output: List[ModelOutput]) -> List[SampledData]:
"""
Transform model output into ground truth data through sampling
Args:
model_output (List[Dict[str, Any]]): model output, one entry per image
Returns:
List[Dict[str, Any]]: sampled data
"""
for model_output_i in model_output:
instances: Instances = model_output_i["instances"]
# transform data in each field
for _, sampler in self._samplers.items():
if not instances.has(sampler.src) or sampler.dst is None:
continue
if sampler.func is None:
instances.set(sampler.dst, instances.get(sampler.src))
else:
instances.set(sampler.dst, sampler.func(instances))
# delete model output data that was transformed
for _, sampler in self._samplers.items():
if sampler.src != sampler.dst and instances.has(sampler.src):
instances.remove(sampler.src)
model_output_i["dataset"] = self.dataset_name
return model_output
def register_sampler(
self,
prediction_attr: str,
gt_attr: Optional[str] = None,
func: Optional[Callable[[Any], Any]] = None,
):
"""
Register sampler for a field
Args:
prediction_attr (str): field to replace with a sampled value
gt_attr (Optional[str]): field to store the sampled value to, if not None
func (Optional[Callable: Any -> Any]): sampler function
"""
self._samplers[(prediction_attr, gt_attr)] = _Sampler(
src=prediction_attr, dst=gt_attr, func=func
)
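# Minimal wiring sketch, assuming the samplers defined elsewhere in this package:
# DensePose predictions are sampled into "gt_densepose" / "gt_masks" fields, while
# boxes and classes are copied by the default samplers registered in __init__.
# The dataset name is a hypothetical placeholder.
def _example_build_prediction_to_gt_sampler(dataset_name: str = "my_bootstrap_dataset"):
    from .densepose_uniform import DensePoseUniformSampler
    from .mask_from_densepose import MaskFromDensePoseSampler
    sampler = PredictionToGroundTruthSampler(dataset_name)
    sampler.register_sampler("pred_densepose", "gt_densepose", DensePoseUniformSampler())
    sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
    return sampler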
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/prediction_to_gt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .densepose_uniform import DensePoseUniformSampler
from .densepose_confidence_based import DensePoseConfidenceBasedSampler
from .densepose_cse_uniform import DensePoseCSEUniformSampler
from .densepose_cse_confidence_based import DensePoseCSEConfidenceBasedSampler
from .mask_from_densepose import MaskFromDensePoseSampler
from .prediction_to_gt import PredictionToGroundTruthSampler
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .densepose_cse_base import DensePoseCSEBaseSampler
from .densepose_uniform import DensePoseUniformSampler
class DensePoseCSEUniformSampler(DensePoseCSEBaseSampler, DensePoseUniformSampler):
"""
Uniform Sampler for CSE
"""
pass
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import functional as F
from detectron2.structures import BoxMode, Instances
from densepose.converters import ToChartResultConverter
from densepose.converters.base import IntTupleBox, make_int_box
from densepose.structures import DensePoseDataRelative, DensePoseList
class DensePoseBaseSampler:
"""
Base DensePose sampler to produce DensePose data from DensePose predictions.
Samples for each class are drawn according to some distribution over all pixels estimated
to belong to that class.
"""
def __init__(self, count_per_class: int = 8):
"""
Constructor
Args:
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
self.count_per_class = count_per_class
def __call__(self, instances: Instances) -> DensePoseList:
"""
Convert DensePose predictions (an instance of `DensePoseChartPredictorOutput`)
into DensePose annotations data (an instance of `DensePoseList`)
"""
boxes_xyxy_abs = instances.pred_boxes.tensor.clone().cpu()
boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
dp_datas = []
for i in range(len(boxes_xywh_abs)):
annotation_i = self._sample(instances[i], make_int_box(boxes_xywh_abs[i]))
annotation_i[DensePoseDataRelative.S_KEY] = self._resample_mask( # pyre-ignore[6]
instances[i].pred_densepose
)
dp_datas.append(DensePoseDataRelative(annotation_i))
# create densepose annotations on CPU
dp_list = DensePoseList(dp_datas, boxes_xyxy_abs, instances.image_size)
return dp_list
def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
"""
Sample DensePoseDataRelative from estimation results
"""
labels, dp_result = self._produce_labels_and_results(instance)
annotation = {
DensePoseDataRelative.X_KEY: [],
DensePoseDataRelative.Y_KEY: [],
DensePoseDataRelative.U_KEY: [],
DensePoseDataRelative.V_KEY: [],
DensePoseDataRelative.I_KEY: [],
}
n, h, w = dp_result.shape
for part_id in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
# indices - tuple of 3 1D tensors of size k
# 0: index along the first dimension N
# 1: index along H dimension
# 2: index along W dimension
indices = torch.nonzero(labels.expand(n, h, w) == part_id, as_tuple=True)
# values - an array of size [n, k]
# n: number of channels (U, V, confidences)
# k: number of points labeled with part_id
values = dp_result[indices].view(n, -1)
k = values.shape[1]
count = min(self.count_per_class, k)
if count <= 0:
continue
index_sample = self._produce_index_sample(values, count)
sampled_values = values[:, index_sample]
sampled_y = indices[1][index_sample] + 0.5
sampled_x = indices[2][index_sample] + 0.5
# prepare / normalize data
x = (sampled_x / w * 256.0).cpu().tolist()
y = (sampled_y / h * 256.0).cpu().tolist()
u = sampled_values[0].clamp(0, 1).cpu().tolist()
v = sampled_values[1].clamp(0, 1).cpu().tolist()
fine_segm_labels = [part_id] * count
# extend annotations
annotation[DensePoseDataRelative.X_KEY].extend(x)
annotation[DensePoseDataRelative.Y_KEY].extend(y)
annotation[DensePoseDataRelative.U_KEY].extend(u)
annotation[DensePoseDataRelative.V_KEY].extend(v)
annotation[DensePoseDataRelative.I_KEY].extend(fine_segm_labels)
return annotation
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Abstract method to produce a sample of indices to select data
To be implemented in descendants
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
raise NotImplementedError
def _produce_labels_and_results(self, instance: Instances) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutput`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [2, H, W], stacked DensePose results u and v
"""
converter = ToChartResultConverter
chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
return labels, dp_result
def _resample_mask(self, output: Any) -> torch.Tensor:
"""
Convert DensePose predictor output to segmentation annotation - tensors of size
(256, 256) and type `int64`.
Args:
output: DensePose predictor output with the following attributes:
- coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
segmentation scores
- fine_segm: tensor of size [N, C, H, W] with unnormalized fine
segmentation scores
Return:
Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
where S = DensePoseDataRelative.MASK_SIZE
"""
sz = DensePoseDataRelative.MASK_SIZE
S = (
F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
.argmax(dim=1)
.long()
)
I = (
(
F.interpolate(
output.fine_segm, (sz, sz), mode="bilinear", align_corners=False
).argmax(dim=1)
* (S > 0).long()
)
.squeeze()
.cpu()
)
# Map fine segmentation results to coarse segmentation ground truth
# TODO: extract this into separate classes
# coarse segmentation: 1 = Torso, 2 = Right Hand, 3 = Left Hand,
# 4 = Left Foot, 5 = Right Foot, 6 = Upper Leg Right, 7 = Upper Leg Left,
# 8 = Lower Leg Right, 9 = Lower Leg Left, 10 = Upper Arm Left,
# 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
# 14 = Head
# fine segmentation: 1, 2 = Torso, 3 = Right Hand, 4 = Left Hand,
# 5 = Left Foot, 6 = Right Foot, 7, 9 = Upper Leg Right,
# 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right,
# 12, 14 = Lower Leg Left, 15, 17 = Upper Arm Left,
# 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left,
# 20, 22 = Lower Arm Right, 23, 24 = Head
FINE_TO_COARSE_SEGMENTATION = {
1: 1,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7,
9: 6,
10: 7,
11: 8,
12: 9,
13: 8,
14: 9,
15: 10,
16: 11,
17: 10,
18: 11,
19: 12,
20: 13,
21: 12,
22: 13,
23: 14,
24: 14,
}
mask = torch.zeros((sz, sz), dtype=torch.int64, device=torch.device("cpu"))
for i in range(DensePoseDataRelative.N_PART_LABELS):
mask[I == i + 1] = FINE_TO_COARSE_SEGMENTATION[i + 1]
return mask
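# Minimal worked sketch of the coordinate normalization used in `_sample` above:
# pixel indices inside a box of size (w, h) are shifted by 0.5 to pixel centers and
# rescaled to DensePose's 256x256 annotation space. The numbers are illustrative.
def _example_normalize_point_to_annotation_space():
    w, h = 200, 100    # box width / height in pixels (hypothetical)
    px, py = 49, 24    # sampled pixel indices inside the box
    x = (px + 0.5) / w * 256.0  # -> 63.36
    y = (py + 0.5) / h * 256.0  # -> 62.72
    return x, y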
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import torch
from .densepose_base import DensePoseBaseSampler
class DensePoseUniformSampler(DensePoseBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn uniformly over all pixels estimated
to belong to that class.
"""
def __init__(self, count_per_class: int = 8):
"""
Constructor
Args:
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
super().__init__(count_per_class)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a uniform sample of indices to select data
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
return random.sample(range(k), count)
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_uniform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.converters.base import IntTupleBox
from .densepose_cse_base import DensePoseCSEBaseSampler
class DensePoseCSEConfidenceBasedSampler(DensePoseCSEBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""
def __init__(
self,
cfg: CfgNode,
use_gt_categories: bool,
embedder: torch.nn.Module,
confidence_channel: str,
count_per_class: int = 8,
search_count_multiplier: Optional[float] = None,
search_proportion: Optional[float] = None,
):
"""
Constructor
Args:
cfg (CfgNode): the config of the model
embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
confidence_channel (str): confidence channel to use for sampling;
possible values:
"coarse_segm_confidence": confidences for coarse segmentation
(default: "coarse_segm_confidence")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
search_proportion (float or None): if not None, the total number of the
most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""
super().__init__(cfg, use_gt_categories, embedder, count_per_class)
self.confidence_channel = confidence_channel
self.search_count_multiplier = search_count_multiplier
self.search_proportion = search_proportion
assert (search_count_multiplier is None) or (search_proportion is None), (
f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
f"and search_proportion (={search_proportion})"
)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): a tensor of shape [1, k] that contains confidences
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
if k == count:
index_sample = list(range(k))
else:
# take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
_, sorted_confidence_indices = torch.sort(values[0])
if self.search_count_multiplier is not None:
search_count = min(int(count * self.search_count_multiplier), k) # pyre-ignore[58]
elif self.search_proportion is not None:
search_count = min(max(int(k * self.search_proportion), count), k)
else:
search_count = min(count, k)
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_confidence_indices[:search_count][sample_from_top]
return index_sample
def _produce_mask_and_results(
self, instance: Instances, bbox_xywh: IntTupleBox
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance
Args:
instance (Instances): an instance of
`DensePoseEmbeddingPredictorOutputWithConfidences`
bbox_xywh (IntTupleBox): the corresponding bounding box
Return:
mask (torch.Tensor): shape [H, W], DensePose segmentation mask
embeddings (torch.Tensor): a tensor of shape [D, H, W]
DensePose CSE Embeddings
other_values: a tensor of shape [1, H, W], DensePose CSE confidence
"""
_, _, w, h = bbox_xywh
densepose_output = instance.pred_densepose
mask, embeddings, _ = super()._produce_mask_and_results(instance, bbox_xywh)
other_values = F.interpolate(
getattr(densepose_output, self.confidence_channel), size=(h, w), mode="bilinear"
)[0].cpu()
return mask, embeddings, other_values
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_confidence_based.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Optional, Tuple
import torch
from densepose.converters import ToChartResultConverterWithConfidences
from .densepose_base import DensePoseBaseSampler
class DensePoseConfidenceBasedSampler(DensePoseBaseSampler):
"""
Samples DensePose data from DensePose predictions.
Samples for each class are drawn using confidence value estimates.
"""
def __init__(
self,
confidence_channel: str,
count_per_class: int = 8,
search_count_multiplier: Optional[float] = None,
search_proportion: Optional[float] = None,
):
"""
Constructor
Args:
confidence_channel (str): confidence channel to use for sampling;
possible values:
"sigma_2": confidences for UV values
"fine_segm_confidence": confidences for fine segmentation
"coarse_segm_confidence": confidences for coarse segmentation
(default: "sigma_2")
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category (default: 8)
search_count_multiplier (float or None): if not None, the total number
of the most confident estimates of a given class to consider is
defined as `min(search_count_multiplier * count_per_class, N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_proportion` (default: None)
search_proportion (float or None): if not None, the total number of the
most confident estimates of a given class to consider is
defined as `min(max(search_proportion * N, count_per_class), N)`,
where `N` is the total number of estimates of the class; cannot be
specified together with `search_count_multiplier` (default: None)
"""
super().__init__(count_per_class)
self.confidence_channel = confidence_channel
self.search_count_multiplier = search_count_multiplier
self.search_proportion = search_proportion
assert (search_count_multiplier is None) or (search_proportion is None), (
f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
f"and search_proportion (={search_proportion})"
)
def _produce_index_sample(self, values: torch.Tensor, count: int):
"""
Produce a sample of indices to select data based on confidences
Args:
values (torch.Tensor): an array of size [n, k] that contains
estimated values (U, V, confidences);
n: number of channels (U, V, confidences)
k: number of points labeled with part_id
count (int): number of samples to produce, should be positive and <= k
Return:
list(int): indices of values (along axis 1) selected as a sample
"""
k = values.shape[1]
if k == count:
index_sample = list(range(k))
else:
# take the best count * search_count_multiplier pixels,
# sample from them uniformly
# (here best = smallest variance)
_, sorted_confidence_indices = torch.sort(values[2])
if self.search_count_multiplier is not None:
search_count = min(int(count * self.search_count_multiplier), k) # pyre-ignore[58]
elif self.search_proportion is not None:
search_count = min(max(int(k * self.search_proportion), count), k)
else:
search_count = min(count, k)
sample_from_top = random.sample(range(search_count), count)
index_sample = sorted_confidence_indices[:search_count][sample_from_top]
return index_sample
def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance, with confidences
Args:
instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences`
Return:
labels (torch.Tensor): shape [H, W], DensePose segmentation labels
dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v
stacked with the confidence channel
"""
converter = ToChartResultConverterWithConfidences
chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
dp_result = torch.cat(
(dp_result, getattr(chart_result, self.confidence_channel)[None].cpu())
)
return labels, dp_result
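# Minimal sketch of how the candidate pool size is chosen in `_produce_index_sample`
# above; the numbers are illustrative. With k confident pixels and `count` requested
# samples, either a multiplier of `count` or a proportion of `k` bounds the pool
# (in the samplers only one of the two options may be set).
def _example_search_count(k: int = 1000, count: int = 8):
    search_count_multiplier = 4.0  # hypothetical setting
    search_proportion = 0.1        # hypothetical alternative setting
    by_multiplier = min(int(count * search_count_multiplier), k)    # 32
    by_proportion = min(max(int(k * search_proportion), count), k)  # 100
    return by_multiplier, by_proportion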
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_confidence_based.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.structures import Instances
from densepose.converters.base import IntTupleBox
from densepose.data.utils import get_class_to_mesh_name_mapping
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.structures import DensePoseDataRelative
from .densepose_base import DensePoseBaseSampler
class DensePoseCSEBaseSampler(DensePoseBaseSampler):
"""
Base DensePose sampler to produce DensePose data from DensePose predictions.
Samples for each class are drawn according to some distribution over all pixels estimated
to belong to that class.
"""
def __init__(
self,
cfg: CfgNode,
use_gt_categories: bool,
embedder: torch.nn.Module,
count_per_class: int = 8,
):
"""
Constructor
Args:
cfg (CfgNode): the config of the model
embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
count_per_class (int): the sampler produces at most `count_per_class`
samples for each category
"""
super().__init__(count_per_class)
self.embedder = embedder
self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
self.use_gt_categories = use_gt_categories
def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
"""
Sample DensePoseDataRelative from estimation results
"""
if self.use_gt_categories:
instance_class = instance.dataset_classes.tolist()[0]
else:
instance_class = instance.pred_classes.tolist()[0]
mesh_name = self.class_to_mesh_name[instance_class]
annotation = {
DensePoseDataRelative.X_KEY: [],
DensePoseDataRelative.Y_KEY: [],
DensePoseDataRelative.VERTEX_IDS_KEY: [],
DensePoseDataRelative.MESH_NAME_KEY: mesh_name,
}
mask, embeddings, other_values = self._produce_mask_and_results(instance, bbox_xywh)
indices = torch.nonzero(mask, as_tuple=True)
selected_embeddings = embeddings.permute(1, 2, 0)[indices]
values = other_values[:, indices[0], indices[1]]
k = values.shape[1]
count = min(self.count_per_class, k)
if count <= 0:
return annotation
index_sample = self._produce_index_sample(values, count)
closest_vertices = squared_euclidean_distance_matrix(
selected_embeddings[index_sample], self.embedder(mesh_name)
)
closest_vertices = torch.argmin(closest_vertices, dim=1)
sampled_y = indices[0][index_sample] + 0.5
sampled_x = indices[1][index_sample] + 0.5
# prepare / normalize data
_, _, w, h = bbox_xywh
x = (sampled_x / w * 256.0).cpu().tolist()
y = (sampled_y / h * 256.0).cpu().tolist()
# extend annotations
annotation[DensePoseDataRelative.X_KEY].extend(x)
annotation[DensePoseDataRelative.Y_KEY].extend(y)
annotation[DensePoseDataRelative.VERTEX_IDS_KEY].extend(closest_vertices.cpu().tolist())
return annotation
def _produce_mask_and_results(
self, instance: Instances, bbox_xywh: IntTupleBox
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Method to get labels and DensePose results from an instance
Args:
instance (Instances): an instance of `DensePoseEmbeddingPredictorOutput`
bbox_xywh (IntTupleBox): the corresponding bounding box
Return:
mask (torch.Tensor): shape [H, W], DensePose segmentation mask
embeddings (torch.Tensor): a tensor of shape [D, H, W],
DensePose CSE Embeddings
other_values (torch.Tensor): a tensor of shape [0, H, W],
for potential other values
"""
densepose_output = instance.pred_densepose
S = densepose_output.coarse_segm
E = densepose_output.embedding
_, _, w, h = bbox_xywh
embeddings = F.interpolate(E, size=(h, w), mode="bilinear")[0].cpu()
coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].cpu()
mask = coarse_segm_resized.argmax(0) > 0
other_values = torch.empty((0, h, w))
return mask, embeddings, other_values
def _resample_mask(self, output: Any) -> torch.Tensor:
"""
Convert DensePose predictor output to segmentation annotation - tensors of size
(256, 256) and type `int64`.
Args:
output: DensePose predictor output with the following attributes:
- coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
segmentation scores
Return:
Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
where S = DensePoseDataRelative.MASK_SIZE
"""
sz = DensePoseDataRelative.MASK_SIZE
mask = (
F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
.argmax(dim=1)
.long()
.squeeze()
.cpu()
)
return mask
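# Minimal standalone sketch of the nearest-vertex assignment performed in `_sample`
# above: each sampled pixel embedding is matched to the closest mesh vertex embedding
# by squared Euclidean distance. `torch.cdist` is used here as a stand-in for
# `squared_euclidean_distance_matrix`; shapes and values are illustrative.
def _example_nearest_vertex_lookup():
    pixel_embeddings = torch.randn(8, 16)      # [count, D] sampled pixel embeddings
    vertex_embeddings = torch.randn(5000, 16)  # [V, D] mesh vertex embeddings
    dists = torch.cdist(pixel_embeddings, vertex_embeddings) ** 2  # [count, V]
    return torch.argmin(dists, dim=1)  # vertex id for each sampled pixel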
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/samplers/densepose_cse_base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .image import ImageResizeTransform
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/transform/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
class ImageResizeTransform:
"""
Transform that resizes images loaded from a dataset
(BGR data in NCHW channel order, typically uint8) to a format ready to be
consumed by DensePose training (BGR float32 data in NCHW channel order)
"""
def __init__(self, min_size: int = 800, max_size: int = 1333):
self.min_size = min_size
self.max_size = max_size
def __call__(self, images: torch.Tensor) -> torch.Tensor:
"""
Args:
images (torch.Tensor): tensor of size [N, 3, H, W] that contains
BGR data (typically in uint8)
Returns:
images (torch.Tensor): tensor of size [N, 3, H1, W1] where
H1 and W1 are chosen to respect the specified min and max sizes
and preserve the original aspect ratio, the data channels
follow BGR order and the data type is `torch.float32`
"""
# resize with min size
images = images.float()
min_size = min(images.shape[-2:])
max_size = max(images.shape[-2:])
scale = min(self.min_size / min_size, self.max_size / max_size)
images = torch.nn.functional.interpolate(
images, scale_factor=scale, mode="bilinear", align_corners=False
)
return images
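# Minimal worked sketch of the scaling rule above, with illustrative sizes: a batch
# of 480x640 frames with min_size=800 / max_size=1333 gets
# scale = min(800/480, 1333/640) = 5/3, i.e. roughly 800x1067 output.
def _example_image_resize():
    images = torch.zeros(2, 3, 480, 640, dtype=torch.uint8)  # hypothetical [N, 3, H, W] batch
    transform = ImageResizeTransform(min_size=800, max_size=1333)
    resized = transform(images)
    return resized.shape  # approximately (2, 3, 800, 1067); exact width depends on rounding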
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/data/transform/image.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .trainer import Trainer
| banmo-main | third_party/detectron2_old/projects/DensePose/densepose/engine/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from collections import OrderedDict
from typing import List, Optional, Union
import torch
from torch import nn
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import (
DatasetEvaluator,
DatasetEvaluators,
inference_on_dataset,
print_csv_format,
)
from detectron2.solver.build import get_default_optimizer_params, maybe_add_gradient_clipping
from detectron2.utils import comm
from detectron2.utils.events import EventWriter, get_event_storage
from densepose import DensePoseDatasetMapperTTA, DensePoseGeneralizedRCNNWithTTA, load_from_cfg
from densepose.data import (
DatasetMapper,
build_combined_loader,
build_detection_test_loader,
build_detection_train_loader,
build_inference_based_loaders,
has_inference_based_loaders,
)
from densepose.evaluation.d2_evaluator_adapter import Detectron2COCOEvaluatorAdapter
from densepose.evaluation.evaluator import DensePoseCOCOEvaluator, build_densepose_evaluator_storage
from densepose.modeling.cse import Embedder
class SampleCountingLoader:
def __init__(self, loader):
self.loader = loader
def __iter__(self):
it = iter(self.loader)
storage = get_event_storage()
while True:
try:
batch = next(it)
num_inst_per_dataset = {}
for data in batch:
dataset_name = data["dataset"]
if dataset_name not in num_inst_per_dataset:
num_inst_per_dataset[dataset_name] = 0
num_inst = len(data["instances"])
num_inst_per_dataset[dataset_name] += num_inst
for dataset_name in num_inst_per_dataset:
storage.put_scalar(f"batch/{dataset_name}", num_inst_per_dataset[dataset_name])
yield batch
except StopIteration:
break
class SampleCountMetricPrinter(EventWriter):
def __init__(self):
self.logger = logging.getLogger(__name__)
def write(self):
storage = get_event_storage()
batch_stats_strs = []
for key, buf in storage.histories().items():
if key.startswith("batch/"):
batch_stats_strs.append(f"{key} {buf.avg(20)}")
self.logger.info(", ".join(batch_stats_strs))
class Trainer(DefaultTrainer):
@classmethod
def extract_embedder_from_model(cls, model: nn.Module) -> Optional[Embedder]:
if isinstance(model, nn.parallel.DistributedDataParallel):
model = model.module
if hasattr(model, "roi_heads") and hasattr(model.roi_heads, "embedder"):
# pyre-fixme[16]: `Tensor` has no attribute `embedder`.
return model.roi_heads.embedder
return None
# TODO: the only reason to copy the base class code here is to pass the embedder from
# the model to the evaluator; that should be refactored to avoid unnecessary copy-pasting
@classmethod
def test(
cls,
cfg: CfgNode,
model: nn.Module,
evaluators: Optional[Union[DatasetEvaluator, List[DatasetEvaluator]]] = None,
):
"""
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (DatasetEvaluator, list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
embedder = cls.extract_embedder_from_model(model)
evaluator = cls.build_evaluator(cfg, dataset_name, embedder=embedder)
except NotImplementedError:
                    logger.warning(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE or comm.is_main_process():
results_i = inference_on_dataset(model, data_loader, evaluator)
else:
results_i = {}
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@classmethod
def build_evaluator(
cls,
cfg: CfgNode,
dataset_name: str,
output_folder: Optional[str] = None,
embedder: Optional[Embedder] = None,
) -> DatasetEvaluators:
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluators = []
distributed = cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE
# Note: we currently use COCO evaluator for both COCO and LVIS datasets
# to have compatible metrics. LVIS bbox evaluator could also be used
# with an adapter to properly handle filtered / mapped categories
# evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
# if evaluator_type == "coco":
# evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# elif evaluator_type == "lvis":
# evaluators.append(LVISEvaluator(dataset_name, output_dir=output_folder))
evaluators.append(
Detectron2COCOEvaluatorAdapter(
dataset_name, output_dir=output_folder, distributed=distributed
)
)
if cfg.MODEL.DENSEPOSE_ON:
storage = build_densepose_evaluator_storage(cfg, output_folder)
evaluators.append(
DensePoseCOCOEvaluator(
dataset_name,
distributed,
output_folder,
evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE,
min_iou_threshold=cfg.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD,
storage=storage,
embedder=embedder,
should_evaluate_mesh_alignment=cfg.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT,
mesh_alignment_mesh_names=cfg.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES,
)
)
return DatasetEvaluators(evaluators)
@classmethod
def build_optimizer(cls, cfg: CfgNode, model: nn.Module):
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
overrides={
"features": {
"lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR,
},
"embeddings": {
"lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR,
},
},
)
optimizer = torch.optim.SGD(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
return maybe_add_gradient_clipping(cfg, optimizer)
@classmethod
def build_test_loader(cls, cfg: CfgNode, dataset_name):
return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
@classmethod
def build_train_loader(cls, cfg: CfgNode):
data_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
if not has_inference_based_loaders(cfg):
return data_loader
model = cls.build_model(cfg)
model.to(cfg.BOOTSTRAP_MODEL.DEVICE)
DetectionCheckpointer(model).resume_or_load(cfg.BOOTSTRAP_MODEL.WEIGHTS, resume=False)
inference_based_loaders, ratios = build_inference_based_loaders(cfg, model)
loaders = [data_loader] + inference_based_loaders
ratios = [1.0] + ratios
combined_data_loader = build_combined_loader(cfg, loaders, ratios)
sample_counting_loader = SampleCountingLoader(combined_data_loader)
return sample_counting_loader
def build_writers(self):
writers = super().build_writers()
writers.append(SampleCountMetricPrinter())
return writers
@classmethod
def test_with_TTA(cls, cfg: CfgNode, model):
logger = logging.getLogger("detectron2.trainer")
        # At the end of training, run an evaluation with TTA.
        # Only some R-CNN models are supported.
logger.info("Running inference with test-time augmentation ...")
transform_data = load_from_cfg(cfg)
model = DensePoseGeneralizedRCNNWithTTA(
cfg, model, transform_data, DensePoseDatasetMapperTTA(cfg)
)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators) # pyre-ignore[6]
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
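
# Illustrative sketch (editor's addition, not part of the original file): how
# SampleCountingLoader and SampleCountMetricPrinter cooperate through the event
# storage. The toy batches below are hypothetical stand-ins for what the DensePose
# data loaders produce ("instances" only needs a length here).
if __name__ == "__main__":
    from detectron2.utils.events import EventStorage

    toy_batches = [
        [{"dataset": "densepose_coco_2014_train", "instances": [0, 1, 2]}],
        [{"dataset": "densepose_coco_2014_train", "instances": [0]}],
    ]
    with EventStorage(start_iter=0) as demo_storage:
        for _ in SampleCountingLoader(toy_batches):
            demo_storage.step()
        SampleCountMetricPrinter().write()  # logs e.g. "batch/densepose_coco_2014_train 2.0"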
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/densepose/engine/trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import unittest
from densepose.data.video import FirstKFramesSelector, LastKFramesSelector, RandomKFramesSelector
class TestFrameSelector(unittest.TestCase):
def test_frame_selector_random_k_1(self):
_SEED = 43
_K = 4
random.seed(_SEED)
selector = RandomKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = [0, 8, 4, 6]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_random_k_2(self):
_SEED = 43
_K = 10
random.seed(_SEED)
selector = RandomKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = [0, 2, 4]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_first_k_1(self):
_K = 4
selector = FirstKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = frame_tss[:_K]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_first_k_2(self):
_K = 10
selector = FirstKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = frame_tss[:_K]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_last_k_1(self):
_K = 4
selector = LastKFramesSelector(_K)
frame_tss = list(range(0, 20, 2))
_SELECTED_GT = frame_tss[-_K:]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
def test_frame_selector_last_k_2(self):
_K = 10
selector = LastKFramesSelector(_K)
frame_tss = list(range(0, 6, 2))
_SELECTED_GT = frame_tss[-_K:]
selected = selector(frame_tss)
self.assertEqual(_SELECTED_GT, selected)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_frame_selector.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from .common import (
get_config_files,
get_evolution_config_files,
get_hrnet_config_files,
get_quick_schedules_config_files,
setup,
)
class TestSetup(unittest.TestCase):
def _test_setup(self, config_file):
setup(config_file)
def test_setup_configs(self):
config_files = get_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_evolution_configs(self):
config_files = get_evolution_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_hrnet_configs(self):
config_files = get_hrnet_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_quick_schedules_configs(self):
config_files = get_quick_schedules_config_files()
for config_file in config_files:
self._test_setup(config_file)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_structures.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures import BitMasks, Boxes, Instances
from .common import get_model
# TODO(plabatut): Modularize detectron2 tests and re-use
def make_model_inputs(image, instances=None):
if instances is None:
return {"image": image}
return {"image": image, "instances": instances}
def make_empty_instances(h, w):
instances = Instances((h, w))
instances.gt_boxes = Boxes(torch.rand(0, 4))
instances.gt_classes = torch.tensor([]).to(dtype=torch.int64)
instances.gt_masks = BitMasks(torch.rand(0, h, w))
return instances
class ModelE2ETest(unittest.TestCase):
CONFIG_PATH = ""
def setUp(self):
self.model = get_model(self.CONFIG_PATH)
def _test_eval(self, sizes):
inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes]
self.model.eval()
self.model(inputs)
class DensePoseRCNNE2ETest(ModelE2ETest):
CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml"
def test_empty_data(self):
self._test_eval([(200, 250), (200, 249)])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_model_e2e.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import random
import tempfile
import unittest
import torch
import torchvision.io as io
from densepose.data.transform import ImageResizeTransform
from densepose.data.video import RandomKFramesSelector, VideoKeyframeDataset
try:
import av
except ImportError:
av = None
# copied from torchvision test/test_io.py
def _create_video_frames(num_frames, height, width):
y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width))
data = []
for i in range(num_frames):
xc = float(i) / num_frames
yc = 1 - float(i) / (2 * num_frames)
d = torch.exp(-((x - xc) ** 2 + (y - yc) ** 2) / 2) * 255
data.append(d.unsqueeze(2).repeat(1, 1, 3).byte())
return torch.stack(data, 0)
# adapted from torchvision test/test_io.py
@contextlib.contextmanager
def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None, options=None):
if lossless:
if video_codec is not None:
raise ValueError("video_codec can't be specified together with lossless")
if options is not None:
raise ValueError("options can't be specified together with lossless")
video_codec = "libx264rgb"
options = {"crf": "0"}
if video_codec is None:
video_codec = "libx264"
if options is None:
options = {}
data = _create_video_frames(num_frames, height, width)
with tempfile.NamedTemporaryFile(suffix=".mp4") as f:
f.close()
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
yield f.name, data
os.unlink(f.name)
@unittest.skipIf(av is None, "PyAV unavailable")
class TestVideoKeyframeDataset(unittest.TestCase):
def test_read_keyframes_all(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
dataset = VideoKeyframeDataset(video_list, category_list)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((5, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(3)
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((3, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector_with_transform(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(1)
transform = ImageResizeTransform()
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector, transform)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(len(dataset), 1)
self.assertEqual(data1.shape, torch.Size((1, 3, 800, 800)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_video_keyframe_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import random
import unittest
from typing import Any, Iterable, Iterator, Tuple
from densepose.data import CombinedDataLoader
def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any, ...]]:
"""
Group elements of an iterable by chunks of size `n`, e.g.
grouper(range(9), 4) ->
(0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None)
"""
it = iter(iterable)
while True:
values = []
for _ in range(n):
try:
value = next(it)
except StopIteration:
values.extend([fillvalue] * (n - len(values)))
yield tuple(values)
return
values.append(value)
yield tuple(values)
class TestCombinedDataLoader(unittest.TestCase):
def test_combine_loaders_1(self):
loader1 = _grouper([f"1_{i}" for i in range(10)], 2)
loader2 = _grouper([f"2_{i}" for i in range(11)], 3)
batch_size = 4
ratios = (0.1, 0.9)
random.seed(43)
combined = CombinedDataLoader((loader1, loader2), batch_size, ratios)
BATCHES_GT = [
["1_0", "1_1", "2_0", "2_1"],
["2_2", "2_3", "2_4", "2_5"],
["1_2", "1_3", "2_6", "2_7"],
["2_8", "2_9", "2_10", None],
]
for i, batch in enumerate(combined):
self.assertEqual(len(batch), batch_size)
self.assertEqual(batch, BATCHES_GT[i])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_combine_data_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, Instances
from densepose.modeling.losses.utils import ChartBasedAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList
image_shape = (100, 100)
instances = Instances(image_shape)
n_instances = 3
instances.proposal_boxes = Boxes(torch.rand(n_instances, 4))
instances.gt_boxes = Boxes(torch.rand(n_instances, 4))
# instances.gt_densepose = None cannot happen because instances attributes need a length
class TestChartBasedAnnotationsAccumulator(unittest.TestCase):
def test_chart_based_annotations_accumulator_no_gt_densepose(self):
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose_none(self):
instances.gt_densepose = [None] * n_instances
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose(self):
data_relative_keys = [
DensePoseDataRelative.X_KEY,
DensePoseDataRelative.Y_KEY,
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
DensePoseDataRelative.S_KEY,
]
annotations = [DensePoseDataRelative({k: [0] for k in data_relative_keys})] * n_instances
instances.gt_densepose = DensePoseList(annotations, instances.gt_boxes, image_shape)
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
bbox_xywh_est = BoxMode.convert(
instances.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
bbox_xywh_gt = BoxMode.convert(
instances.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
expected_values = {
"s_gt": [
torch.zeros((3, DensePoseDataRelative.MASK_SIZE, DensePoseDataRelative.MASK_SIZE))
]
* n_instances,
"bbox_xywh_est": bbox_xywh_est.split(1),
"bbox_xywh_gt": bbox_xywh_gt.split(1),
"point_bbox_with_dp_indices": [torch.tensor([i]) for i in range(n_instances)],
"point_bbox_indices": [torch.tensor([i]) for i in range(n_instances)],
"bbox_indices": list(range(n_instances)),
"nxt_bbox_with_dp_index": n_instances,
"nxt_bbox_index": n_instances,
}
default_value = [torch.tensor([0])] * 3
for key in accumulator.__dict__:
to_test = getattr(accumulator, key)
gt_value = expected_values.get(key, default_value)
if key in ["nxt_bbox_with_dp_index", "nxt_bbox_index"]:
self.assertEqual(to_test, gt_value)
elif key == "bbox_indices":
self.assertListEqual(to_test, gt_value)
else:
self.assertTrue(torch.allclose(torch.stack(to_test), torch.stack(gt_value)))
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_chart_based_annotations_accumulator.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, Instances
from densepose.modeling.losses.embed_utils import CseAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList
class TestCseAnnotationsAccumulator(unittest.TestCase):
def test_cse_annotations_accumulator_nodp(self):
instances_lst = [
self._create_instances_nodp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_sparsedp(self):
instances_lst = [
self._create_instances_sparsedp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_fulldp(self):
instances_lst = [
self._create_instances_fulldp(),
]
self._test_template(instances_lst)
def test_cse_annotations_accumulator_combined(self):
instances_lst = [
self._create_instances_nodp(),
self._create_instances_sparsedp(),
self._create_instances_fulldp(),
]
self._test_template(instances_lst)
def _test_template(self, instances_lst):
acc = CseAnnotationsAccumulator()
for instances in instances_lst:
acc.accumulate(instances)
packed_anns = acc.pack()
self._check_correspondence(packed_anns, instances_lst)
def _create_instances_nodp(self):
image_shape = (480, 640)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[40.0, 40.0, 140.0, 140.0],
[160.0, 160.0, 270.0, 270.0],
[40.0, 160.0, 160.0, 280.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[41.0, 39.0, 142.0, 138.0],
[161.0, 159.0, 272.0, 268.0],
[41.0, 159.0, 162.0, 278.0],
]
)
)
# do not add gt_densepose
return instances
def _create_instances_sparsedp(self):
image_shape = (540, 720)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[50.0, 50.0, 130.0, 130.0],
[150.0, 150.0, 240.0, 240.0],
[50.0, 150.0, 230.0, 330.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[49.0, 51.0, 131.0, 129.0],
[151.0, 149.0, 241.0, 239.0],
[51.0, 149.0, 232.0, 329.0],
]
)
)
instances.gt_densepose = DensePoseList(
[
None,
self._create_dp_data(
{
"dp_x": [81.69, 153.47, 151.00],
"dp_y": [162.24, 128.71, 113.81],
"dp_vertex": [0, 1, 2],
"ref_model": "zebra_5002",
"dp_masks": [],
},
{"c": (166, 133), "r": 64},
),
None,
],
instances.gt_boxes,
image_shape,
)
return instances
def _create_instances_fulldp(self):
image_shape = (680, 840)
instances = Instances(image_shape)
instances.gt_boxes = Boxes(
torch.as_tensor(
[
[65.0, 55.0, 165.0, 155.0],
[170.0, 175.0, 275.0, 280.0],
[55.0, 165.0, 165.0, 275.0],
]
)
)
instances.proposal_boxes = Boxes(
torch.as_tensor(
[
[66.0, 54.0, 166.0, 154.0],
[171.0, 174.0, 276.0, 279.0],
[56.0, 164.0, 166.0, 274.0],
]
)
)
instances.gt_densepose = DensePoseList(
[
self._create_dp_data(
{
"dp_x": [149.99, 198.62, 157.59],
"dp_y": [170.74, 197.73, 123.12],
"dp_vertex": [3, 4, 5],
"ref_model": "cat_5001",
"dp_masks": [],
},
{"c": (100, 100), "r": 50},
),
self._create_dp_data(
{
"dp_x": [234.53, 116.72, 71.66],
"dp_y": [107.53, 11.31, 142.32],
"dp_vertex": [6, 7, 8],
"ref_model": "dog_5002",
"dp_masks": [],
},
{"c": (200, 150), "r": 40},
),
self._create_dp_data(
{
"dp_x": [225.54, 202.61, 135.90],
"dp_y": [167.46, 181.00, 211.47],
"dp_vertex": [9, 10, 11],
"ref_model": "elephant_5002",
"dp_masks": [],
},
{"c": (100, 200), "r": 45},
),
],
instances.gt_boxes,
image_shape,
)
return instances
def _create_dp_data(self, anns, blob_def=None):
dp_data = DensePoseDataRelative(anns)
if blob_def is not None:
dp_data.segm[
blob_def["c"][0] - blob_def["r"] : blob_def["c"][0] + blob_def["r"],
blob_def["c"][1] - blob_def["r"] : blob_def["c"][1] + blob_def["r"],
] = 1
return dp_data
def _check_correspondence(self, packed_anns, instances_lst):
instance_idx = 0
data_idx = 0
pt_offset = 0
if packed_anns is not None:
bbox_xyxy_gt = BoxMode.convert(
packed_anns.bbox_xywh_gt.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS
)
bbox_xyxy_est = BoxMode.convert(
packed_anns.bbox_xywh_est.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS
)
for instances in instances_lst:
if not hasattr(instances, "gt_densepose"):
instance_idx += len(instances)
continue
for i, dp_data in enumerate(instances.gt_densepose):
if dp_data is None:
instance_idx += 1
continue
n_pts = len(dp_data.x)
self.assertTrue(
torch.allclose(dp_data.x, packed_anns.x_gt[pt_offset : pt_offset + n_pts])
)
self.assertTrue(
torch.allclose(dp_data.y, packed_anns.y_gt[pt_offset : pt_offset + n_pts])
)
self.assertTrue(torch.allclose(dp_data.segm, packed_anns.coarse_segm_gt[data_idx]))
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * dp_data.mesh_id,
packed_anns.vertex_mesh_ids_gt[pt_offset : pt_offset + n_pts],
)
)
self.assertTrue(
torch.allclose(
dp_data.vertex_ids, packed_anns.vertex_ids_gt[pt_offset : pt_offset + n_pts]
)
)
self.assertTrue(
torch.allclose(instances.gt_boxes.tensor[i], bbox_xyxy_gt[data_idx])
)
self.assertTrue(
torch.allclose(instances.proposal_boxes.tensor[i], bbox_xyxy_est[data_idx])
)
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * data_idx,
packed_anns.point_bbox_with_dp_indices[pt_offset : pt_offset + n_pts],
)
)
self.assertTrue(
torch.allclose(
torch.ones(n_pts, dtype=torch.long) * instance_idx,
packed_anns.point_bbox_indices[pt_offset : pt_offset + n_pts],
)
)
self.assertEqual(instance_idx, packed_anns.bbox_indices[data_idx])
pt_offset += n_pts
instance_idx += 1
data_idx += 1
if data_idx == 0:
self.assertIsNone(packed_anns)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_cse_annotations_accumulator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import tempfile
import unittest
import torch
from torchvision.utils import save_image
from densepose.data.image_list_dataset import ImageListDataset
from densepose.data.transform import ImageResizeTransform
@contextlib.contextmanager
def temp_image(height, width):
random_image = torch.rand(height, width)
with tempfile.NamedTemporaryFile(suffix=".jpg") as f:
f.close()
save_image(random_image, f.name)
yield f.name
os.unlink(f.name)
class TestImageListDataset(unittest.TestCase):
def test_image_list_dataset(self):
height, width = 720, 1280
with temp_image(height, width) as image_fpath:
image_list = [image_fpath]
category_list = [None]
dataset = ImageListDataset(image_list, category_list)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((1, 3, height, width)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
def test_image_list_dataset_with_transform(self):
height, width = 720, 1280
with temp_image(height, width) as image_fpath:
image_list = [image_fpath]
category_list = [None]
transform = ImageResizeTransform()
dataset = ImageListDataset(image_list, category_list, transform)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((1, 3, 749, 1333)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_image_list_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import torch
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.modeling import build_model
from densepose import add_densepose_config
_BASE_CONFIG_DIR = "configs"
_EVOLUTION_CONFIG_SUB_DIR = "evolution"
_HRNET_CONFIG_SUB_DIR = "HRNet"
_QUICK_SCHEDULES_CONFIG_SUB_DIR = "quick_schedules"
_BASE_CONFIG_FILE_PREFIX = "Base-"
_CONFIG_FILE_EXT = ".yaml"
def _get_base_config_dir():
"""
Return the base directory for configurations
"""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", _BASE_CONFIG_DIR)
def _get_evolution_config_dir():
"""
Return the base directory for evolution configurations
"""
return os.path.join(_get_base_config_dir(), _EVOLUTION_CONFIG_SUB_DIR)
def _get_hrnet_config_dir():
"""
Return the base directory for HRNet configurations
"""
return os.path.join(_get_base_config_dir(), _HRNET_CONFIG_SUB_DIR)
def _get_quick_schedules_config_dir():
"""
Return the base directory for quick schedules configurations
"""
return os.path.join(_get_base_config_dir(), _QUICK_SCHEDULES_CONFIG_SUB_DIR)
def _collect_config_files(config_dir):
"""
Collect all configuration files (i.e. densepose_*.yaml) directly in the specified directory
"""
start = _get_base_config_dir()
results = []
for entry in os.listdir(config_dir):
path = os.path.join(config_dir, entry)
if not os.path.isfile(path):
continue
_, ext = os.path.splitext(entry)
if ext != _CONFIG_FILE_EXT:
continue
if entry.startswith(_BASE_CONFIG_FILE_PREFIX):
continue
config_file = os.path.relpath(path, start)
results.append(config_file)
return results
def get_config_files():
"""
Get all the configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_base_config_dir())
def get_evolution_config_files():
"""
Get all the evolution configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_evolution_config_dir())
def get_hrnet_config_files():
"""
Get all the HRNet configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_hrnet_config_dir())
def get_quick_schedules_config_files():
"""
Get all the quick schedules configuration files (relative to the base configuration directory)
"""
return _collect_config_files(_get_quick_schedules_config_dir())
def get_model_config(config_file):
"""
Load and return the configuration from the specified file (relative to the base configuration
directory)
"""
cfg = get_cfg()
add_densepose_config(cfg)
path = os.path.join(_get_base_config_dir(), config_file)
cfg.merge_from_file(path)
if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
return cfg
def get_model(config_file):
"""
Get the model from the specified file (relative to the base configuration directory)
"""
cfg = get_model_config(config_file)
return build_model(cfg)
def setup(config_file):
"""
Setup the configuration from the specified file (relative to the base configuration directory)
"""
cfg = get_model_config(config_file)
cfg.freeze()
default_setup(cfg, {})
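
# Illustrative sketch (editor's addition): how these helpers are typically chained.
# Loading the configs requires the DensePose `configs` directory to be present on
# disk, so the sketch is guarded and not executed at import time.
if __name__ == "__main__":
    for quick_config_file in get_quick_schedules_config_files():
        quick_cfg = get_model_config(quick_config_file)  # device forced to CPU if no CUDA
        print(quick_config_file, quick_cfg.MODEL.META_ARCHITECTURE)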
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from densepose.data.transform import ImageResizeTransform
class TestImageResizeTransform(unittest.TestCase):
def test_image_resize_1(self):
images_batch = torch.ones((3, 3, 100, 100), dtype=torch.uint8) * 100
transform = ImageResizeTransform()
images_transformed = transform(images_batch)
IMAGES_GT = torch.ones((3, 3, 800, 800), dtype=torch.float) * 100
self.assertEqual(images_transformed.size(), IMAGES_GT.size())
self.assertAlmostEqual(torch.abs(IMAGES_GT - images_transformed).max().item(), 0.0)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_image_resize_transform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import tempfile
import unittest
from contextlib import ExitStack
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils import comm
from densepose.evaluation.tensor_storage import (
SingleProcessFileTensorStorage,
SingleProcessRamTensorStorage,
SizeData,
storage_gather,
)
class TestSingleProcessRamTensorStorage(unittest.TestCase):
def test_read_write_1(self):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
# generate data which corresponds to the schema
data_elts = []
torch.manual_seed(23)
for _i in range(3):
data_elt = {
"tf": torch.rand((112, 112), dtype=torch.float32),
"ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32),
}
data_elts.append(data_elt)
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
# write data to the storage
for i in range(3):
record_id = storage.put(data_elts[i])
self.assertEqual(record_id, i)
# read data from the storage
for i in range(3):
record = storage.get(i)
self.assertEqual(len(record), len(schema))
for field_name in schema:
self.assertTrue(field_name in record)
self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape)
self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype)
self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name]))
class TestSingleProcessFileTensorStorage(unittest.TestCase):
def test_read_write_1(self):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
# generate data which corresponds to the schema
data_elts = []
torch.manual_seed(23)
for _i in range(3):
data_elt = {
"tf": torch.rand((112, 112), dtype=torch.float32),
"ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32),
}
data_elts.append(data_elt)
# WARNING: opens the file several times! may not work on all platforms
with tempfile.NamedTemporaryFile() as hFile:
storage = SingleProcessFileTensorStorage(schema, hFile.name, "wb")
# write data to the storage
for i in range(3):
record_id = storage.put(data_elts[i])
self.assertEqual(record_id, i)
hFile.seek(0)
storage = SingleProcessFileTensorStorage(schema, hFile.name, "rb")
# read data from the storage
for i in range(3):
record = storage.get(i)
self.assertEqual(len(record), len(schema))
for field_name in schema:
self.assertTrue(field_name in record)
self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape)
self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype)
self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name]))
def _find_free_port():
"""
Copied from detectron2/engine/launch.py
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch(main_func, nprocs, args=()):
port = _find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
# dist_url = "env://"
mp.spawn(
distributed_worker, nprocs=nprocs, args=(main_func, nprocs, dist_url, args), daemon=False
)
def distributed_worker(local_rank, main_func, nprocs, dist_url, args):
dist.init_process_group(
backend="gloo", init_method=dist_url, world_size=nprocs, rank=local_rank
)
comm.synchronize()
assert comm._LOCAL_PROCESS_GROUP is None
pg = dist.new_group(list(range(nprocs)))
comm._LOCAL_PROCESS_GROUP = pg
main_func(*args)
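
# Illustrative sketch (editor's addition): launch() spawns `nprocs` local workers,
# connects them into a gloo process group over a free TCP port and runs the given
# function in every rank. The trivial worker below only reports its rank; the real
# storage workers follow.
def demo_rank_worker():
    print(f"demo worker: rank {comm.get_rank()} of {comm.get_world_size()}")

if __name__ == "__main__":
    launch(demo_rank_worker, 2)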
def ram_read_write_worker():
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
world_size = comm.get_world_size()
rank = comm.get_rank()
data_elts = []
# prepare different number of tensors in different processes
for i in range(rank + 1):
data_elt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size),
}
data_elts.append(data_elt)
# write data to the single process storage
for i in range(rank + 1):
record_id = storage.put(data_elts[i])
assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}"
comm.synchronize()
# gather all data in process rank 0
multi_storage = storage_gather(storage)
if rank != 0:
return
# read and check data from the multiprocess storage
for j in range(world_size):
for i in range(j):
record = multi_storage.get(j, i)
record_gt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size),
}
assert len(record) == len(schema), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"expected {len(schema)} fields in the record, got {len(record)}"
)
for field_name in schema:
assert field_name in record, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name} not in the record"
)
assert record_gt[field_name].shape == record[field_name].shape, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected shape {record_gt[field_name].shape} "
f"got {record[field_name].shape}"
)
assert record_gt[field_name].dtype == record[field_name].dtype, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected dtype {record_gt[field_name].dtype} "
f"got {record[field_name].dtype}"
)
assert torch.allclose(record_gt[field_name], record[field_name]), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, tensors are not close enough:"
f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} "
f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} "
)
def file_read_write_worker(rank_to_fpath):
schema = {
"tf": SizeData(dtype="float32", shape=(112, 112)),
"ti": SizeData(dtype="int32", shape=(4, 64, 64)),
}
world_size = comm.get_world_size()
rank = comm.get_rank()
storage = SingleProcessFileTensorStorage(schema, rank_to_fpath[rank], "wb")
data_elts = []
# prepare different number of tensors in different processes
for i in range(rank + 1):
data_elt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size),
}
data_elts.append(data_elt)
# write data to the single process storage
for i in range(rank + 1):
record_id = storage.put(data_elts[i])
assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}"
comm.synchronize()
# gather all data in process rank 0
multi_storage = storage_gather(storage)
if rank != 0:
return
# read and check data from the multiprocess storage
for j in range(world_size):
for i in range(j):
record = multi_storage.get(j, i)
record_gt = {
"tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size),
"ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size),
}
assert len(record) == len(schema), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"expected {len(schema)} fields in the record, got {len(record)}"
)
for field_name in schema:
assert field_name in record, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name} not in the record"
)
assert record_gt[field_name].shape == record[field_name].shape, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected shape {record_gt[field_name].shape} "
f"got {record[field_name].shape}"
)
assert record_gt[field_name].dtype == record[field_name].dtype, (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, expected dtype {record_gt[field_name].dtype} "
f"got {record[field_name].dtype}"
)
assert torch.allclose(record_gt[field_name], record[field_name]), (
f"Process {rank}: multi storage record, rank {j}, id {i}: "
f"field {field_name}, tensors are not close enough:"
f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} "
f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} "
)
class TestMultiProcessRamTensorStorage(unittest.TestCase):
def test_read_write_1(self):
launch(ram_read_write_worker, 8)
class TestMultiProcessFileTensorStorage(unittest.TestCase):
def test_read_write_1(self):
with ExitStack() as stack:
# WARNING: opens the files several times! may not work on all platforms
rank_to_fpath = {
i: stack.enter_context(tempfile.NamedTemporaryFile()).name for i in range(8)
}
launch(file_read_write_worker, 8, (rank_to_fpath,))
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_tensor_storage.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from densepose.data.datasets.builtin import COCO_DATASETS, DENSEPOSE_ANNOTATIONS_DIR, LVIS_DATASETS
from densepose.data.datasets.coco import load_coco_json
from densepose.data.datasets.lvis import load_lvis_json
from densepose.data.utils import maybe_prepend_base_path
from densepose.structures import DensePoseDataRelative
class TestDatasetLoadedAnnotations(unittest.TestCase):
COCO_DATASET_DATA = {
"densepose_coco_2014_train": {"n_instances": 39210},
"densepose_coco_2014_minival": {"n_instances": 2243},
"densepose_coco_2014_minival_100": {"n_instances": 164},
"densepose_coco_2014_valminusminival": {"n_instances": 7297},
"densepose_coco_2014_train_cse": {"n_instances": 39210},
"densepose_coco_2014_minival_cse": {"n_instances": 2243},
"densepose_coco_2014_minival_100_cse": {"n_instances": 164},
"densepose_coco_2014_valminusminival_cse": {"n_instances": 7297},
"densepose_chimps": {"n_instances": 930},
"posetrack2017_train": {"n_instances": 8274},
"posetrack2017_val": {"n_instances": 4753},
"lvis_v05_train": {"n_instances": 5186},
"lvis_v05_val": {"n_instances": 1037},
}
LVIS_DATASET_DATA = {
"densepose_lvis_v1_train1": {"n_instances": 3394},
"densepose_lvis_v1_train2": {"n_instances": 1800},
"densepose_lvis_v1_val": {"n_instances": 1037},
"densepose_lvis_v1_val_animals_100": {"n_instances": 89},
}
def generic_coco_test(self, dataset_info):
if dataset_info.name not in self.COCO_DATASET_DATA:
return
n_inst = self.COCO_DATASET_DATA[dataset_info.name]["n_instances"]
self.generic_test(dataset_info, n_inst, load_coco_json)
def generic_lvis_test(self, dataset_info):
if dataset_info.name not in self.LVIS_DATASET_DATA:
return
n_inst = self.LVIS_DATASET_DATA[dataset_info.name]["n_instances"]
self.generic_test(dataset_info, n_inst, load_lvis_json)
def generic_test(self, dataset_info, n_inst, loader_fun):
datasets_root = DENSEPOSE_ANNOTATIONS_DIR
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_info.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_info.images_root)
image_annotation_dicts = loader_fun(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_info.name,
)
num_valid = sum(
1
for image_annotation_dict in image_annotation_dicts
for ann in image_annotation_dict["annotations"]
if DensePoseDataRelative.validate_annotation(ann)[0]
)
self.assertEqual(num_valid, n_inst)
def coco_test_fun(dataset_info):
return lambda self: self.generic_coco_test(dataset_info)
for dataset_info in COCO_DATASETS:
setattr(
TestDatasetLoadedAnnotations,
f"test_coco_builtin_loaded_annotations_{dataset_info.name}",
coco_test_fun(dataset_info),
)
def lvis_test_fun(dataset_info):
return lambda self: self.generic_lvis_test(dataset_info)
for dataset_info in LVIS_DATASETS:
setattr(
TestDatasetLoadedAnnotations,
f"test_lvis_builtin_loaded_annotations_{dataset_info.name}",
lvis_test_fun(dataset_info),
)
|
banmo-main
|
third_party/detectron2_old/projects/DensePose/tests/test_dataset_loaded_annotations.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Panoptic-DeepLab Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
)
from detectron2.projects.deeplab import build_lr_scheduler
from detectron2.projects.panoptic_deeplab import (
PanopticDeeplabDatasetMapper,
add_panoptic_deeplab_config,
)
from detectron2.solver import get_default_optimizer_params
from detectron2.solver.build import maybe_add_gradient_clipping
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
augs.append(T.RandomFlip())
return augs
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED:
return None
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["cityscapes_panoptic_seg", "coco_panoptic_seg"]:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_panoptic_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
if evaluator_type == "coco_panoptic_seg":
# `thing_classes` in COCO panoptic metadata includes both thing and
# stuff classes for visualization. COCOEvaluator requires metadata
# which only contains thing classes, thus we map the name of
# panoptic datasets to their corresponding instance datasets.
dataset_name_mapper = {
"coco_2017_val_panoptic": "coco_2017_val",
"coco_2017_val_100_panoptic": "coco_2017_val_100",
}
evaluator_list.append(
COCOEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder)
)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg))
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Override it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_optimizer(cls, cfg, model):
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
)
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
)
elif optimizer_type == "ADAM":
return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(params, cfg.SOLVER.BASE_LR)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/train_net.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
from detectron2.projects.deeplab import add_deeplab_config
def add_panoptic_deeplab_config(cfg):
"""
Add config for Panoptic-DeepLab.
"""
# Reuse DeepLab config.
add_deeplab_config(cfg)
# Target generation parameters.
cfg.INPUT.GAUSSIAN_SIGMA = 10
cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True
cfg.INPUT.SMALL_INSTANCE_AREA = 4096
cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3
cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False
# Optimizer type.
cfg.SOLVER.OPTIMIZER = "ADAM"
# Panoptic-DeepLab semantic segmentation head.
# We add an extra convolution before predictor.
cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256
cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2
# Panoptic-DeepLab instance segmentation head.
cfg.MODEL.INS_EMBED_HEAD = CN()
cfg.MODEL.INS_EMBED_HEAD.NAME = "PanopticDeepLabInsEmbedHead"
cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ["res2", "res3", "res5"]
cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ["res2", "res3"]
cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64]
cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256
cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18]
cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1
# We add an extra convolution before predictor.
cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32
cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128
cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4
cfg.MODEL.INS_EMBED_HEAD.NORM = "SyncBN"
cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0
cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01
# Panoptic-DeepLab post-processing setting.
cfg.MODEL.PANOPTIC_DEEPLAB = CN()
# Stuff area limit, ignore stuff region below this number.
cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048
cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1
cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7
cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200
# If set to False, Panoptic-DeepLab will not evaluate instance segmentation.
cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True
cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False
    # This is the padding parameter for images with various sizes. ASPP layers
    # require input sizes to be divisible by their average pooling size, so
    # `MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY` can be used to pad all images to
    # a fixed resolution (e.g. 640x640 for COCO) and avoid an image size that is
    # not divisible by the ASPP average pooling size.
cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY = -1
# Only evaluates network speed (ignores post-processing).
cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED = False
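
# Illustrative sketch (editor's addition): obtaining a config with the Panoptic-DeepLab
# keys registered; merging a project YAML file afterwards is the usual next step
# (path omitted here).
if __name__ == "__main__":
    from detectron2.config import get_cfg

    demo_cfg = get_cfg()
    add_panoptic_deeplab_config(demo_cfg)
    print(demo_cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL)  # 7, as set above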
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_panoptic_deeplab_config
from .dataset_mapper import PanopticDeeplabDatasetMapper
from .panoptic_seg import (
PanopticDeepLab,
INS_EMBED_BRANCHES_REGISTRY,
build_ins_embed_branch,
PanopticDeepLabSemSegHead,
PanopticDeepLabInsEmbedHead,
)
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import Callable, List, Union
import torch
from panopticapi.utils import rgb2id
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .target_generator import PanopticDeepLabTargetGenerator
__all__ = ["PanopticDeeplabDatasetMapper"]
class PanopticDeeplabDatasetMapper:
"""
The callable currently does the following:
1. Read the image from "file_name" and label from "pan_seg_file_name"
2. Applies random scale, crop and flip transforms to image and label
3. Prepare data to Tensor and generate training targets from label
"""
@configurable
def __init__(
self,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
panoptic_target_generator: Callable,
):
"""
NOTE: this interface is experimental.
Args:
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
panoptic_target_generator: a callable that takes "panoptic_seg" and
"segments_info" to generate training targets for the model.
"""
# fmt: off
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
# fmt: on
logger = logging.getLogger(__name__)
logger.info("Augmentations used in training: " + str(augmentations))
self.panoptic_target_generator = panoptic_target_generator
@classmethod
def from_config(cls, cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN,
cfg.INPUT.MAX_SIZE_TRAIN,
cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
augs.append(T.RandomFlip())
        # Assume this mapper is always applied to the training set.
dataset_names = cfg.DATASETS.TRAIN
meta = MetadataCatalog.get(dataset_names[0])
panoptic_target_generator = PanopticDeepLabTargetGenerator(
ignore_label=meta.ignore_label,
thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()),
sigma=cfg.INPUT.GAUSSIAN_SIGMA,
ignore_stuff_in_offset=cfg.INPUT.IGNORE_STUFF_IN_OFFSET,
small_instance_area=cfg.INPUT.SMALL_INSTANCE_AREA,
small_instance_weight=cfg.INPUT.SMALL_INSTANCE_WEIGHT,
ignore_crowd_in_semantic=cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC,
)
ret = {
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"panoptic_target_generator": panoptic_target_generator,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# Load image.
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# Panoptic label is encoded in RGB image.
pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
# Reuses semantic transform for panoptic labels.
aug_input = T.AugInput(image, sem_seg=pan_seg_gt)
_ = self.augmentations(aug_input)
image, pan_seg_gt = aug_input.image, aug_input.sem_seg
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
# Generates training targets for Panoptic-DeepLab.
targets = self.panoptic_target_generator(rgb2id(pan_seg_gt), dataset_dict["segments_info"])
dataset_dict.update(targets)
return dataset_dict
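
# Illustrative sketch (editor's addition): constructing the mapper directly through the
# explicit __init__ arguments instead of a config. The target-generator values below
# mirror the config defaults registered earlier in this dump, but ignore_label and
# thing_ids are placeholders, not taken from a real dataset.
if __name__ == "__main__":
    demo_mapper = PanopticDeeplabDatasetMapper(
        augmentations=[T.ResizeShortestEdge(512, 1024), T.RandomFlip()],
        image_format="RGB",
        panoptic_target_generator=PanopticDeepLabTargetGenerator(
            ignore_label=255,
            thing_ids=[0, 1],
            sigma=10,
            ignore_stuff_in_offset=True,
            small_instance_area=4096,
            small_instance_weight=3,
            ignore_crowd_in_semantic=False,
        ),
    )
    # demo_mapper(dataset_dict) would then read "file_name" / "pan_seg_file_name",
    # apply the augmentations and return a dict with "image" plus training targets.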
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Callable, Dict, List, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.layers import Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm
from detectron2.modeling import (
META_ARCH_REGISTRY,
SEM_SEG_HEADS_REGISTRY,
build_backbone,
build_sem_seg_head,
)
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.projects.deeplab import DeepLabV3PlusHead
from detectron2.projects.deeplab.loss import DeepLabCE
from detectron2.structures import BitMasks, ImageList, Instances
from detectron2.utils.registry import Registry
from .post_processing import get_panoptic_segmentation
__all__ = ["PanopticDeepLab", "INS_EMBED_BRANCHES_REGISTRY", "build_ins_embed_branch"]
INS_EMBED_BRANCHES_REGISTRY = Registry("INS_EMBED_BRANCHES")
INS_EMBED_BRANCHES_REGISTRY.__doc__ = """
Registry for instance embedding branches, which make instance embedding
predictions from feature maps.
"""
@META_ARCH_REGISTRY.register()
class PanopticDeepLab(nn.Module):
"""
Main class for panoptic segmentation architectures.
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape())
self.ins_embed_head = build_ins_embed_branch(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False)
self.meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
self.stuff_area = cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA
self.threshold = cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD
self.nms_kernel = cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL
self.top_k = cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE
self.predict_instances = cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES
self.use_depthwise_separable_conv = cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV
assert (
cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
== cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV
)
self.size_divisibility = cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY
self.benchmark_network_speed = cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "sem_seg": semantic segmentation ground truth
* "center": center points heatmap ground truth
* "offset": pixel offsets to center points ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "panoptic_seg", "sem_seg": see documentation
:doc:`/tutorials/models` for the standard output format
* "instances": available if ``predict_instances is True``. see documentation
:doc:`/tutorials/models` for the standard output format
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        # To avoid errors in the ASPP layer when inputs have different sizes.
size_divisibility = (
self.size_divisibility
if self.size_divisibility > 0
else self.backbone.size_divisibility
)
images = ImageList.from_tensors(images, size_divisibility)
features = self.backbone(images.tensor)
losses = {}
if "sem_seg" in batched_inputs[0]:
targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
targets = ImageList.from_tensors(
targets, size_divisibility, self.sem_seg_head.ignore_value
).tensor
if "sem_seg_weights" in batched_inputs[0]:
# The default D2 DatasetMapper may not contain "sem_seg_weights"
# Avoid error in testing when default DatasetMapper is used.
weights = [x["sem_seg_weights"].to(self.device) for x in batched_inputs]
weights = ImageList.from_tensors(weights, size_divisibility).tensor
else:
weights = None
else:
targets = None
weights = None
sem_seg_results, sem_seg_losses = self.sem_seg_head(features, targets, weights)
losses.update(sem_seg_losses)
if "center" in batched_inputs[0] and "offset" in batched_inputs[0]:
center_targets = [x["center"].to(self.device) for x in batched_inputs]
center_targets = ImageList.from_tensors(
center_targets, size_divisibility
).tensor.unsqueeze(1)
center_weights = [x["center_weights"].to(self.device) for x in batched_inputs]
center_weights = ImageList.from_tensors(center_weights, size_divisibility).tensor
offset_targets = [x["offset"].to(self.device) for x in batched_inputs]
offset_targets = ImageList.from_tensors(offset_targets, size_divisibility).tensor
offset_weights = [x["offset_weights"].to(self.device) for x in batched_inputs]
offset_weights = ImageList.from_tensors(offset_weights, size_divisibility).tensor
else:
center_targets = None
center_weights = None
offset_targets = None
offset_weights = None
center_results, offset_results, center_losses, offset_losses = self.ins_embed_head(
features, center_targets, center_weights, offset_targets, offset_weights
)
losses.update(center_losses)
losses.update(offset_losses)
if self.training:
return losses
if self.benchmark_network_speed:
return []
processed_results = []
for sem_seg_result, center_result, offset_result, input_per_image, image_size in zip(
sem_seg_results, center_results, offset_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height")
width = input_per_image.get("width")
r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
c = sem_seg_postprocess(center_result, image_size, height, width)
o = sem_seg_postprocess(offset_result, image_size, height, width)
# Post-processing to get panoptic segmentation.
panoptic_image, _ = get_panoptic_segmentation(
r.argmax(dim=0, keepdim=True),
c,
o,
thing_ids=self.meta.thing_dataset_id_to_contiguous_id.values(),
label_divisor=self.meta.label_divisor,
stuff_area=self.stuff_area,
void_label=-1,
threshold=self.threshold,
nms_kernel=self.nms_kernel,
top_k=self.top_k,
)
# For semantic segmentation evaluation.
processed_results.append({"sem_seg": r})
panoptic_image = panoptic_image.squeeze(0)
semantic_prob = F.softmax(r, dim=0)
# For panoptic segmentation evaluation.
processed_results[-1]["panoptic_seg"] = (panoptic_image, None)
# For instance segmentation evaluation.
if self.predict_instances:
instances = []
panoptic_image_cpu = panoptic_image.cpu().numpy()
for panoptic_label in np.unique(panoptic_image_cpu):
if panoptic_label == -1:
continue
pred_class = panoptic_label // self.meta.label_divisor
isthing = pred_class in list(
self.meta.thing_dataset_id_to_contiguous_id.values()
)
# Get instance segmentation results.
if isthing:
instance = Instances((height, width))
# Evaluation code takes continuous id starting from 0
instance.pred_classes = torch.tensor(
[pred_class], device=panoptic_image.device
)
mask = panoptic_image == panoptic_label
instance.pred_masks = mask.unsqueeze(0)
# Average semantic probability
sem_scores = semantic_prob[pred_class, ...]
sem_scores = torch.mean(sem_scores[mask])
# Center point probability
mask_indices = torch.nonzero(mask).float()
center_y, center_x = (
torch.mean(mask_indices[:, 0]),
torch.mean(mask_indices[:, 1]),
)
center_scores = c[0, int(center_y.item()), int(center_x.item())]
# Confidence score is semantic prob * center prob.
instance.scores = torch.tensor(
[sem_scores * center_scores], device=panoptic_image.device
)
# Get bounding boxes
instance.pred_boxes = BitMasks(instance.pred_masks).get_bounding_boxes()
instances.append(instance)
if len(instances) > 0:
processed_results[-1]["instances"] = Instances.cat(instances)
return processed_results
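# Hedged sketch (toy tensors, not part of the reference code) of the per-instance
# confidence score computed in `forward` above: the mean semantic probability inside
# the instance mask, multiplied by the center-heatmap value at the mask centroid.
def _demo_instance_score():
    import torch

    semantic_prob = torch.full((3, 4, 4), 0.8)  # (num_classes, H, W), toy values
    center = torch.full((1, 4, 4), 0.5)         # (1, H, W) toy center heatmap
    mask = torch.zeros(4, 4, dtype=torch.bool)
    mask[1:3, 1:3] = True
    pred_class = 2
    sem_score = semantic_prob[pred_class][mask].mean()
    ys, xs = torch.nonzero(mask, as_tuple=True)
    cy, cx = int(ys.float().mean()), int(xs.float().mean())
    score = sem_score * center[0, cy, cx]
    # score -> ~0.4 (0.8 * 0.5)
    return score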
@SEM_SEG_HEADS_REGISTRY.register()
class PanopticDeepLabSemSegHead(DeepLabV3PlusHead):
"""
A semantic segmentation head described in :paper:`Panoptic-DeepLab`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
decoder_channels: List[int],
norm: Union[str, Callable],
head_channels: int,
loss_weight: float,
loss_type: str,
loss_top_k: float,
ignore_value: int,
num_classes: int,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "input_shape"
(each element in "input_shape" corresponds to one decoder stage).
norm (str or callable): normalization for all conv layers.
head_channels (int): the output channels of extra convolutions
between decoder and predictor.
loss_weight (float): loss weight.
loss_top_k (float): the fraction (top k%) of hardest pixels kept for the
"hard_pixel_mining" loss.
loss_type, ignore_value, num_classes: the same as the base class.
"""
super().__init__(
input_shape,
decoder_channels=decoder_channels,
norm=norm,
ignore_value=ignore_value,
**kwargs,
)
assert self.decoder_only
self.loss_weight = loss_weight
use_bias = norm == ""
# `head` is additional transform before predictor
if self.use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field.
self.head = DepthwiseSeparableConv2d(
decoder_channels[0],
head_channels,
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
self.head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.head[0])
weight_init.c2_xavier_fill(self.head[1])
self.predictor = Conv2d(head_channels, num_classes, kernel_size=1)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=ignore_value)
elif loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=ignore_value, top_k_percent_pixels=loss_top_k)
else:
raise ValueError("Unexpected loss type: %s" % loss_type)
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret["head_channels"] = cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS
ret["loss_top_k"] = cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K
return ret
def forward(self, features, targets=None, weights=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
y = self.layers(features)
if self.training:
return None, self.losses(y, targets, weights)
else:
y = F.interpolate(
y, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return y, {}
def layers(self, features):
assert self.decoder_only
y = super().layers(features)
y = self.head(y)
y = self.predictor(y)
return y
def losses(self, predictions, targets, weights=None):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets, weights)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
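# Hedged sketch of the top-k cross-entropy idea behind the "hard_pixel_mining"
# option selected above (a simplified stand-in, not the exact `DeepLabCE`
# implementation): compute per-pixel cross entropy, then average only over the
# hardest `top_k_percent` fraction of pixels.
def _topk_cross_entropy_sketch(logits, targets, top_k_percent=0.2, ignore_index=255):
    import torch
    import torch.nn.functional as F

    pixel_losses = F.cross_entropy(
        logits, targets, ignore_index=ignore_index, reduction="none"
    ).flatten()
    k = max(1, int(top_k_percent * pixel_losses.numel()))
    top_losses, _ = torch.topk(pixel_losses, k)
    return top_losses.mean()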
def build_ins_embed_branch(cfg, input_shape):
"""
Build an instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.INS_EMBED_HEAD.NAME
return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape)
@INS_EMBED_BRANCHES_REGISTRY.register()
class PanopticDeepLabInsEmbedHead(DeepLabV3PlusHead):
"""
An instance embedding head described in :paper:`Panoptic-DeepLab`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
decoder_channels: List[int],
norm: Union[str, Callable],
head_channels: int,
center_loss_weight: float,
offset_loss_weight: float,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "input_shape"
(each element in "input_shape" corresponds to one decoder stage).
norm (str or callable): normalization for all conv layers.
head_channels (int): the output channels of extra convolutions
between decoder and predictor.
center_loss_weight (float): loss weight for center point prediction.
offset_loss_weight (float): loss weight for center offset prediction.
"""
super().__init__(input_shape, decoder_channels=decoder_channels, norm=norm, **kwargs)
assert self.decoder_only
self.center_loss_weight = center_loss_weight
self.offset_loss_weight = offset_loss_weight
use_bias = norm == ""
# center prediction
# `head` is additional transform before predictor
self.center_head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.center_head[0])
weight_init.c2_xavier_fill(self.center_head[1])
self.center_predictor = Conv2d(head_channels, 1, kernel_size=1)
nn.init.normal_(self.center_predictor.weight, 0, 0.001)
nn.init.constant_(self.center_predictor.bias, 0)
# offset prediction
# `head` is additional transform before predictor
if self.use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field.
self.offset_head = DepthwiseSeparableConv2d(
decoder_channels[0],
head_channels,
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
self.offset_head = nn.Sequential(
Conv2d(
decoder_channels[0],
decoder_channels[0],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[0]),
activation=F.relu,
),
Conv2d(
decoder_channels[0],
head_channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, head_channels),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(self.offset_head[0])
weight_init.c2_xavier_fill(self.offset_head[1])
self.offset_predictor = Conv2d(head_channels, 2, kernel_size=1)
nn.init.normal_(self.offset_predictor.weight, 0, 0.001)
nn.init.constant_(self.offset_predictor.bias, 0)
self.center_loss = nn.MSELoss(reduction="none")
self.offset_loss = nn.L1Loss(reduction="none")
@classmethod
def from_config(cls, cfg, input_shape):
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_size = cfg.INPUT.CROP.SIZE
else:
train_size = None
decoder_channels = [cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM] * (
len(cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES) - 1
) + [cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS]
ret = dict(
input_shape={
k: v for k, v in input_shape.items() if k in cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES
},
project_channels=cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS,
aspp_dilations=cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS,
aspp_dropout=cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT,
decoder_channels=decoder_channels,
common_stride=cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE,
norm=cfg.MODEL.INS_EMBED_HEAD.NORM,
train_size=train_size,
head_channels=cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS,
center_loss_weight=cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT,
offset_loss_weight=cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT,
use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV,
)
return ret
def forward(
self,
features,
center_targets=None,
center_weights=None,
offset_targets=None,
offset_weights=None,
):
"""
Returns:
In training, returns (None, None, dict of center losses, dict of offset losses)
In inference, returns (center predictions, offset predictions, {}, {})
"""
center, offset = self.layers(features)
if self.training:
return (
None,
None,
self.center_losses(center, center_targets, center_weights),
self.offset_losses(offset, offset_targets, offset_weights),
)
else:
center = F.interpolate(
center, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
offset = (
F.interpolate(
offset, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
* self.common_stride
)
return center, offset, {}, {}
def layers(self, features):
assert self.decoder_only
y = super().layers(features)
# center
center = self.center_head(y)
center = self.center_predictor(center)
# offset
offset = self.offset_head(y)
offset = self.offset_predictor(offset)
return center, offset
def center_losses(self, predictions, targets, weights):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.center_loss(predictions, targets) * weights
if weights.sum() > 0:
loss = loss.sum() / weights.sum()
else:
loss = loss.sum() * 0
losses = {"loss_center": loss * self.center_loss_weight}
return losses
def offset_losses(self, predictions, targets, weights):
predictions = (
F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
* self.common_stride
)
loss = self.offset_loss(predictions, targets) * weights
if weights.sum() > 0:
loss = loss.sum() / weights.sum()
else:
loss = loss.sum() * 0
losses = {"loss_offset": loss * self.offset_loss_weight}
return losses
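# Hedged sketch (toy values, not from the reference code) of the weighted-loss
# normalization used by both `center_losses` and `offset_losses` above: the
# element-wise loss is masked by the weights and normalized by the number of valid
# (weight == 1) elements, falling back to a zero loss when nothing is valid.
def _demo_weighted_loss():
    import torch

    loss = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    weights = torch.tensor([[1.0, 0.0], [1.0, 0.0]])
    masked = loss * weights
    value = masked.sum() / weights.sum() if weights.sum() > 0 else masked.sum() * 0
    # value == (1.0 + 3.0) / 2 == 2.0
    return value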
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/panoptic_seg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/model/post_processing/instance_post_processing.py # noqa
from collections import Counter
import torch
import torch.nn.functional as F
def find_instance_center(center_heatmap, threshold=0.1, nms_kernel=3, top_k=None):
"""
Find the center points from the center heatmap.
Args:
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
Returns:
A Tensor of shape [K, 2] where K is the number of center points. The
order of second dim is (y, x).
"""
# Thresholding, setting values below threshold to -1.
center_heatmap = F.threshold(center_heatmap, threshold, -1)
# NMS
nms_padding = (nms_kernel - 1) // 2
center_heatmap_max_pooled = F.max_pool2d(
center_heatmap, kernel_size=nms_kernel, stride=1, padding=nms_padding
)
center_heatmap[center_heatmap != center_heatmap_max_pooled] = -1
# Squeeze first two dimensions.
center_heatmap = center_heatmap.squeeze()
assert len(center_heatmap.size()) == 2, "Something is wrong with center heatmap dimension."
# Find non-zero elements.
if top_k is None:
return torch.nonzero(center_heatmap > 0)
else:
# find top k centers.
top_k_scores, _ = torch.topk(torch.flatten(center_heatmap), top_k)
return torch.nonzero(center_heatmap > top_k_scores[-1].clamp_(min=0))
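# Minimal sketch (toy values, not part of the reference code) of what the max-pool
# based NMS above does: only local maxima of the thresholded heatmap survive, all
# other responses are suppressed.
def _demo_find_instance_center():
    import torch

    heatmap = torch.zeros(1, 7, 7)
    heatmap[0, 2, 3] = 0.9   # a strong center, a local maximum
    heatmap[0, 2, 4] = 0.5   # neighbor, suppressed by the 3x3 max-pool NMS
    heatmap[0, 5, 1] = 0.05  # below the 0.1 threshold, discarded
    centers = find_instance_center(heatmap, threshold=0.1, nms_kernel=3)
    # centers -> tensor([[2, 3]]): (y, x) of the surviving local maximum
    return centers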
def group_pixels(center_points, offsets):
"""
Gives each pixel in the image an instance id.
Args:
center_points: A Tensor of shape [K, 2] where K is the number of center points.
The order of second dim is (y, x).
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
Returns:
A Tensor of shape [1, H, W] with values in range [1, K], which represents
the center this pixel belongs to.
"""
height, width = offsets.size()[1:]
# Generates a coordinate map, where each location is the coordinate of
# that location.
y_coord, x_coord = torch.meshgrid(
torch.arange(height, dtype=offsets.dtype, device=offsets.device),
torch.arange(width, dtype=offsets.dtype, device=offsets.device),
)
coord = torch.cat((y_coord.unsqueeze(0), x_coord.unsqueeze(0)), dim=0)
center_loc = coord + offsets
center_loc = center_loc.flatten(1).T.unsqueeze_(0) # [1, H*W, 2]
center_points = center_points.unsqueeze(1) # [K, 1, 2]
# Distance: [K, H*W].
distance = torch.norm(center_points - center_loc, dim=-1)
# Finds center with minimum distance at each location, offset by 1, to
# reserve id=0 for stuff.
instance_id = torch.argmin(distance, dim=0).reshape((1, height, width)) + 1
return instance_id
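# Minimal sketch (toy values, not from the reference code) of the grouping above:
# each pixel is moved by its predicted offset and assigned to the nearest center,
# producing instance ids starting at 1 (0 is reserved for stuff).
def _demo_group_pixels():
    import torch

    centers = torch.tensor([[0.0, 0.0], [3.0, 3.0]])  # two centers, (y, x)
    offsets = torch.zeros(2, 4, 4)                     # zero offsets: raw pixel coords
    ids = group_pixels(centers, offsets)
    # ids[0, 0, 0] == 1 (closest to center 0), ids[0, 3, 3] == 2 (closest to center 1)
    return ids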
def get_instance_segmentation(
sem_seg, center_heatmap, offsets, thing_seg, thing_ids, threshold=0.1, nms_kernel=3, top_k=None
):
"""
Post-processing for instance segmentation, gets class agnostic instance id.
Args:
sem_seg: A Tensor of shape [1, H, W], predicted semantic label.
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
thing_seg: A Tensor of shape [1, H, W], predicted foreground mask;
if not provided, it is inferred from the semantic prediction.
thing_ids: A set of ids from contiguous category ids belonging
to thing categories.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
Returns:
A Tensor of shape [1, H, W] where 0 represents stuff (not an instance)
and other positive values represent different instances.
A Tensor of shape [1, K, 2] where K is the number of center points.
The order of second dim is (y, x).
"""
center_points = find_instance_center(
center_heatmap, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k
)
if center_points.size(0) == 0:
return torch.zeros_like(sem_seg), center_points.unsqueeze(0)
ins_seg = group_pixels(center_points, offsets)
return thing_seg * ins_seg, center_points.unsqueeze(0)
def merge_semantic_and_instance(
sem_seg, ins_seg, semantic_thing_seg, label_divisor, thing_ids, stuff_area, void_label
):
"""
Post-processing for panoptic segmentation, by merging semantic segmentation
label and class agnostic instance segmentation label.
Args:
sem_seg: A Tensor of shape [1, H, W], predicted category id for each pixel.
ins_seg: A Tensor of shape [1, H, W], predicted instance id for each pixel.
semantic_thing_seg: A Tensor of shape [1, H, W], predicted foreground mask.
label_divisor: An integer, used to convert panoptic id =
semantic id * label_divisor + instance_id.
thing_ids: Set, a set of ids from contiguous category ids belonging
to thing categories.
stuff_area: An integer, remove stuff segments whose area is less than stuff_area.
void_label: An integer, indicates the region has no confident prediction.
Returns:
A Tensor of shape [1, H, W].
"""
# In case thing mask does not align with semantic prediction.
pan_seg = torch.zeros_like(sem_seg) + void_label
is_thing = (ins_seg > 0) & (semantic_thing_seg > 0)
# Keep track of instance id for each class.
class_id_tracker = Counter()
# Paste thing by majority voting.
instance_ids = torch.unique(ins_seg)
for ins_id in instance_ids:
if ins_id == 0:
continue
# Make sure only do majority voting within `semantic_thing_seg`.
thing_mask = (ins_seg == ins_id) & is_thing
if torch.nonzero(thing_mask).size(0) == 0:
continue
class_id, _ = torch.mode(sem_seg[thing_mask].view(-1))
class_id_tracker[class_id.item()] += 1
new_ins_id = class_id_tracker[class_id.item()]
pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
# Paste stuff to unoccupied area.
class_ids = torch.unique(sem_seg)
for class_id in class_ids:
if class_id.item() in thing_ids:
# thing class
continue
# Calculate stuff area.
stuff_mask = (sem_seg == class_id) & (ins_seg == 0)
if stuff_mask.sum().item() >= stuff_area:
pan_seg[stuff_mask] = class_id * label_divisor
return pan_seg
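# Minimal sketch (not part of the reference code) of the panoptic id encoding used
# by `merge_semantic_and_instance` above:
#   panoptic_id = semantic_id * label_divisor + instance_id
# so class and instance index can be recovered by integer division / modulo
# (label_divisor=1000 below is just an illustrative value).
def _demo_panoptic_id_roundtrip(label_divisor=1000):
    semantic_id, instance_id = 17, 3
    panoptic_id = semantic_id * label_divisor + instance_id
    assert panoptic_id // label_divisor == semantic_id
    assert panoptic_id % label_divisor == instance_id
    return panoptic_id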
def get_panoptic_segmentation(
sem_seg,
center_heatmap,
offsets,
thing_ids,
label_divisor,
stuff_area,
void_label,
threshold=0.1,
nms_kernel=7,
top_k=200,
foreground_mask=None,
):
"""
Post-processing for panoptic segmentation.
Args:
sem_seg: A Tensor of shape [1, H, W] of predicted semantic label.
center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output.
offsets: A Tensor of shape [2, H, W] of raw offset output. The order of
second dim is (offset_y, offset_x).
thing_ids: A set of ids from contiguous category ids belonging
to thing categories.
label_divisor: An integer, used to convert panoptic id =
semantic id * label_divisor + instance_id.
stuff_area: An integer, remove stuff segments whose area is less than stuff_area.
void_label: An integer, indicates the region has no confident prediction.
threshold: A float, threshold applied to center heatmap score.
nms_kernel: An integer, NMS max pooling kernel size.
top_k: An integer, top k centers to keep.
foreground_mask: Optional, A Tensor of shape [1, H, W] of predicted
binary foreground mask. If not provided, it will be generated from
sem_seg.
Returns:
A Tensor of shape [1, H, W], int64.
"""
if sem_seg.dim() != 3 or sem_seg.size(0) != 1:
raise ValueError("Semantic prediction with un-supported shape: {}.".format(sem_seg.size()))
if center_heatmap.dim() != 3:
raise ValueError(
"Center prediction with un-supported dimension: {}.".format(center_heatmap.dim())
)
if offsets.dim() != 3:
raise ValueError("Offset prediction with un-supported dimension: {}.".format(offsets.dim()))
if foreground_mask is not None:
if foreground_mask.dim() != 3 or foreground_mask.size(0) != 1:
raise ValueError(
"Foreground prediction with un-supported shape: {}.".format(sem_seg.size())
)
thing_seg = foreground_mask
else:
# inference from semantic segmentation
thing_seg = torch.zeros_like(sem_seg)
for thing_class in list(thing_ids):
thing_seg[sem_seg == thing_class] = 1
instance, center = get_instance_segmentation(
sem_seg,
center_heatmap,
offsets,
thing_seg,
thing_ids,
threshold=threshold,
nms_kernel=nms_kernel,
top_k=top_k,
)
panoptic = merge_semantic_and_instance(
sem_seg, instance, thing_seg, label_divisor, thing_ids, stuff_area, void_label
)
return panoptic, center
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/post_processing.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/aa934324b55a34ce95fea143aea1cb7a6dbe04bd/segmentation/data/transforms/target_transforms.py#L11 # noqa
import numpy as np
import torch
class PanopticDeepLabTargetGenerator(object):
"""
Generates training targets for Panoptic-DeepLab.
"""
def __init__(
self,
ignore_label,
thing_ids,
sigma=8,
ignore_stuff_in_offset=False,
small_instance_area=0,
small_instance_weight=1,
ignore_crowd_in_semantic=False,
):
"""
Args:
ignore_label: Integer, the ignore label for semantic segmentation.
thing_ids: Set, a set of ids from contiguous category ids belonging
to thing categories.
sigma: the sigma for Gaussian kernel.
ignore_stuff_in_offset: Boolean, whether to ignore stuff region when
training the offset branch.
small_instance_area: Integer, indicates largest area for small instances.
small_instance_weight: Integer, indicates semantic loss weights for
small instances.
ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in
semantic segmentation branch, crowd region is ignored in the original
TensorFlow implementation.
"""
self.ignore_label = ignore_label
self.thing_ids = set(thing_ids)
self.ignore_stuff_in_offset = ignore_stuff_in_offset
self.small_instance_area = small_instance_area
self.small_instance_weight = small_instance_weight
self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
# Generate the default Gaussian image for each center
self.sigma = sigma
size = 6 * sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * sigma + 1, 3 * sigma + 1
self.g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
def __call__(self, panoptic, segments_info):
"""Generates the training target.
reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py # noqa
reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 # noqa
Args:
panoptic: numpy.array, panoptic label, we assume it is already
converted from rgb image by panopticapi.utils.rgb2id.
segments_info (list[dict]): see detectron2 documentation of "Use Custom Datasets".
Returns:
A dictionary with fields:
- sem_seg: Tensor, semantic label, shape=(H, W).
- center: Tensor, center heatmap, shape=(H, W).
- center_points: List, center coordinates, with tuple
(y-coord, x-coord).
- offset: Tensor, offset, shape=(2, H, W), first dim is
(offset_y, offset_x).
- sem_seg_weights: Tensor, loss weight for semantic prediction,
shape=(H, W).
- center_weights: Tensor, ignore region of center prediction,
shape=(H, W), used as weights for center regression: 0 means
ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
- offset_weights: Tensor, ignore region of offset prediction,
shape=(H, W), used as weights for offset regression: 0 means
ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
"""
height, width = panoptic.shape[0], panoptic.shape[1]
semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label
center = np.zeros((height, width), dtype=np.float32)
center_pts = []
offset = np.zeros((2, height, width), dtype=np.float32)
y_coord, x_coord = np.meshgrid(
np.arange(height, dtype=np.float32), np.arange(width, dtype=np.float32), indexing="ij"
)
# Generate pixel-wise loss weights
semantic_weights = np.ones_like(panoptic, dtype=np.uint8)
# 0: ignore, 1: has instance
# three conditions for a region to be ignored for instance branches:
# (1) It is labeled as `ignore_label`
# (2) It is crowd region (iscrowd=1)
# (3) (Optional) It is stuff region (for offset branch)
center_weights = np.zeros_like(panoptic, dtype=np.uint8)
offset_weights = np.zeros_like(panoptic, dtype=np.uint8)
for seg in segments_info:
cat_id = seg["category_id"]
if not (self.ignore_crowd_in_semantic and seg["iscrowd"]):
semantic[panoptic == seg["id"]] = cat_id
if not seg["iscrowd"]:
# Ignored regions are not in `segments_info`.
# Handle crowd region.
center_weights[panoptic == seg["id"]] = 1
if not self.ignore_stuff_in_offset or cat_id in self.thing_ids:
offset_weights[panoptic == seg["id"]] = 1
if cat_id in self.thing_ids:
# find instance center
mask_index = np.where(panoptic == seg["id"])
if len(mask_index[0]) == 0:
# the instance is completely cropped
continue
# Find instance area
ins_area = len(mask_index[0])
if ins_area < self.small_instance_area:
semantic_weights[panoptic == seg["id"]] = self.small_instance_weight
center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1])
center_pts.append([center_y, center_x])
# generate center heatmap
y, x = int(round(center_y)), int(round(center_x))
sigma = self.sigma
# upper left
ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1))
# bottom right
br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2))
# start and end indices in default Gaussian image
gaussian_x0, gaussian_x1 = max(0, -ul[0]), min(br[0], width) - ul[0]
gaussian_y0, gaussian_y1 = max(0, -ul[1]), min(br[1], height) - ul[1]
# start and end indices in center heatmap image
center_x0, center_x1 = max(0, ul[0]), min(br[0], width)
center_y0, center_y1 = max(0, ul[1]), min(br[1], height)
center[center_y0:center_y1, center_x0:center_x1] = np.maximum(
center[center_y0:center_y1, center_x0:center_x1],
self.g[gaussian_y0:gaussian_y1, gaussian_x0:gaussian_x1],
)
# generate offset (2, h, w) -> (y-dir, x-dir)
offset[0][mask_index] = center_y - y_coord[mask_index]
offset[1][mask_index] = center_x - x_coord[mask_index]
center_weights = center_weights[None]
offset_weights = offset_weights[None]
return dict(
sem_seg=torch.as_tensor(semantic.astype("long")),
center=torch.as_tensor(center.astype(np.float32)),
center_points=center_pts,
offset=torch.as_tensor(offset.astype(np.float32)),
sem_seg_weights=torch.as_tensor(semantic_weights.astype(np.float32)),
center_weights=torch.as_tensor(center_weights.astype(np.float32)),
offset_weights=torch.as_tensor(offset_weights.astype(np.float32)),
)
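# Hedged sketch (standalone toy example, not from the reference code) of how the
# precomputed Gaussian `self.g` above is splatted onto the center heatmap: a
# (6*sigma + 3) x (6*sigma + 3) Gaussian is pasted around each instance center with
# np.maximum, so overlapping instances keep the stronger response.
def _demo_center_heatmap(sigma=2, height=16, width=16, center_y=8, center_x=8):
    import numpy as np

    size = 6 * sigma + 3
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = 3 * sigma + 1
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))

    heatmap = np.zeros((height, width), dtype=np.float32)
    ul = (int(center_x - 3 * sigma - 1), int(center_y - 3 * sigma - 1))
    br = (int(center_x + 3 * sigma + 2), int(center_y + 3 * sigma + 2))
    gx0, gx1 = max(0, -ul[0]), min(br[0], width) - ul[0]
    gy0, gy1 = max(0, -ul[1]), min(br[1], height) - ul[1]
    cx0, cx1 = max(0, ul[0]), min(br[0], width)
    cy0, cy1 = max(0, ul[1]), min(br[1], height)
    heatmap[cy0:cy1, cx0:cx1] = np.maximum(
        heatmap[cy0:cy1, cx0:cx1], g[gy0:gy1, gx0:gx1]
    )
    return heatmap  # peaks at 1.0 at (center_y, center_x)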
|
banmo-main
|
third_party/detectron2_old/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Point supervision Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.utils.logger import setup_logger
from point_sup import PointSupDatasetMapper, add_point_sup_config
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if cfg.INPUT.POINT_SUP:
mapper = PointSupDatasetMapper(cfg, is_train=True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_pointrend_config(cfg)
add_point_sup_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
# Setup logger for "point_sup" module
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup")
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/train_net.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import numpy as np
import os
import sys
import pycocotools.mask as mask_utils
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
def get_point_annotations(input_filename, output_filename, num_points_per_instance):
with PathManager.open(input_filename, "r") as f:
coco_json = json.load(f)
coco_annos = coco_json.pop("annotations")
coco_points_json = copy.deepcopy(coco_json)
imgs = {}
for img in coco_json["images"]:
imgs[img["id"]] = img
new_annos = []
for ann in coco_annos:
# convert mask
t = imgs[ann["image_id"]]
h, w = t["height"], t["width"]
segm = ann.pop("segmentation")
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask_utils.frPyObjects(segm, h, w)
rle = mask_utils.merge(rles)
elif type(segm["counts"]) == list:
# uncompressed RLE
rle = mask_utils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
mask = mask_utils.decode(rle)
new_ann = copy.deepcopy(ann)
# sample points in image coordinates
box = ann["bbox"]
point_coords_wrt_image = np.random.rand(num_points_per_instance, 2)
point_coords_wrt_image[:, 0] = point_coords_wrt_image[:, 0] * box[2]
point_coords_wrt_image[:, 1] = point_coords_wrt_image[:, 1] * box[3]
point_coords_wrt_image[:, 0] += box[0]
point_coords_wrt_image[:, 1] += box[1]
# round to integer coordinates
point_coords_wrt_image = np.floor(point_coords_wrt_image).astype(int)
# get labels
assert (point_coords_wrt_image >= 0).all(), (point_coords_wrt_image, mask.shape)
assert (point_coords_wrt_image[:, 0] < w).all(), (point_coords_wrt_image, mask.shape)
assert (point_coords_wrt_image[:, 1] < h).all(), (point_coords_wrt_image, mask.shape)
point_labels = mask[point_coords_wrt_image[:, 1], point_coords_wrt_image[:, 0]]
# store new annotations
new_ann["point_coords"] = point_coords_wrt_image.tolist()
new_ann["point_labels"] = point_labels.tolist()
new_annos.append(new_ann)
coco_points_json["annotations"] = new_annos
with PathManager.open(output_filename, "w") as f:
json.dump(coco_points_json, f)
print("{} is modified and stored in {}.".format(input_filename, output_filename))
if __name__ == "__main__":
"""
Generate point-based supervision for COCO dataset.
Usage:
python tools/prepare_coco_point_annotations_without_masks.py \
NUM_POINTS_PER_INSTANCE NUM_VERSIONS_WITH_DIFFERENT_SEED
Example to generate point-based COCO dataset with 10 points per instance:
python tools/prepare_coco_point_annotations_without_masks.py 10
"""
# Fix random seed
seed_all_rng(12345)
assert len(sys.argv) >= 2, "Please provide number of points to sample per instance"
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco/annotations")
num_points_per_instance = int(sys.argv[1])
if len(sys.argv) == 3:
repeat = int(sys.argv[2])
else:
repeat = 1
s = "instances_train2017"
for version in range(repeat):
print(
"Start sampling {} points per instance for annotations {}.".format(
num_points_per_instance, s
)
)
get_point_annotations(
os.path.join(dataset_dir, "{}.json".format(s)),
os.path.join(
dataset_dir,
"{}_n{}_v{}_without_masks.json".format(s, num_points_per_instance, version + 1),
),
num_points_per_instance,
)
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/tools/prepare_coco_point_annotations_without_masks.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from typing import Any, List
from detectron2.modeling import ROI_MASK_HEAD_REGISTRY
from detectron2.modeling.roi_heads.mask_head import MaskRCNNConvUpsampleHead, mask_rcnn_inference
from detectron2.projects.point_rend import ImplicitPointRendMaskHead
from detectron2.projects.point_rend.point_features import point_sample
from detectron2.projects.point_rend.point_head import roi_mask_point_loss
from detectron2.structures import Instances
from .point_utils import get_point_coords_from_point_annotation
__all__ = [
"ImplicitPointRendPointSupHead",
"MaskRCNNConvUpsamplePointSupHead",
]
@ROI_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsamplePointSupHead(MaskRCNNConvUpsampleHead):
"""
A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
Predictions are made with a final 1x1 conv layer.
The difference with `MaskRCNNConvUpsampleHead` is that this head is trained
with point supervision. Please use the `MaskRCNNConvUpsampleHead` if you want
to train the model with mask supervision.
"""
def forward(self, x, instances: List[Instances]) -> Any:
"""
Args:
x: input region feature(s) provided by :class:`ROIHeads`.
instances (list[Instances]): contains the boxes & labels corresponding
to the input features.
Exact format is up to its caller to decide.
Typically, this is the foreground instances in training, with
"proposal_boxes" field and other gt annotations.
In inference, it contains boxes that are already predicted.
Returns:
A dict of losses in training. The predicted "instances" in inference.
"""
x = self.layers(x)
if self.training:
N, C, H, W = x.shape
assert H == W
proposal_boxes = [x.proposal_boxes for x in instances]
assert N == sum(len(x) for x in proposal_boxes)
if N == 0:
return {"loss_mask": x.sum() * 0}
# Training with point supervision
# Sanity check: annotation should not contain gt_masks
assert not instances[0].has("gt_masks")
point_coords, point_labels = get_point_coords_from_point_annotation(instances)
mask_logits = point_sample(
x,
point_coords,
align_corners=False,
)
return {"loss_mask": roi_mask_point_loss(mask_logits, instances, point_labels)}
else:
mask_rcnn_inference(x, instances)
return instances
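# Hedged sketch of what sampling mask logits at point coordinates boils down to
# (a simplified stand-in for the `point_sample` call above, which wraps
# F.grid_sample): bilinearly read the logit map at normalized [0, 1] coordinates,
# mapped here to grid_sample's [-1, 1] convention.
def _demo_point_sample_sketch():
    import torch
    import torch.nn.functional as F

    logits = torch.arange(16.0).view(1, 1, 4, 4)  # (N, C, H, W) toy logit map
    point_coords = torch.tensor([[[0.5, 0.5]]])   # (N, P, 2) in [0, 1], (x, y)
    grid = 2.0 * point_coords.unsqueeze(2) - 1.0  # (N, P, 1, 2) in [-1, 1]
    sampled = F.grid_sample(logits, grid, align_corners=False).squeeze(3)
    return sampled                                 # shape (N, C, P)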
@ROI_MASK_HEAD_REGISTRY.register()
class ImplicitPointRendPointSupHead(ImplicitPointRendMaskHead):
def _uniform_sample_train_points(self, instances):
assert self.training
assert not instances[0].has("gt_masks")
point_coords, point_labels = get_point_coords_from_point_annotation(instances)
return point_coords, point_labels
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/mask_head.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
def add_point_sup_config(cfg):
"""
Add config for point supervision.
"""
# Use point annotation
cfg.INPUT.POINT_SUP = False
# Sample only part of points in each iteration.
# Default: 0, use all available points.
cfg.INPUT.SAMPLE_POINTS = 0
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import register_point_annotations
from .config import add_point_sup_config
from .dataset_mapper import PointSupDatasetMapper
from .mask_head import MaskRCNNConvUpsamplePointSupHead
from .point_utils import get_point_coords_from_point_annotation
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
# fmt: off
from detectron2.data.detection_utils import \
annotations_to_instances as base_annotations_to_instances
from detectron2.data.detection_utils import \
transform_instance_annotations as base_transform_instance_annotations
# fmt: on
def annotations_to_instances(annos, image_size, sample_points=0):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
sample_points (int): subsample points at each iteration
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes",
"gt_point_coords", "gt_point_labels", if they can be obtained from `annos`.
This is the format that builtin models with point supervision expect.
"""
target = base_annotations_to_instances(annos, image_size)
assert "point_coords" in annos[0]
assert "point_labels" in annos[0]
assert "segmentation" not in annos[0], "Please remove mask annotation"
if len(annos) and "point_labels" in annos[0]:
point_coords = []
point_labels = []
for i, _ in enumerate(annos):
# Already in the image coordinate system
point_coords_wrt_image = np.array(annos[i]["point_coords"])
point_labels_wrt_image = np.array(annos[i]["point_labels"])
if sample_points > 0:
random_indices = np.random.choice(
point_coords_wrt_image.shape[0],
sample_points,
replace=point_coords_wrt_image.shape[0] < sample_points,
).astype(int)
point_coords_wrt_image = point_coords_wrt_image[random_indices]
point_labels_wrt_image = point_labels_wrt_image[random_indices]
assert point_coords_wrt_image.shape[0] == point_labels_wrt_image.size
point_coords.append(point_coords_wrt_image)
point_labels.append(point_labels_wrt_image)
point_coords = torch.stack([torch.from_numpy(x) for x in point_coords])
point_labels = torch.stack([torch.from_numpy(x) for x in point_labels])
target.gt_point_coords = point_coords
target.gt_point_labels = point_labels
return target
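# Hedged sketch (toy numbers, not from the reference code) of the subsampling above:
# when `sample_points` exceeds the number of annotated points, indices are drawn
# with replacement so every instance still yields exactly `sample_points` points.
def _demo_subsample_points(sample_points=8):
    import numpy as np

    point_coords = np.random.rand(5, 2)  # only 5 annotated points available
    point_labels = np.random.randint(0, 2, size=5)
    idx = np.random.choice(
        point_coords.shape[0],
        sample_points,
        replace=point_coords.shape[0] < sample_points,
    ).astype(int)
    return point_coords[idx], point_labels[idx]  # always `sample_points` rows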
def transform_instance_annotations(annotation, transforms, image_size):
"""
Apply transforms to box, and point annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for points.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList or list[Transform]):
image_size (tuple): the height, width of the transformed image
Returns:
dict:
the same input dict with fields "bbox", "point_coords", "point_labels"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
annotation = base_transform_instance_annotations(annotation, transforms, image_size)
assert "segmentation" not in annotation
assert "point_coords" in annotation
assert "point_labels" in annotation
point_coords = annotation["point_coords"]
point_labels = np.array(annotation["point_labels"]).astype(np.float64)
point_coords = transforms.apply_coords(point_coords)
# Set all out-of-boundary points to "unlabeled"
inside = (point_coords >= np.array([0, 0])) & (point_coords <= np.array(image_size[::-1]))
inside = inside.all(axis=1)
point_labels[~inside] = -1
annotation["point_coords"] = point_coords
annotation["point_labels"] = point_labels
return annotation
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/detection_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from typing import List, Union
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.config import configurable
from .detection_utils import annotations_to_instances, transform_instance_annotations
__all__ = [
"PointSupDatasetMapper",
]
class PointSupDatasetMapper:
"""
The callable currently does the following:
1. Read the image from "file_name"
2. Applies transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
# Extra data augmentation for point supervision
sample_points: int = 0,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
sample_points: subsample points at each iteration
"""
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.sample_points = sample_points
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
logger.info(f"Point Augmentations used in {mode}: sample {sample_points} points")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
raise ValueError("Crop augmentation not supported to point supervision.")
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"sample_points": cfg.INPUT.SAMPLE_POINTS,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# Maps points from the closed interval [0, image_size - 1] on discrete
# image coordinates to the half-open interval [x1, x2) on continuous image
# coordinates. We use the continuous-discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
for ann in dataset_dict["annotations"]:
point_coords_wrt_image = np.array(ann["point_coords"]).astype(np.float64)
point_coords_wrt_image = point_coords_wrt_image + 0.5
ann["point_coords"] = point_coords_wrt_image
annos = [
# also need to transform point coordinates
transform_instance_annotations(
obj,
transforms,
image_shape,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances(
annos,
image_shape,
sample_points=self.sample_points,
)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
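# Hedged sketch (not from the reference code) of the discrete <-> continuous pixel
# coordinate convention referenced in `__call__` above (Heckbert 1990): a discrete
# index d corresponds to the continuous coordinate c = d + 0.5, and the inverse
# mapping is d = floor(c).
def _demo_pixel_coordinate_convention():
    import math

    d = 3        # discrete pixel index
    c = d + 0.5  # continuous coordinate of that pixel's center
    assert math.floor(c) == d
    return c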
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/dataset_mapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.layers import cat
def get_point_coords_from_point_annotation(instances):
"""
Load point coords and their corresponding labels from point annotation.
Args:
instances (list[Instances]): A list of N Instances, where N is the number of images
in the batch. These instances are in 1:1
correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask,
...) associated with each instance are stored in fields.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
point_labels (Tensor): A tensor of shape (N, P) that contains the labels of P
sampled points. `point_labels` takes 3 possible values:
- 0: the point belongs to background
- 1: the point belongs to the object
- -1: the point is ignored during training
"""
point_coords_list = []
point_labels_list = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
point_coords = instances_per_image.gt_point_coords.to(torch.float32)
point_labels = instances_per_image.gt_point_labels.to(torch.float32).clone()
proposal_boxes_per_image = instances_per_image.proposal_boxes.tensor
# Convert point coordinate system, ground truth points are in image coord.
point_coords_wrt_box = get_point_coords_wrt_box(proposal_boxes_per_image, point_coords)
# Ignore points that are outside predicted boxes.
point_ignores = (
(point_coords_wrt_box[:, :, 0] < 0)
| (point_coords_wrt_box[:, :, 0] > 1)
| (point_coords_wrt_box[:, :, 1] < 0)
| (point_coords_wrt_box[:, :, 1] > 1)
)
point_labels[point_ignores] = -1
point_coords_list.append(point_coords_wrt_box)
point_labels_list.append(point_labels)
return (
cat(point_coords_list, dim=0),
cat(point_labels_list, dim=0),
)
def get_point_coords_wrt_box(boxes_coords, point_coords):
"""
Convert image-level absolute coordinates to box-normalized [0, 1] x [0, 1] point coordinates.
Args:
boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding box coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
image-level absolute coordinates of P sampled points.
Returns:
point_coords_wrt_box (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
"""
with torch.no_grad():
point_coords_wrt_box = point_coords.clone()
point_coords_wrt_box[:, :, 0] -= boxes_coords[:, None, 0]
point_coords_wrt_box[:, :, 1] -= boxes_coords[:, None, 1]
point_coords_wrt_box[:, :, 0] = point_coords_wrt_box[:, :, 0] / (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_box[:, :, 1] = point_coords_wrt_box[:, :, 1] / (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
return point_coords_wrt_box
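# Hedged sketch (toy numbers, not from the reference code) of the normalization in
# `get_point_coords_wrt_box` above: an absolute image point is shifted by the box
# corner and divided by the box size, landing in [0, 1] x [0, 1] when it is inside.
def _demo_point_wrt_box():
    import torch

    boxes = torch.tensor([[10.0, 20.0, 30.0, 60.0]])  # (R, 4): x1, y1, x2, y2
    points = torch.tensor([[[15.0, 30.0]]])           # (R, P, 2) in image coords
    normalized = get_point_coords_wrt_box(boxes, points)
    # normalized -> tensor([[[0.25, 0.25]]]): (15-10)/(30-10), (30-20)/(60-20)
    return normalized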
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/point_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.builtin import _get_builtin_metadata
from detectron2.data.datasets.coco import load_coco_json
logger = logging.getLogger(__name__)
# COCO dataset
def register_coco_instances_with_points(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance segmentation with point annotation.
The point annotation json does not have "segmentation" field, instead,
it has "point_coords" and "point_labels" fields.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_json(json_file, image_root, name, ["point_coords", "point_labels"])
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
# point annotations without masks
"coco_2017_train_points_n10_v1_without_masks": (
"coco/train2017",
"coco/annotations/instances_train2017_n10_v1_without_masks.json",
),
}
def register_all_coco_train_points(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances_with_points(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_train_points(_root)
|
banmo-main
|
third_party/detectron2_old/projects/PointSup/point_sup/register_point_annotations.py
|
#!/usr/bin/env python
import sys
import torch
from fvcore.nn.precise_bn import update_bn_stats
from torch import nn
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.evaluation import inference_on_dataset
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
logger = setup_logger()
setup_logger(name="fvcore")
class CycleBatchNormList(nn.ModuleList):
"""
A hacky way to implement domain-specific BatchNorm
when it's guaranteed that a fixed number of domains will be
called in a fixed order.
"""
def __init__(self, length, channels):
super().__init__([nn.BatchNorm2d(channels, affine=False) for k in range(length)])
# shared affine, domain-specific BN
self.weight = nn.Parameter(torch.ones(channels))
self.bias = nn.Parameter(torch.zeros(channels))
self._pos = 0
def forward(self, x):
ret = self[self._pos](x)
self._pos = (self._pos + 1) % len(self)
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
return ret * w + b
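# Hedged usage sketch (toy tensors): consecutive calls to the module above cycle
# through per-domain BatchNorm statistics while sharing a single affine transform,
# so each of the `length` feature levels (e.g. the RetinaNet head's FPN levels)
# gets its own normalization statistics.
def _demo_cycle_bn(length=5, channels=8):
    import torch

    bn_list = CycleBatchNormList(length, channels)
    features = [
        torch.randn(2, channels, 32 // (2 ** i), 32 // (2 ** i)) for i in range(length)
    ]
    # each call advances to the next BatchNorm in the cycle
    return [bn_list(f) for f in features]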
if __name__ == "__main__":
checkpoint = sys.argv[1]
cfg = LazyConfig.load_rel("./configs/retinanet_SyncBNhead.py")
model = cfg.model
model.head.norm = lambda c: CycleBatchNormList(len(model.head_in_features), c)
model = instantiate(model)
model.cuda()
DetectionCheckpointer(model).load(checkpoint)
cfg.dataloader.train.total_batch_size = 8
logger.info("Running PreciseBN ...")
with EventStorage(), torch.no_grad():
update_bn_stats(model, instantiate(cfg.dataloader.train), 500)
logger.info("Running evaluation ...")
inference_on_dataset(
model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
)
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/retinanet-eval-domain-specific.py
|
import math
import torch
import torch.distributed as dist
from detectron2.modeling.roi_heads import FastRCNNConvFCHead, MaskRCNNConvUpsampleHead
from detectron2.utils import comm
from fvcore.nn.distributed import differentiable_all_gather
def concat_all_gather(input):
bs_int = input.shape[0]
size_list = comm.all_gather(bs_int)
max_size = max(size_list)
max_shape = (max_size,) + input.shape[1:]
padded_input = input.new_zeros(max_shape)
padded_input[:bs_int] = input
all_inputs = differentiable_all_gather(padded_input)
inputs = [x[:sz] for sz, x in zip(size_list, all_inputs)]
return inputs, size_list
def batch_shuffle(x):
# gather from all gpus
batch_size_this = x.shape[0]
all_xs, batch_size_all = concat_all_gather(x)
all_xs_concat = torch.cat(all_xs, dim=0)
total_bs = sum(batch_size_all)
rank = dist.get_rank()
assert batch_size_all[rank] == batch_size_this
idx_range = (sum(batch_size_all[:rank]), sum(batch_size_all[: rank + 1]))
# random shuffle index
idx_shuffle = torch.randperm(total_bs, device=x.device)
# broadcast to all gpus
dist.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
splits = torch.split(idx_shuffle, math.ceil(total_bs / dist.get_world_size()))
if len(splits) > rank:
idx_this = splits[rank]
else:
idx_this = idx_shuffle.new_zeros([0])
return all_xs_concat[idx_this], idx_unshuffle[idx_range[0] : idx_range[1]]
def batch_unshuffle(x, idx_unshuffle):
all_x, _ = concat_all_gather(x)
x_gather = torch.cat(all_x, dim=0)
return x_gather[idx_unshuffle]
def wrap_shuffle(module_type, method):
def new_method(self, x):
if self.training:
x, idx = batch_shuffle(x)
x = getattr(module_type, method)(self, x)
if self.training:
x = batch_unshuffle(x, idx)
return x
return type(module_type.__name__ + "WithShuffle", (module_type,), {method: new_method})
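# Hedged sketch of the dynamic-subclass trick used by `wrap_shuffle` above, without
# the distributed shuffle (single-process stand-in, toy module): `type()` builds a
# new class on the fly whose overridden method wraps the parent implementation.
def _demo_wrap_method():
    import torch
    from torch import nn

    def new_forward(self, x):
        # pre-/post-processing would go here (the real wrapper shuffles and then
        # unshuffles the batch across GPUs); this stand-in just calls the parent.
        return nn.Linear.forward(self, x)

    WrappedLinear = type("LinearWithWrapper", (nn.Linear,), {"forward": new_forward})
    layer = WrappedLinear(4, 2)
    return layer(torch.randn(3, 4))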
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head._target_ = wrap_shuffle(FastRCNNConvFCHead, "forward")
model.roi_heads.mask_head._target_ = wrap_shuffle(MaskRCNNConvUpsampleHead, "layers")
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_shuffle.py
|
from detectron2.model_zoo import get_config
model = get_config("common/models/mask_rcnn_fpn.py").model
model.backbone.bottom_up.freeze_at = 2
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "BN"
# 4conv1fc head
model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
model.roi_heads.box_head.fc_dims = [1024]
dataloader = get_config("common/data/coco.py").dataloader
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x
optimizer = get_config("common/optim.py").SGD
train = get_config("common/train.py").train
train.max_iter = 270000 # 3x for batchsize = 16
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead.py
|
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "SyncBN"
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py
|
from torch.nn import BatchNorm2d
from torch.nn import functional as F
class BatchNormBatchStat(BatchNorm2d):
"""
BN that uses batch stat in inference
"""
def forward(self, input):
if self.training:
return super().forward(input)
return F.batch_norm(input, None, None, self.weight, self.bias, True, 1.0, self.eps)
# After training with the base config, it's sufficient to load its model with
# this config only for inference -- because the training-time behavior is identical.
from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train
model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = BatchNormBatchStat
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_batch_stats.py
|
from typing import List
import torch
from torch import Tensor, nn
from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
def apply_sequential(inputs, modules):
for mod in modules:
if isinstance(mod, (nn.BatchNorm2d, nn.SyncBatchNorm)):
# for BN layer, normalize all inputs together
shapes = [i.shape for i in inputs]
spatial_sizes = [s[2] * s[3] for s in shapes]
x = [i.flatten(2) for i in inputs]
x = torch.cat(x, dim=2).unsqueeze(3)
x = mod(x).split(spatial_sizes, dim=2)
inputs = [i.view(s) for s, i in zip(shapes, x)]
else:
inputs = [mod(i) for i in inputs]
return inputs
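def _shared_bn_sketch():
    # Illustrative, uncalled sketch with assumed toy shapes: a single BatchNorm2d
    # normalizes two feature levels jointly by flattening their spatial dims,
    # concatenating along that dim, and splitting back, so per-level shapes are unchanged.
    feats = [torch.randn(2, 8, 16, 16), torch.randn(2, 8, 8, 8)]
    outs = apply_sequential(feats, [nn.BatchNorm2d(8)])
    assert [o.shape for o in outs] == [f.shape for f in feats]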
class RetinaNetHead_SharedTrainingBN(RetinaNetHead):
def forward(self, features: List[Tensor]):
logits = apply_sequential(features, list(self.cls_subnet) + [self.cls_score])
bbox_reg = apply_sequential(features, list(self.bbox_subnet) + [self.bbox_pred])
return logits, bbox_reg
from .retinanet_SyncBNhead import model, dataloader, lr_multiplier, optimizer, train
model.head._target_ = RetinaNetHead_SharedTrainingBN
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead_SharedTraining.py
|
from detectron2.model_zoo import get_config
model = get_config("common/models/retinanet.py").model
model.backbone.bottom_up.freeze_at = 2
model.head.norm = "SyncBN"
dataloader = get_config("common/data/coco.py").dataloader
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x
optimizer = get_config("common/optim.py").SGD
train = get_config("common/train.py").train
optimizer.lr = 0.01
train.max_iter = 270000 # 3x for batchsize = 16
|
banmo-main
|
third_party/detectron2_old/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "tensormask", "layers", "csrc")
main_source = os.path.join(extensions_dir, "vision.cpp")
sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
os.path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
# It's better if PyTorch could do this by default.
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"tensormask._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="tensormask",
version="0.1",
author="FAIR",
packages=find_packages(exclude=("configs", "tests")),
python_requires=">=3.6",
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
TensorMask Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from tensormask import add_tensormask_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, output_dir=output_folder)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tensormask_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tests/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch.autograd import gradcheck
from tensormask.layers.swap_align2nat import SwapAlign2Nat
class SwapAlign2NatTest(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_swap_align2nat_gradcheck_cuda(self):
dtype = torch.float64
device = torch.device("cuda")
m = SwapAlign2Nat(2).to(dtype=dtype, device=device)
x = torch.rand(2, 4, 10, 10, dtype=dtype, device=device, requires_grad=True)
self.assertTrue(gradcheck(m, x), "gradcheck failed for SwapAlign2Nat CUDA")
def _swap_align2nat(self, tensor, lambda_val):
"""
The basic setup for testing SwapAlign2Nat
"""
op = SwapAlign2Nat(lambda_val, pad_val=0.0)
input = torch.from_numpy(tensor[None, :, :, :].astype("float32"))
output = op.forward(input.cuda()).cpu().numpy()
return output[0]
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tests/test_swap_align2nat.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_tensormask_config(cfg):
"""
Add config for TensorMask.
"""
cfg.MODEL.TENSOR_MASK = CN()
# Anchor parameters
cfg.MODEL.TENSOR_MASK.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6", "p7"]
# Convolutions to use in the towers
cfg.MODEL.TENSOR_MASK.NUM_CONVS = 4
# Number of foreground classes.
cfg.MODEL.TENSOR_MASK.NUM_CLASSES = 80
# Channel size for the classification tower
cfg.MODEL.TENSOR_MASK.CLS_CHANNELS = 256
cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.05
# Only the top (1000 * #levels) candidate boxes across all levels are
# considered jointly during test (to improve speed)
cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST = 6000
cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST = 0.5
# Box parameters
# Channel size for the box tower
cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS = 128
# Weights on (dx, dy, dw, dh)
cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS = (1.5, 1.5, 0.75, 0.75)
# Loss parameters
cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA = 3.0
cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA = 0.3
# Mask parameters
# Channel size for the mask tower
cfg.MODEL.TENSOR_MASK.MASK_CHANNELS = 128
# Mask loss weight
cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT = 2.0
# weight on positive pixels within the mask
cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT = 1.5
# Whether to predict in the aligned representation
cfg.MODEL.TENSOR_MASK.ALIGNED_ON = False
# Whether to use the bipyramid architecture
cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON = False
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import math
from typing import List
import torch
import torch.nn.functional as F
from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss
from torch import nn
from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K
from detectron2.structures import Boxes, ImageList, Instances
from tensormask.layers import SwapAlign2Nat
__all__ = ["TensorMask"]
def permute_all_cls_and_box_to_N_HWA_K_and_concat(pred_logits, pred_anchor_deltas, num_classes=80):
"""
Rearrange the tensor layout from the network output, i.e.:
list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi)
to per-image predictions, i.e.:
Tensor: of shape (N x sum(Hi x Wi x A), K)
"""
# for each feature level, permute the outputs to make them be in the
# same format as the labels.
pred_logits_flattened = [permute_to_N_HWA_K(x, num_classes) for x in pred_logits]
pred_anchor_deltas_flattened = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
pred_logits = cat(pred_logits_flattened, dim=1).view(-1, num_classes)
pred_anchor_deltas = cat(pred_anchor_deltas_flattened, dim=1).view(-1, 4)
return pred_logits, pred_anchor_deltas
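def _permute_and_concat_shape_sketch():
    # Illustrative, uncalled shape check with assumed toy sizes (two levels, N=2 images,
    # A=3 anchors, K=80 classes): the outputs are flattened to (N * sum(Hi*Wi*A), K) for
    # logits and (N * sum(Hi*Wi*A), 4) for box deltas.
    pred_logits = [torch.zeros(2, 3 * 80, 8, 8), torch.zeros(2, 3 * 80, 4, 4)]
    pred_deltas = [torch.zeros(2, 3 * 4, 8, 8), torch.zeros(2, 3 * 4, 4, 4)]
    logits, deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(pred_logits, pred_deltas)
    assert logits.shape == (2 * (8 * 8 + 4 * 4) * 3, 80)
    assert deltas.shape == (2 * (8 * 8 + 4 * 4) * 3, 4)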
def _assignment_rule(
gt_boxes,
anchor_boxes,
unit_lengths,
min_anchor_size,
scale_thresh=2.0,
spatial_thresh=1.0,
uniqueness_on=True,
):
"""
Given two lists of boxes of N ground truth boxes and M anchor boxes,
compute the assignment between the two, following the assignment rules in
https://arxiv.org/abs/1903.12174.
The box order must be (xmin, ymin, xmax, ymax), so please make sure to convert
to BoxMode.XYXY_ABS before calling this function.
Args:
gt_boxes, anchor_boxes (Boxes): two Boxes. Contains N & M boxes/anchors, respectively.
unit_lengths (Tensor): Contains the unit lengths of M anchor boxes.
min_anchor_size (float): Minimum size of the anchor, in pixels
scale_thresh (float): The `scale` threshold: the maximum size of the anchor
should not be greater than scale_thresh x max(h, w) of
the ground truth box.
spatial_thresh (float): The `spatial` threshold: the l2 distance between the
center of the anchor and the ground truth box should not
be greater than spatial_thresh x u where u is the unit length.
Returns:
matches (Tensor[int64]): a vector of length M, where matches[i] is a matched
ground-truth index in [0, N)
match_labels (Tensor[int8]): a vector of length M, where match_labels[i] indicates
whether anchor i is matched as foreground (1), background (0), or ignored (-1)
"""
gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor
N = gt_boxes.shape[0]
M = anchor_boxes.shape[0]
if N == 0 or M == 0:
return (
gt_boxes.new_full((N,), 0, dtype=torch.int64),
gt_boxes.new_full((N,), -1, dtype=torch.int8),
)
# Containment rule
lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2]) # [N,M,2]
rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:]) # [N,M,2]
union = cat([lt, rb], dim=2) # [N,M,4]
dummy_gt_boxes = torch.zeros_like(gt_boxes)
anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :] # [N,M,4]
contain_matrix = torch.all(union == anchor, dim=2) # [N,M]
# Centrality rule, scale
gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0] # [N]
gt_size_upper = gt_size_lower * scale_thresh # [N]
# Fall back for small objects
gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size
# Due to sampling of locations, the anchor sizes are deducted with sampling strides
anchor_size = (
torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths
) # [M]
size_diff_upper = gt_size_upper[:, None] - anchor_size # [N,M]
scale_matrix = size_diff_upper >= 0 # [N,M]
# Centrality rule, spatial
gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2 # [N,2]
anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2 # [M,2]
offset_center = gt_center[:, None, :] - anchor_center[:, :] # [N,M,2]
offset_center /= unit_lengths[:, None] # [N,M,2]
spatial_square = spatial_thresh * spatial_thresh
spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square
assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int()
# assign_matrix is N (gt) x M (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = assign_matrix.max(dim=0)
match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
match_labels[matched_vals == 0] = 0
match_labels[matched_vals == 1] = 1
# find all the elements that match to ground truths multiple times
not_unique_idxs = assign_matrix.sum(dim=0) > 1
if uniqueness_on:
match_labels[not_unique_idxs] = 0
else:
match_labels[not_unique_idxs] = -1
return matches, match_labels
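def _assignment_rule_sketch():
    # Illustrative, uncalled sketch with made-up boxes: one ground-truth box and two
    # anchors with unit length 16. The first anchor contains the box, passes the scale
    # rule and is centered close enough (spatial rule), so it is labeled foreground (1);
    # the second fails containment and is labeled background (0).
    gt = Boxes(torch.tensor([[10.0, 10.0, 50.0, 50.0]]))
    anchors = Boxes(torch.tensor([[0.0, 0.0, 64.0, 64.0], [40.0, 40.0, 104.0, 104.0]]))
    unit_lengths = torch.tensor([16.0, 16.0])
    matches, match_labels = _assignment_rule(gt, anchors, unit_lengths, min_anchor_size=16.0)
    assert matches.tolist() == [0, 0]
    assert match_labels.tolist() == [1, 0]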
# TODO make the paste_mask function in d2 core support mask list
def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5):
"""
Paste a list of masks that are of various resolutions (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Args:
masks (list(Tensor)): A list of Tensor of shape (1, Hmask_i, Wmask_i).
Values are in [0, 1]. The list length, Bimg, is the
number of detected object instances in the image.
boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond
to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
number of detected object instances and Himage, Wimage are the image height
and width. img_masks[i] is a binary mask for object instance i.
"""
if len(masks) == 0:
return torch.empty((0, 1) + image_shape, dtype=torch.uint8)
# Loop over masks groups. Each group has the same mask prediction size.
img_masks = []
ind_masks = []
mask_sizes = torch.tensor([m.shape[-1] for m in masks])
unique_sizes = torch.unique(mask_sizes)
for msize in unique_sizes.tolist():
cur_ind = torch.where(mask_sizes == msize)[0]
ind_masks.append(cur_ind)
cur_masks = cat([masks[i] for i in cur_ind])
cur_boxes = boxes[cur_ind]
img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold))
img_masks = cat(img_masks)
ind_masks = cat(ind_masks)
img_masks_out = torch.empty_like(img_masks)
img_masks_out[ind_masks, :, :] = img_masks
return img_masks_out
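def _paste_mask_lists_sketch():
    # Illustrative, uncalled sketch with made-up masks: two soft masks of different
    # resolutions (2x2 and 4x4) are grouped by size, pasted into a 16x16 image using
    # their boxes, and returned in the original detection order.
    masks = [torch.ones(1, 2, 2), torch.zeros(1, 4, 4)]
    boxes = Boxes(torch.tensor([[0.0, 0.0, 8.0, 8.0], [8.0, 8.0, 16.0, 16.0]]))
    out = _paste_mask_lists_in_image(masks, boxes, (16, 16))
    assert out.shape == (2, 16, 16)
    # order is preserved: the all-ones mask comes first, the all-zeros mask second
    assert out[0].sum() > 0
    assert out[1].sum() == 0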
def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5):
"""
Post-process the output boxes for TensorMask.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will postprocess the raw outputs of TensorMask
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place. Note that it does not contain the field
`pred_masks`, which is provided by the other input `result_mask_info`.
result_mask_info (list[Tensor], Boxes): a pair of two items for mask related results.
The first item is a list of #detection tensors, each is the predicted masks.
The second item is the anchors corresponding to the predicted masks.
output_height, output_width: the desired output resolution.
Returns:
Instances: the postprocessed output from the model, based on the output resolution
"""
scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
results = Instances((output_height, output_width), **results.get_fields())
output_boxes = results.pred_boxes
output_boxes.tensor[:, 0::2] *= scale_x
output_boxes.tensor[:, 1::2] *= scale_y
output_boxes.clip(results.image_size)
inds_nonempty = output_boxes.nonempty()
results = results[inds_nonempty]
result_masks, result_anchors = result_mask_info
if result_masks:
result_anchors.tensor[:, 0::2] *= scale_x
result_anchors.tensor[:, 1::2] *= scale_y
result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i]
results.pred_masks = _paste_mask_lists_in_image(
result_masks,
result_anchors[inds_nonempty],
results.image_size,
threshold=mask_threshold,
)
return results
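def _postprocess_sketch():
    # Illustrative, uncalled sketch with made-up detections: boxes predicted on a 100x200
    # input are rescaled to a 200x400 output resolution, doubling their coordinates.
    # Masks are skipped by passing an empty mask list.
    res = Instances((100, 200))
    res.pred_boxes = Boxes(torch.tensor([[10.0, 10.0, 50.0, 40.0]]))
    res.scores = torch.tensor([0.9])
    res.pred_classes = torch.tensor([0])
    out = _postprocess(res, ([], None), output_height=200, output_width=400)
    assert out.pred_boxes.tensor.tolist() == [[20.0, 20.0, 100.0, 80.0]]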
class TensorMaskAnchorGenerator(DefaultAnchorGenerator):
"""
For a set of image sizes and feature maps, computes a set of anchors for TensorMask.
It also computes the unit lengths and indexes for each anchor box.
"""
def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes):
anchors = []
unit_lengths = []
indexes = []
for lvl, (size, stride, base_anchors) in enumerate(
zip(grid_sizes, self.strides, self.cell_anchors)
):
grid_height, grid_width = size
device = base_anchors.device
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2)
# Stack anchors in shapes of (HWA, 4)
cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4)
anchors.append(cur_anchor)
unit_lengths.append(
torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device)
)
# create mask indexes using mesh grid
shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device)
shifts_i = torch.zeros((1,), dtype=torch.int64, device=device)
shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device)
shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device)
shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device)
grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a)
indexes.append(torch.stack(grids, dim=5).view(-1, 5))
return anchors, unit_lengths, indexes
def forward(self, features):
"""
Returns:
list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes.
The Boxes contains anchors of this image on the specific feature level.
list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
The tensor contains strides, or unit lengths for the anchors.
list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
The Tensor contains indexes for the anchors, with the last dimension meaning
(L, N, H, W, A), where L is level, N is image (not set yet), H is height,
W is width, and A is anchor.
"""
num_images = len(features[0])
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes(
grid_sizes
)
# Convert anchors from Tensor to Boxes
anchors_per_im = [Boxes(x) for x in anchors_list]
# TODO it can be simplified to not return duplicated information for
# each image, just like detectron2's own AnchorGenerator
anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)]
unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)]
indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)]
return anchors, unit_lengths, indexes
@META_ARCH_REGISTRY.register()
class TensorMask(nn.Module):
"""
TensorMask model. Creates FPN backbone, anchors and a head for classification
and box regression. Calculates and applies proper losses to class, box, and
masks.
"""
def __init__(self, cfg):
super().__init__()
# fmt: off
self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES
self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES)
# Loss parameters:
self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA
# Inference parameters:
self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST
self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE
# Mask parameters:
self.mask_on = cfg.MODEL.MASK_ON
self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT
self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT,
dtype=torch.float32)
self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
# fmt: on
# build the backbone
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
feature_strides = [x.stride for x in feature_shapes]
# build anchors
self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes)
self.num_anchors = self.anchor_generator.num_cell_anchors[0]
anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0]
self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level]
self.min_anchor_size = min(anchors_min_level) - feature_strides[0]
# head of the TensorMask
self.head = TensorMaskHead(
cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes
)
# box transform
self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS)
self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False)
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DetectionTransform` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
image: Tensor, image in (C, H, W) format.
instances: Instances
Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
losses (dict[str: Tensor]): mapping from a named loss to a tensor
storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
features = [features[f] for f in self.in_features]
# apply the TensorMask head
pred_logits, pred_deltas, pred_masks = self.head(features)
# generate anchors based on features; the generator returns one (identical) copy per image
anchors, unit_lengths, indexes = self.anchor_generator(features)
if self.training:
# get ground truths for class labels and box targets, it will label each anchor
gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth(
anchors, unit_lengths, indexes, gt_instances
)
# compute the loss
return self.losses(
gt_class_info,
gt_delta_info,
gt_mask_info,
num_fg,
pred_logits,
pred_deltas,
pred_masks,
)
else:
# do inference to get the output
results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images)
processed_results = []
for results_im, input_im, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_im.get("height", image_size[0])
width = input_im.get("width", image_size[1])
# this is to do post-processing with the image size
result_box, result_mask = results_im
r = _postprocess(result_box, result_mask, height, width)
processed_results.append({"instances": r})
return processed_results
def losses(
self,
gt_class_info,
gt_delta_info,
gt_mask_info,
num_fg,
pred_logits,
pred_deltas,
pred_masks,
):
"""
Args:
For `gt_class_info`, `gt_delta_info`, `gt_mask_info` and `num_fg` parameters, see
:meth:`TensorMask.get_ground_truth`.
For `pred_logits`, `pred_deltas` and `pred_masks`, see
:meth:`TensorMaskHead.forward`.
Returns:
losses (dict[str: Tensor]): mapping from a named loss to a scalar tensor
storing the loss. Used during training only. The potential dict keys are:
"loss_cls", "loss_box_reg" and "loss_mask".
"""
gt_classes_target, gt_valid_inds = gt_class_info
gt_deltas, gt_fg_inds = gt_delta_info
gt_masks, gt_mask_inds = gt_mask_info
loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device)
# classification and regression
pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(
pred_logits, pred_deltas, self.num_classes
)
loss_cls = (
sigmoid_focal_loss_star_jit(
pred_logits[gt_valid_inds],
gt_classes_target[gt_valid_inds],
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
/ loss_normalizer
)
if num_fg == 0:
loss_box_reg = pred_deltas.sum() * 0
else:
loss_box_reg = (
smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum")
/ loss_normalizer
)
losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
# mask prediction
if self.mask_on:
loss_mask = 0
for lvl in range(self.num_levels):
cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
for anc in range(self.num_anchors):
cur_gt_mask_inds = gt_mask_inds[lvl][anc]
if cur_gt_mask_inds is None:
loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0
else:
cur_mask_size = self.mask_sizes[anc] * cur_level_factor
# TODO maybe there are numerical issues when mask sizes are large
cur_size_divider = torch.tensor(
self.mask_loss_weight / (cur_mask_size ** 2),
dtype=torch.float32,
device=self.device,
)
cur_pred_masks = pred_masks[lvl][anc][
cur_gt_mask_inds[:, 0], # N
:, # V x U
cur_gt_mask_inds[:, 1], # H
cur_gt_mask_inds[:, 2], # W
]
loss_mask += F.binary_cross_entropy_with_logits(
cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), # V, U
gt_masks[lvl][anc].to(dtype=torch.float32),
reduction="sum",
weight=cur_size_divider,
pos_weight=self.mask_pos_weight,
)
losses["loss_mask"] = loss_mask / loss_normalizer
return losses
@torch.no_grad()
def get_ground_truth(self, anchors, unit_lengths, indexes, targets):
"""
Args:
anchors (list[list[Boxes]]): a list of N=#image elements. Each is a
list of #feature level Boxes. The Boxes contains anchors of
this image on the specific feature level.
unit_lengths (list[list[Tensor]]): a list of N=#image elements. Each is a
list of #feature level Tensor. The tensor contains unit lengths for anchors of
this image on the specific feature level.
indexes (list[list[Tensor]]): a list of N=#image elements. Each is a
list of #feature level Tensor. The tensor contains the 5D index of
each anchor, the second dimension means (L, N, H, W, A), where L
is level, N is image, H is height, W is width, and A is anchor.
targets (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
Returns:
gt_class_info (Tensor, Tensor): A pair of two tensors for classification.
The first one is an integer tensor of shape (R, #classes) storing ground-truth
labels for each anchor. R is the total number of anchors in the batch.
The second one is a boolean tensor of shape (R,), indicating which
anchors are valid for loss computation and which are not.
gt_delta_info (Tensor, Tensor): A pair of two tensors for boxes.
The first one, of shape (F, 4). F=#foreground anchors.
The last dimension represents ground-truth box2box transform
targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box.
Only foreground anchors have values in this tensor. Could be `None` if F=0.
The second one, of shape (R,), is a boolean tensor indicating which anchors
are foreground ones used for box regression.
gt_mask_info (list[list[Tensor]], list[list[Tensor]]): A pair of two lists for masks.
The first one is a list of P=#feature level elements. Each is a
list of A=#anchor tensors. Each tensor contains the ground truth
masks of the same size and for the same feature level. Could be `None`.
The second one is a list of P=#feature level elements. Each is a
list of A=#anchor tensors. Each tensor contains the location of the ground truth
masks of the same size and for the same feature level. The second dimension means
(N, H, W), where N is image, H is height, and W is width. Could be `None`.
num_fg (int): F=#foreground anchors, used later for loss normalization.
"""
gt_classes = []
gt_deltas = []
gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
anchors = [Boxes.cat(anchors_i) for anchors_i in anchors]
unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths]
indexes = [cat(indexes_i) for indexes_i in indexes]
num_fg = 0
for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate(
zip(anchors, unit_lengths, indexes, targets)
):
# Initialize all
gt_classes_i = torch.full_like(
unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device
)
# Ground truth classes
has_gt = len(targets_im) > 0
if has_gt:
# Compute the pairwise matrix
gt_matched_inds, anchor_labels = _assignment_rule(
targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size
)
# Find the foreground instances
fg_inds = anchor_labels == 1
fg_anchors = anchors_im[fg_inds]
num_fg += len(fg_anchors)
# Find the ground truths for foreground instances
gt_fg_matched_inds = gt_matched_inds[fg_inds]
# Assign labels for foreground instances
gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds]
# Anchors with label -1 are ignored, others are left as negative
gt_classes_i[anchor_labels == -1] = -1
# Boxes
# Ground truth box regression, only for foregrounds
matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes
# Compute box regression offsets for foregrounds only
gt_deltas_i = self.box2box_transform.get_deltas(
fg_anchors.tensor, matched_gt_boxes.tensor
)
gt_deltas.append(gt_deltas_i)
# Masks
if self.mask_on:
# Compute masks for each level and each anchor
matched_indexes = indexes_im[fg_inds, :]
for lvl in range(self.num_levels):
ids_lvl = matched_indexes[:, 0] == lvl
if torch.any(ids_lvl):
cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
for anc in range(self.num_anchors):
ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc)
if torch.any(ids_lvl_anchor):
gt_masks[lvl][anc].append(
targets_im[
gt_fg_matched_inds[ids_lvl_anchor]
].gt_masks.crop_and_resize(
fg_anchors[ids_lvl_anchor].tensor,
self.mask_sizes[anc] * cur_level_factor,
)
)
# Select (N, H, W) dimensions
gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4]
# Set the image index to the current image
gt_mask_inds_lvl_anc[:, 0] = i
gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc)
gt_classes.append(gt_classes_i)
# Classes and boxes
gt_classes = cat(gt_classes)
gt_valid_inds = gt_classes >= 0
gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes)
gt_classes_target = torch.zeros(
(gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device
)
gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1
gt_deltas = cat(gt_deltas) if gt_deltas else None
# Masks
gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks]
gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds]
return (
(gt_classes_target, gt_valid_inds),
(gt_deltas, gt_fg_inds),
(gt_masks, gt_mask_inds),
num_fg,
)
def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images):
"""
Arguments:
pred_logits, pred_deltas, pred_masks: Same as the output of:
meth:`TensorMaskHead.forward`
anchors, indexes: Same as the input of meth:`TensorMask.get_ground_truth`
images (ImageList): the input images
Returns:
results (List[Instances]): a list of #images elements.
"""
assert len(anchors) == len(images)
results = []
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas]
pred_logits = cat(pred_logits, dim=1)
pred_deltas = cat(pred_deltas, dim=1)
for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)):
# Get the size of the current image
image_size = images.image_sizes[img_idx]
logits_im = pred_logits[img_idx]
deltas_im = pred_deltas[img_idx]
if self.mask_on:
masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks]
else:
masks_im = [None] * self.num_levels
results_im = self.inference_single_image(
logits_im,
deltas_im,
masks_im,
Boxes.cat(anchors_im),
cat(indexes_im),
tuple(image_size),
)
results.append(results_im)
return results
def inference_single_image(
self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Arguments:
pred_logits (list[Tensor]): list of #feature levels. Each entry contains
tensor of size (AxHxW, K)
pred_deltas (list[Tensor]): Same shape as 'pred_logits' except that K becomes 4.
pred_masks (list[list[Tensor]]): List of #feature levels, each is a list of #anchors.
Each entry contains tensor of size (M_i*M_i, H, W). `None` if mask_on=False.
anchors (list[Boxes]): list of #feature levels. Each entry contains
a Boxes object, which contains all the anchors for that
image in that feature level.
image_size (tuple(H, W)): a tuple of the image height and width.
Returns:
Same as `inference`, but for only one image.
"""
pred_logits = pred_logits.flatten().sigmoid_()
# We get top locations across all levels to accelerate the inference speed,
# which does not seem to affect the accuracy.
# First select values above the threshold
logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0]
# Then get the top values
num_topk = min(self.topk_candidates, logits_top_idxs.shape[0])
pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True)
# Keep top k scoring values
pred_prob = pred_prob[:num_topk]
# Keep top k values
top_idxs = logits_top_idxs[topk_idxs[:num_topk]]
# class index
cls_idxs = top_idxs % self.num_classes
# HWA index
top_idxs //= self.num_classes
# predict boxes
pred_boxes = self.box2box_transform.apply_deltas(
pred_deltas[top_idxs], anchors[top_idxs].tensor
)
# apply nms
keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold)
# pick the top ones
keep = keep[: self.detections_im]
results = Instances(image_size)
results.pred_boxes = Boxes(pred_boxes[keep])
results.scores = pred_prob[keep]
results.pred_classes = cls_idxs[keep]
# deal with masks
result_masks, result_anchors = [], None
if self.mask_on:
# index and anchors, useful for masks
top_indexes = indexes[top_idxs]
top_anchors = anchors[top_idxs]
result_indexes = top_indexes[keep]
result_anchors = top_anchors[keep]
# Get masks and do sigmoid
for lvl, _, h, w, anc in result_indexes.tolist():
cur_size = self.mask_sizes[anc] * (2 ** lvl if self.bipyramid_on else 1)
result_masks.append(
torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size))
)
return results, (result_masks, result_anchors)
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
class TensorMaskHead(nn.Module):
def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]):
"""
TensorMask head.
"""
super().__init__()
# fmt: off
self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
in_channels = input_shape[0].channels
num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS
num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS
# box parameters
bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS
# mask parameters
self.mask_on = cfg.MODEL.MASK_ON
self.mask_sizes = mask_sizes
mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS
self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON
self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
# fmt: on
# class subnet
cls_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
cls_subnet.append(
nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = cls_channels
cls_subnet.append(nn.ReLU())
self.cls_subnet = nn.Sequential(*cls_subnet)
self.cls_score = nn.Conv2d(
cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
)
modules_list = [self.cls_subnet, self.cls_score]
# box subnet
bbox_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
bbox_subnet.append(
nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = bbox_channels
bbox_subnet.append(nn.ReLU())
self.bbox_subnet = nn.Sequential(*bbox_subnet)
self.bbox_pred = nn.Conv2d(
cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
modules_list.extend([self.bbox_subnet, self.bbox_pred])
# mask subnet
if self.mask_on:
mask_subnet = []
cur_channels = in_channels
for _ in range(num_convs):
mask_subnet.append(
nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1)
)
cur_channels = mask_channels
mask_subnet.append(nn.ReLU())
self.mask_subnet = nn.Sequential(*mask_subnet)
modules_list.append(self.mask_subnet)
for mask_size in self.mask_sizes:
cur_mask_module = "mask_pred_%02d" % mask_size
self.add_module(
cur_mask_module,
nn.Conv2d(
cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0
),
)
modules_list.append(getattr(self, cur_mask_module))
if self.align_on:
if self.bipyramid_on:
for lvl in range(num_levels):
cur_mask_module = "align2nat_%02d" % lvl
lambda_val = 2 ** lvl
setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val))
# Also the fusing layer, stay at the same channel size
mask_fuse = [
nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
]
self.mask_fuse = nn.Sequential(*mask_fuse)
modules_list.append(self.mask_fuse)
else:
self.align2nat = SwapAlign2Nat(1)
# Initialization
for modules in modules_list:
for layer in modules.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
torch.nn.init.constant_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - 0.01) / 0.01))
torch.nn.init.constant_(self.cls_score.bias, bias_value)
def forward(self, features):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
Each tensor in the list correspond to different feature levels.
Returns:
pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
The tensor predicts the classification probability
at each spatial position for each of the A anchors and K object
classes.
pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
The tensor predicts 4-vector (dx,dy,dw,dh) box
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth box.
pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of
A tensors of shape (N, M_{i,a}, Hi, Wi).
The tensor predicts a dense set of M_ixM_i masks at every location.
"""
pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features]
pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features]
pred_masks = None
if self.mask_on:
mask_feats = [self.mask_subnet(x) for x in features]
if self.bipyramid_on:
mask_feat_high_res = mask_feats[0]
H, W = mask_feat_high_res.shape[-2:]
mask_feats_up = []
for lvl, mask_feat in enumerate(mask_feats):
lambda_val = 2.0 ** lvl
mask_feat_up = mask_feat
if lvl > 0:
mask_feat_up = F.interpolate(
mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False
)
mask_feats_up.append(
self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res)
)
mask_feats = mask_feats_up
pred_masks = []
for lvl, mask_feat in enumerate(mask_feats):
cur_masks = []
for mask_size in self.mask_sizes:
cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size)
cur_mask = cur_mask_module(mask_feat)
if self.align_on:
if self.bipyramid_on:
cur_mask_module = getattr(self, "align2nat_%02d" % lvl)
cur_mask = cur_mask_module(cur_mask)
else:
cur_mask = self.align2nat(cur_mask)
cur_masks.append(cur_mask)
pred_masks.append(cur_masks)
return pred_logits, pred_deltas, pred_masks
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/arch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_tensormask_config
from .arch import TensorMask
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from tensormask import _C
class _SwapAlign2Nat(Function):
@staticmethod
def forward(ctx, X, lambda_val, pad_val):
ctx.lambda_val = lambda_val
ctx.input_shape = X.size()
Y = _C.swap_align2nat_forward(X, lambda_val, pad_val)
return Y
@staticmethod
@once_differentiable
def backward(ctx, gY):
lambda_val = ctx.lambda_val
bs, ch, h, w = ctx.input_shape
gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w)
return gX, None, None
swap_align2nat = _SwapAlign2Nat.apply
class SwapAlign2Nat(nn.Module):
"""
The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174.
Given an input tensor that predicts masks of shape (N, C=VxU, H, W),
apply the op, it will return masks of shape (N, V'xU', H', W') where
the unit lengths of (V, U) and (H, W) are swapped, and the mask representation
is transformed from aligned to natural.
Args:
lambda_val (int): the relative unit length ratio between (V, U) and (H, W),
as we always have larger unit lengths for (V, U) than (H, W),
lambda_val is always >= 1.
pad_val (float): padding value for the values falling outside of the input
tensor; the default is -6, as sigmoid(-6) is ~0, indicating
that there are no masks outside of the tensor.
"""
def __init__(self, lambda_val, pad_val=-6.0):
super(SwapAlign2Nat, self).__init__()
self.lambda_val = lambda_val
self.pad_val = pad_val
def forward(self, X):
return swap_align2nat(X, self.lambda_val, self.pad_val)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "lambda_val=" + str(self.lambda_val)
tmpstr += ", pad_val=" + str(self.pad_val)
tmpstr += ")"
return tmpstr
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/layers/swap_align2nat.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .swap_align2nat import SwapAlign2Nat, swap_align2nat
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
banmo-main
|
third_party/detectron2_old/projects/TensorMask/tensormask/layers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
DeepLab Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import CityscapesSemSegEvaluator, DatasetEvaluators, SemSegEvaluator
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
)
)
augs.append(T.RandomFlip())
return augs
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "sem_seg":
return SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List
import torch
from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler):
"""
Poly learning rate schedule used to train DeepLab.
Paper: DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs.
Reference: https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/utils/train_utils.py#L337 # noqa
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
power: float = 0.9,
constant_ending: float = 0.0,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.power = power
self.constant_ending = constant_ending
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
if self.constant_ending > 0 and warmup_factor == 1.0:
# Constant ending lr.
if (
math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
< self.constant_ending
):
return [base_lr * self.constant_ending for base_lr in self.base_lrs]
return [
base_lr * warmup_factor * math.pow((1.0 - self.last_epoch / self.max_iters), self.power)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
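def _warmup_poly_lr_sketch():
    # Illustrative, uncalled sketch with a dummy parameter: base lr 0.1, no warmup,
    # max_iters=100. After 50 steps the lr equals 0.1 * (1 - 50/100) ** 0.9.
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    sched = WarmupPolyLR(opt, max_iters=100, warmup_iters=0, power=0.9)
    for _ in range(50):
        opt.step()
        sched.step()
    expected = 0.1 * (1.0 - 50 / 100) ** 0.9
    assert abs(opt.param_groups[0]["lr"] - expected) < 1e-8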
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/lr_scheduler.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
def add_deeplab_config(cfg):
"""
Add config for DeepLab.
"""
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
# Used for `poly` learning rate schedule.
cfg.SOLVER.POLY_LR_POWER = 0.9
cfg.SOLVER.POLY_LR_CONSTANT_ENDING = 0.0
# Loss type, choose from `cross_entropy`, `hard_pixel_mining`.
cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE = "hard_pixel_mining"
# DeepLab settings
cfg.MODEL.SEM_SEG_HEAD.PROJECT_FEATURES = ["res2"]
cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS = [48]
cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256
cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS = [6, 12, 18]
cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT = 0.1
cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV = False
# Backbone new configs
cfg.MODEL.RESNETS.RES4_DILATION = 1
cfg.MODEL.RESNETS.RES5_MULTI_GRID = [1, 2, 4]
# ResNet stem type from: `basic`, `deeplab`
cfg.MODEL.RESNETS.STEM_TYPE = "deeplab"
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .build_solver import build_lr_scheduler
from .config import add_deeplab_config
from .resnet import build_resnet_deeplab_backbone
from .semantic_seg import DeepLabV3Head, DeepLabV3PlusHead
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
class DeepLabCE(nn.Module):
"""
Hard pixel mining with cross entropy loss, for semantic segmentation.
This is used in TensorFlow DeepLab frameworks.
Paper: DeeperLab: Single-Shot Image Parser
Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 # noqa
Arguments:
ignore_label: Integer, label to ignore.
top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its
value < 1.0, only compute the loss for the top k percent pixels
(e.g., the top 20% pixels). This is useful for hard pixel mining.
weight: Tensor, a manual rescaling weight given to each class.
"""
def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None):
super(DeepLabCE, self).__init__()
self.top_k_percent_pixels = top_k_percent_pixels
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(
weight=weight, ignore_index=ignore_label, reduction="none"
)
def forward(self, logits, labels, weights=None):
if weights is None:
pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
else:
# Apply per-pixel loss weights.
pixel_losses = self.criterion(logits, labels) * weights
pixel_losses = pixel_losses.contiguous().view(-1)
if self.top_k_percent_pixels == 1.0:
return pixel_losses.mean()
top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel())
pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels)
return pixel_losses.mean()
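def _hard_pixel_mining_sketch():
    # Illustrative, uncalled sketch with random toy tensors: with top_k_percent_pixels=0.5
    # the loss averages only the hardest half of the per-pixel cross-entropy values, so it
    # is at least as large as the plain mean over all pixels.
    logits = torch.randn(2, 3, 4, 4)
    labels = torch.randint(0, 3, (2, 4, 4))
    plain = DeepLabCE(top_k_percent_pixels=1.0)(logits, labels)
    mined = DeepLabCE(top_k_percent_pixels=0.5)(logits, labels)
    assert mined >= plain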
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling import BACKBONE_REGISTRY
from detectron2.modeling.backbone.resnet import (
BasicStem,
BottleneckBlock,
DeformBottleneckBlock,
ResNet,
)
class DeepLabStem(CNNBlockBase):
"""
The DeepLab ResNet stem (layers before the first residual block).
"""
def __init__(self, in_channels=3, out_channels=128, norm="BN"):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(
in_channels,
out_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
self.conv2 = Conv2d(
out_channels // 2,
out_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels // 2),
)
self.conv3 = Conv2d(
out_channels // 2,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
weight_init.c2_msra_fill(self.conv2)
weight_init.c2_msra_fill(self.conv3)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
x = self.conv2(x)
x = F.relu_(x)
x = self.conv3(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
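def _deeplab_stem_sketch():
    # Illustrative, uncalled sketch with an assumed toy input: the stem downsamples by 4
    # overall (stride-2 conv followed by stride-2 max pooling), so a 3x64x64 image becomes
    # a 128x16x16 feature map.
    import torch

    stem = DeepLabStem(in_channels=3, out_channels=128, norm="BN")
    y = stem(torch.randn(1, 3, 64, 64))
    assert y.shape == (1, 128, 16, 16)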
@BACKBONE_REGISTRY.register()
def build_resnet_deeplab_backbone(cfg, input_shape):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETS.NORM
if cfg.MODEL.RESNETS.STEM_TYPE == "basic":
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
elif cfg.MODEL.RESNETS.STEM_TYPE == "deeplab":
stem = DeepLabStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
else:
raise ValueError("Unknown stem type: {}".format(cfg.MODEL.RESNETS.STEM_TYPE))
# fmt: off
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.RESNETS.OUT_FEATURES
depth = cfg.MODEL.RESNETS.DEPTH
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
res4_dilation = cfg.MODEL.RESNETS.RES4_DILATION
res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
res5_multi_grid = cfg.MODEL.RESNETS.RES5_MULTI_GRID
# fmt: on
assert res4_dilation in {1, 2}, "res4_dilation cannot be {}.".format(res4_dilation)
assert res5_dilation in {1, 2, 4}, "res5_dilation cannot be {}.".format(res5_dilation)
if res4_dilation == 2:
# Always dilate res5 if res4 is dilated.
assert res5_dilation == 4
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
# Avoid creating variables without gradients
# It consumes extra memory and may cause allreduce to fail
out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
if stage_idx == 4:
dilation = res4_dilation
elif stage_idx == 5:
dilation = res5_dilation
else:
dilation = 1
first_stride = 1 if idx == 0 or dilation > 1 else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"out_channels": out_channels,
"norm": norm,
}
stage_kargs["bottleneck_channels"] = bottleneck_channels
stage_kargs["stride_in_1x1"] = stride_in_1x1
stage_kargs["dilation"] = dilation
stage_kargs["num_groups"] = num_groups
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
if stage_idx == 5:
stage_kargs.pop("dilation")
stage_kargs["dilation_per_block"] = [dilation * mg for mg in res5_multi_grid]
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features).freeze(freeze_at)
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from detectron2.config import CfgNode
from detectron2.solver import build_lr_scheduler as build_d2_lr_scheduler
from .lr_scheduler import WarmupPolyLR
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupPolyLR":
return WarmupPolyLR(
optimizer,
cfg.SOLVER.MAX_ITER,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
power=cfg.SOLVER.POLY_LR_POWER,
constant_ending=cfg.SOLVER.POLY_LR_CONSTANT_ENDING,
)
else:
return build_d2_lr_scheduler(cfg, optimizer)
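# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# Builds a WarmupPolyLR schedule for a toy optimizer. It assumes the DeepLab solver keys
# (SOLVER.POLY_LR_POWER, SOLVER.POLY_LR_CONSTANT_ENDING) were added by this project's
# `add_deeplab_config`.
def _demo_build_lr_scheduler():
    from detectron2.config import get_cfg
    from .config import add_deeplab_config  # assumed sibling module of this project
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupPolyLR"
    cfg.SOLVER.MAX_ITER = 1000
    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.01)
    scheduler = build_lr_scheduler(cfg, optimizer)
    for _ in range(10):
        optimizer.step()
        scheduler.step()
    return scheduler.get_last_lr()  # current learning rate under warmup + poly decay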
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/build_solver.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import ASPP, Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .loss import DeepLabCE
@SEM_SEG_HEADS_REGISTRY.register()
class DeepLabV3PlusHead(nn.Module):
"""
A semantic segmentation head described in :paper:`DeepLabV3+`.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
project_channels: List[int],
aspp_dilations: List[int],
aspp_dropout: float,
decoder_channels: List[int],
common_stride: int,
norm: Union[str, Callable],
train_size: Optional[Tuple],
loss_weight: float = 1.0,
loss_type: str = "cross_entropy",
ignore_value: int = -1,
num_classes: Optional[int] = None,
use_depthwise_separable_conv: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shape of the input features. They will be ordered by stride
and the last one (with largest stride) is used as the input to the
                decoder (i.e. the ASPP module); the rest are low-level features for
                the intermediate levels of the decoder.
project_channels (list[int]): a list of low-level feature channels.
The length should be len(in_features) - 1.
            aspp_dilations (list[int]): a list of 3 dilations in ASPP.
aspp_dropout (float): apply dropout on the output of ASPP.
decoder_channels (list[int]): a list of output channels of each
decoder stage. It should have the same length as "in_features"
(each element in "in_features" corresponds to one decoder stage).
common_stride (int): output stride of decoder.
norm (str or callable): normalization for all conv layers.
train_size (tuple): (height, width) of training images.
loss_weight (float): loss weight.
            loss_type (str): type of loss function, 2 options:
(1) "cross_entropy" is the standard cross entropy loss.
(2) "hard_pixel_mining" is the loss in DeepLab that samples
top k% hardest pixels.
ignore_value (int): category to be ignored during training.
num_classes (int): number of classes, if set to None, the decoder
will not construct a predictor.
use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
in ASPP and decoder.
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
# fmt: off
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
in_channels = [x[1].channels for x in input_shape]
in_strides = [x[1].stride for x in input_shape]
aspp_channels = decoder_channels[-1]
self.ignore_value = ignore_value
self.common_stride = common_stride # output stride
self.loss_weight = loss_weight
self.loss_type = loss_type
self.decoder_only = num_classes is None
self.use_depthwise_separable_conv = use_depthwise_separable_conv
# fmt: on
assert (
len(project_channels) == len(self.in_features) - 1
), "Expected {} project_channels, got {}".format(
len(self.in_features) - 1, len(project_channels)
)
assert len(decoder_channels) == len(
self.in_features
), "Expected {} decoder_channels, got {}".format(
len(self.in_features), len(decoder_channels)
)
self.decoder = nn.ModuleDict()
use_bias = norm == ""
for idx, in_channel in enumerate(in_channels):
decoder_stage = nn.ModuleDict()
if idx == len(self.in_features) - 1:
# ASPP module
if train_size is not None:
train_h, train_w = train_size
encoder_stride = in_strides[-1]
if train_h % encoder_stride or train_w % encoder_stride:
raise ValueError("Crop size need to be divisible by encoder stride.")
pool_h = train_h // encoder_stride
pool_w = train_w // encoder_stride
pool_kernel_size = (pool_h, pool_w)
else:
pool_kernel_size = None
project_conv = ASPP(
in_channel,
aspp_channels,
aspp_dilations,
norm=norm,
activation=F.relu,
pool_kernel_size=pool_kernel_size,
dropout=aspp_dropout,
use_depthwise_separable_conv=use_depthwise_separable_conv,
)
fuse_conv = None
else:
project_conv = Conv2d(
in_channel,
project_channels[idx],
kernel_size=1,
bias=use_bias,
norm=get_norm(norm, project_channels[idx]),
activation=F.relu,
)
weight_init.c2_xavier_fill(project_conv)
if use_depthwise_separable_conv:
# We use a single 5x5 DepthwiseSeparableConv2d to replace
# 2 3x3 Conv2d since they have the same receptive field,
# proposed in :paper:`Panoptic-DeepLab`.
fuse_conv = DepthwiseSeparableConv2d(
project_channels[idx] + decoder_channels[idx + 1],
decoder_channels[idx],
kernel_size=5,
padding=2,
norm1=norm,
activation1=F.relu,
norm2=norm,
activation2=F.relu,
)
else:
fuse_conv = nn.Sequential(
Conv2d(
project_channels[idx] + decoder_channels[idx + 1],
decoder_channels[idx],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[idx]),
activation=F.relu,
),
Conv2d(
decoder_channels[idx],
decoder_channels[idx],
kernel_size=3,
padding=1,
bias=use_bias,
norm=get_norm(norm, decoder_channels[idx]),
activation=F.relu,
),
)
weight_init.c2_xavier_fill(fuse_conv[0])
weight_init.c2_xavier_fill(fuse_conv[1])
decoder_stage["project_conv"] = project_conv
decoder_stage["fuse_conv"] = fuse_conv
self.decoder[self.in_features[idx]] = decoder_stage
if not self.decoder_only:
self.predictor = Conv2d(
decoder_channels[0], num_classes, kernel_size=1, stride=1, padding=0
)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if self.loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value)
elif self.loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
else:
raise ValueError("Unexpected loss type: %s" % self.loss_type)
@classmethod
def from_config(cls, cfg, input_shape):
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_size = cfg.INPUT.CROP.SIZE
else:
train_size = None
decoder_channels = [cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM] * (
len(cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES) - 1
) + [cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS]
ret = dict(
input_shape={
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
project_channels=cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS,
aspp_dilations=cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS,
aspp_dropout=cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT,
decoder_channels=decoder_channels,
common_stride=cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE,
norm=cfg.MODEL.SEM_SEG_HEAD.NORM,
train_size=train_size,
loss_weight=cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
loss_type=cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE,
ignore_value=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV,
)
return ret
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
y = self.layers(features)
if self.decoder_only:
# Output from self.layers() only contains decoder feature.
return y
if self.training:
return None, self.losses(y, targets)
else:
y = F.interpolate(
y, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return y, {}
def layers(self, features):
# Reverse feature maps into top-down order (from low to high resolution)
for f in self.in_features[::-1]:
x = features[f]
proj_x = self.decoder[f]["project_conv"](x)
if self.decoder[f]["fuse_conv"] is None:
                # This is the ASPP module
y = proj_x
else:
# Upsample y
y = F.interpolate(y, size=proj_x.size()[2:], mode="bilinear", align_corners=False)
y = torch.cat([proj_x, y], dim=1)
y = self.decoder[f]["fuse_conv"](y)
if not self.decoder_only:
y = self.predictor(y)
return y
def losses(self, predictions, targets):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
@SEM_SEG_HEADS_REGISTRY.register()
class DeepLabV3Head(nn.Module):
"""
A semantic segmentation head described in :paper:`DeepLabV3`.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
# fmt: off
self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
in_channels = [input_shape[f].channels for f in self.in_features]
aspp_channels = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS
aspp_dilations = cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS
self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE # output stride
norm = cfg.MODEL.SEM_SEG_HEAD.NORM
self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
self.loss_type = cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE
train_crop_size = cfg.INPUT.CROP.SIZE
aspp_dropout = cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT
use_depthwise_separable_conv = cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
# fmt: on
assert len(self.in_features) == 1
assert len(in_channels) == 1
# ASPP module
if cfg.INPUT.CROP.ENABLED:
assert cfg.INPUT.CROP.TYPE == "absolute"
train_crop_h, train_crop_w = train_crop_size
if train_crop_h % self.common_stride or train_crop_w % self.common_stride:
raise ValueError("Crop size need to be divisible by output stride.")
pool_h = train_crop_h // self.common_stride
pool_w = train_crop_w // self.common_stride
pool_kernel_size = (pool_h, pool_w)
else:
pool_kernel_size = None
self.aspp = ASPP(
in_channels[0],
aspp_channels,
aspp_dilations,
norm=norm,
activation=F.relu,
pool_kernel_size=pool_kernel_size,
dropout=aspp_dropout,
use_depthwise_separable_conv=use_depthwise_separable_conv,
)
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
nn.init.normal_(self.predictor.weight, 0, 0.001)
nn.init.constant_(self.predictor.bias, 0)
if self.loss_type == "cross_entropy":
self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value)
elif self.loss_type == "hard_pixel_mining":
self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
else:
raise ValueError("Unexpected loss type: %s" % self.loss_type)
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
x = features[self.in_features[0]]
x = self.aspp(x)
x = self.predictor(x)
if self.training:
return None, self.losses(x, targets)
else:
x = F.interpolate(
x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return x, {}
def losses(self, predictions, targets):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = self.loss(predictions, targets)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
|
banmo-main
|
third_party/detectron2_old/projects/DeepLab/deeplab/semantic_seg.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
PointRend Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import torch
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.point_rend import ColorAugSSDTransform, add_pointrend_config
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(
T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
)
)
if cfg.INPUT.COLOR_AUG_SSD:
augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
augs.append(T.RandomFlip())
return augs
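# --- Illustrative usage sketch (added for documentation; not part of the original script) ---
# Shows the augmentation list produced above for a default config extended with the
# PointRend keys: with COLOR_AUG_SSD enabled it contains ResizeShortestEdge,
# ColorAugSSDTransform and RandomFlip (plus a crop op when INPUT.CROP.ENABLED is True).
def _demo_build_sem_seg_train_aug():
    cfg = get_cfg()
    add_pointrend_config(cfg)
    cfg.INPUT.COLOR_AUG_SSD = True
    return build_sem_seg_train_aug(cfg)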
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if evaluator_type == "coco":
return COCOEvaluator(dataset_name, output_dir=output_folder)
if evaluator_type == "sem_seg":
return SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_pointrend_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import BitMasks, Boxes
"""
Shape shorthand in this module:
N: minibatch dimension size, i.e. the number of RoIs for instance segmentation or the
    number of images for semantic segmentation.
R: number of ROIs, combined over all images, in the minibatch
P: number of points
"""
def _as_tensor(x):
"""
An equivalent of `torch.as_tensor`, but works under tracing.
"""
if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
return torch.stack(x)
return torch.as_tensor(x)
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
    Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside the
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
            interpolation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
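# --- Illustrative sketch (added for documentation; not part of the original file) ---
# point_sample reads features at normalized [0, 1] x [0, 1] locations. Sampling the four
# cell centers of a 2x2 feature map returns the original values.
def _demo_point_sample():
    feature_map = torch.arange(4, dtype=torch.float32).view(1, 1, 2, 2)  # (N, C, H, W)
    centers = torch.tensor([[[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]])
    # -> tensor([[[0., 1., 2., 3.]]]) with shape (N, C, P) = (1, 1, 4)
    return point_sample(feature_map, centers, align_corners=False)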
def generate_regular_grid_point_coords(R, side_size, device):
"""
Generate regular square grid of points in [0, 1] x [0, 1] coordinate space.
Args:
R (int): The number of grids to sample, one for each region.
side_size (int): The side size of the regular grid.
device (torch.device): Desired device of returned tensor.
Returns:
(Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates
for the regular grids.
"""
aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)
r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)
return r.view(1, -1, 2).expand(R, -1, -1)
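# --- Illustrative sketch (added for documentation; not part of the original file) ---
# A 3x3 regular grid replicated for two regions; all coordinates lie strictly inside (0, 1).
def _demo_generate_regular_grid_point_coords():
    coords = generate_regular_grid_point_coords(R=2, side_size=3, device=torch.device("cpu"))
    return coords.shape  # torch.Size([2, 9, 2])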
def get_uncertain_point_coords_with_randomness(
coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio
):
"""
    Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties
    are calculated for each point using the 'uncertainty_func' function that takes a point's logit
prediction as input.
See PointRend paper for details.
Args:
coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for
class-specific or class-agnostic prediction.
uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that
contains logit predictions for P points and returns their uncertainties as a Tensor of
shape (N, 1, P).
num_points (int): The number of points P to sample.
oversample_ratio (int): Oversampling parameter.
        importance_sample_ratio (float): Ratio of points that are sampled via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
"""
assert oversample_ratio >= 1
assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0
num_boxes = coarse_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device)
point_logits = point_sample(coarse_logits, point_coords, align_corners=False)
# It is crucial to calculate uncertainty based on the sampled prediction value for the points.
# Calculating uncertainties of the coarse predictions first and sampling them for points leads
# to incorrect results.
# To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between
# two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value.
# However, if we calculate uncertainties for the coarse predictions first,
# both will have -1 uncertainty, and the sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
num_boxes, num_uncertain_points, 2
)
if num_random_points > 0:
point_coords = cat(
[
point_coords,
torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device),
],
dim=1,
)
return point_coords
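# --- Illustrative sketch (added for documentation; not part of the original file) ---
# Samples 16 points per mask from class-agnostic coarse logits, using -|logit| as the
# uncertainty (most uncertain near the 0-logit decision boundary), as in PointRend.
def _demo_get_uncertain_point_coords_with_randomness():
    coarse_logits = torch.randn(2, 1, 7, 7)
    point_coords = get_uncertain_point_coords_with_randomness(
        coarse_logits,
        lambda logits: -torch.abs(logits),
        num_points=16,
        oversample_ratio=3,
        importance_sample_ratio=0.75,
    )
    return point_coords.shape  # torch.Size([2, 16, 2])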
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords
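# --- Illustrative sketch (added for documentation; not part of the original file) ---
# Picks the 3 most uncertain cells of a 4x4 uncertainty grid and returns both their flat
# indices in [0, 16) and their normalized (x, y) coordinates.
def _demo_get_uncertain_point_coords_on_grid():
    uncertainty_map = torch.rand(1, 1, 4, 4)
    point_indices, point_coords = get_uncertain_point_coords_on_grid(uncertainty_map, num_points=3)
    return point_indices.shape, point_coords.shape  # (1, 3) and (1, 3, 2)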
def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):
"""
Get features from feature maps in `features_list` that correspond to specific point coordinates
inside each bounding box from `boxes`.
Args:
features_list (list[Tensor]): A list of feature map tensors to get features from.
feature_scales (list[float]): A list of scales for tensors in `features_list`.
boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all
together.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled
from all features maps in feature_list for P sampled points for all R boxes in `boxes`.
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level
coordinates of P points.
"""
cat_boxes = Boxes.cat(boxes)
num_boxes = [b.tensor.size(0) for b in boxes]
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)
point_features = []
for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):
point_features_per_image = []
for idx_feature, feature_map in enumerate(features_list):
h, w = feature_map.shape[-2:]
scale = _as_tensor([w, h]) / feature_scales[idx_feature]
point_coords_scaled = point_coords_wrt_image_per_image / scale.to(feature_map.device)
point_features_per_image.append(
point_sample(
feature_map[idx_img].unsqueeze(0),
point_coords_scaled.unsqueeze(0),
align_corners=False,
)
.squeeze(0)
.transpose(1, 0)
)
point_features.append(cat(point_features_per_image, dim=1))
return cat(point_features, dim=0), point_coords_wrt_image
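# --- Illustrative sketch (added for documentation; not part of the original file) ---
# One image with a single 8-channel feature map at 1/4 resolution and one box; 6 points
# sampled per box yield a (R, C, P) feature tensor and their image-level coordinates.
def _demo_point_sample_fine_grained_features():
    features_list = [torch.randn(1, 8, 16, 16)]
    feature_scales = [0.25]
    boxes = [Boxes(torch.tensor([[8.0, 8.0, 40.0, 40.0]]))]
    point_coords = torch.rand(1, 6, 2)  # box-normalized coordinates
    point_features, coords_wrt_image = point_sample_fine_grained_features(
        features_list, feature_scales, boxes, point_coords
    )
    return point_features.shape, coords_wrt_image.shape  # (1, 8, 6) and (1, 6, 2)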
def get_point_coords_wrt_image(boxes_coords, point_coords):
"""
    Convert box-normalized [0, 1] x [0, 1] point coordinates to image-level coordinates.
Args:
        boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding box
            coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains
            image-level coordinates of P sampled points.
"""
with torch.no_grad():
point_coords_wrt_image = point_coords.clone()
point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]
point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]
return point_coords_wrt_image
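# --- Illustrative sketch (added for documentation; not part of the original file) ---
# The box center (0.5, 0.5) of box (10, 20, 30, 60) maps to the image point (20, 40).
def _demo_get_point_coords_wrt_image():
    boxes_coords = torch.tensor([[10.0, 20.0, 30.0, 60.0]])  # (x0, y0, x1, y1)
    point_coords = torch.tensor([[[0.5, 0.5]]])
    return get_point_coords_wrt_image(boxes_coords, point_coords)  # tensor([[[20., 40.]]])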
def sample_point_labels(instances, point_coords):
"""
Sample point labels from ground truth mask given point_coords.
Args:
instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. So, the i-th element of the list contains R_i objects and R_1 + ... + R_N is
equal to R. The ground-truth gt_masks in each instance will be used to compute labels.
        point_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of
instances and P is the number of points for each instance. The coordinates are in
the absolute image pixel coordinate space, i.e. [0, H] x [0, W].
Returns:
Tensor: A tensor of shape (R, P) that contains the labels of P sampled points.
"""
with torch.no_grad():
gt_mask_logits = []
point_coords_splits = torch.split(
point_coords, [len(instances_per_image) for instances_per_image in instances]
)
for i, instances_per_image in enumerate(instances):
if len(instances_per_image) == 0:
continue
assert isinstance(
instances_per_image.gt_masks, BitMasks
), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'."
gt_bit_masks = instances_per_image.gt_masks.tensor
h, w = instances_per_image.gt_masks.image_size
scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device)
points_coord_grid_sample_format = point_coords_splits[i] / scale
gt_mask_logits.append(
point_sample(
gt_bit_masks.to(torch.float32).unsqueeze(1),
points_coord_grid_sample_format,
align_corners=False,
).squeeze(1)
)
point_labels = cat(gt_mask_logits)
return point_labels
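# --- Illustrative sketch (added for documentation; not part of the original file) ---
# Two all-foreground 4x4 bit masks in a single image; labels sampled at 5 random
# image-space points per instance come back as a (R, P) = (2, 5) tensor of values in [0, 1].
def _demo_sample_point_labels():
    from detectron2.structures import Instances
    instances = Instances((4, 4))
    instances.gt_masks = BitMasks(torch.ones(2, 4, 4, dtype=torch.bool))
    point_coords = torch.rand(2, 5, 2) * 4.0  # absolute pixel coordinates in [0, 4] x [0, 4]
    return sample_point_labels([instances], point_coords).shape  # torch.Size([2, 5])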
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/point_features.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
import numpy as np
from typing import Dict, List, Tuple
import fvcore.nn.weight_init as weight_init
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, cat, interpolate
from detectron2.modeling import ROI_MASK_HEAD_REGISTRY
from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference, mask_rcnn_loss
from detectron2.structures import Boxes
from .point_features import (
generate_regular_grid_point_coords,
get_point_coords_wrt_image,
get_uncertain_point_coords_on_grid,
get_uncertain_point_coords_with_randomness,
point_sample,
point_sample_fine_grained_features,
sample_point_labels,
)
from .point_head import build_point_head, roi_mask_point_loss
def calculate_uncertainty(logits, classes):
"""
    We estimate uncertainty as the L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
        classes (list): A list of length R that contains either the predicted or ground truth class
            for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
return -(torch.abs(gt_class_logits))
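# --- Illustrative sketch (added for documentation; not part of the original file) ---
# For class-specific logits, the uncertainty of each point is -|logit| of its own class,
# so points near the decision boundary (logit ~ 0) receive the highest scores.
def _demo_calculate_uncertainty():
    logits = torch.randn(2, 3, 5)           # 2 masks, 3 classes, 5 points
    classes = torch.tensor([0, 2])          # one class index per predicted mask
    return calculate_uncertainty(logits, classes).shape  # torch.Size([2, 1, 5])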
class ConvFCHead(nn.Module):
"""
A mask head with fully connected layers. Given pooled features it first reduces channels and
spatial dimensions with conv layers and then uses FC layers to predict coarse masks analogously
to the standard box head.
"""
_version = 2
@configurable
def __init__(
self, input_shape: ShapeSpec, *, conv_dim: int, fc_dims: List[int], output_shape: Tuple[int]
):
"""
Args:
conv_dim: the output dimension of the conv layers
fc_dims: a list of N>0 integers representing the output dimensions of N FC layers
output_shape: shape of the output mask prediction
"""
super().__init__()
# fmt: off
input_channels = input_shape.channels
input_h = input_shape.height
input_w = input_shape.width
self.output_shape = output_shape
# fmt: on
self.conv_layers = []
if input_channels > conv_dim:
self.reduce_channel_dim_conv = Conv2d(
input_channels,
conv_dim,
kernel_size=1,
stride=1,
padding=0,
bias=True,
activation=F.relu,
)
self.conv_layers.append(self.reduce_channel_dim_conv)
self.reduce_spatial_dim_conv = Conv2d(
conv_dim, conv_dim, kernel_size=2, stride=2, padding=0, bias=True, activation=F.relu
)
self.conv_layers.append(self.reduce_spatial_dim_conv)
input_dim = conv_dim * input_h * input_w
input_dim //= 4
self.fcs = []
for k, fc_dim in enumerate(fc_dims):
fc = nn.Linear(input_dim, fc_dim)
self.add_module("fc{}".format(k + 1), fc)
self.fcs.append(fc)
input_dim = fc_dim
output_dim = int(np.prod(self.output_shape))
self.prediction = nn.Linear(fc_dims[-1], output_dim)
# use normal distribution initialization for mask prediction layer
nn.init.normal_(self.prediction.weight, std=0.001)
nn.init.constant_(self.prediction.bias, 0)
for layer in self.conv_layers:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
@classmethod
def from_config(cls, cfg, input_shape):
output_shape = (
cfg.MODEL.ROI_HEADS.NUM_CLASSES,
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION,
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION,
)
fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM
num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC
ret = dict(
input_shape=input_shape,
conv_dim=cfg.MODEL.ROI_MASK_HEAD.CONV_DIM,
fc_dims=[fc_dim] * num_fc,
output_shape=output_shape,
)
return ret
def forward(self, x):
N = x.shape[0]
for layer in self.conv_layers:
x = layer(x)
x = torch.flatten(x, start_dim=1)
for layer in self.fcs:
x = F.relu(layer(x))
output_shape = [N] + list(self.output_shape)
return self.prediction(x).view(*output_shape)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
logger = logging.getLogger(__name__)
logger.warning(
"Weight format of PointRend models have changed! "
"Applying automatic conversion now ..."
)
for k in list(state_dict.keys()):
newk = k
if k.startswith(prefix + "coarse_mask_fc"):
newk = k.replace(prefix + "coarse_mask_fc", prefix + "fc")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
@ROI_MASK_HEAD_REGISTRY.register()
class PointRendMaskHead(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()}
# point head
self._init_point_head(cfg, input_shape)
# coarse mask head
self.roi_pooler_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES
self.roi_pooler_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()}
in_channels = np.sum([input_shape[f].channels for f in self.roi_pooler_in_features])
self._init_roi_head(
cfg,
ShapeSpec(
channels=in_channels,
width=self.roi_pooler_size,
height=self.roi_pooler_size,
),
)
def _init_roi_head(self, cfg, input_shape):
self.coarse_head = ConvFCHead(cfg, input_shape)
def _init_point_head(self, cfg, input_shape):
# fmt: off
self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON
if not self.mask_point_on:
return
assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
        # next three parameters are used in the adaptive subdivision inference procedure
self.mask_point_subdivision_init_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION
self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
# An optimization to skip unused subdivision steps: if after subdivision, all pixels on
# the mask will be selected and recomputed anyway, we should just double our init_resolution
while (
4 * self.mask_point_subdivision_init_resolution ** 2
<= self.mask_point_subdivision_num_points
):
self.mask_point_subdivision_init_resolution *= 2
self.mask_point_subdivision_steps -= 1
def forward(self, features, instances):
"""
Args:
features (dict[str, Tensor]): a dict of image-level features
instances (list[Instances]): proposals in training; detected
instances in inference
"""
if self.training:
proposal_boxes = [x.proposal_boxes for x in instances]
coarse_mask = self.coarse_head(self._roi_pooler(features, proposal_boxes))
losses = {"loss_mask": mask_rcnn_loss(coarse_mask, instances)}
if not self.mask_point_on:
return losses
point_coords, point_labels = self._sample_train_points(coarse_mask, instances)
point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords)
point_logits = self._get_point_logits(
point_fine_grained_features, point_coords, coarse_mask
)
losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels)
return losses
else:
pred_boxes = [x.pred_boxes for x in instances]
coarse_mask = self.coarse_head(self._roi_pooler(features, pred_boxes))
return self._subdivision_inference(features, coarse_mask, instances)
def _roi_pooler(self, features: List[Tensor], boxes: List[Boxes]):
"""
Extract per-box feature. This is similar to RoIAlign(sampling_ratio=1) except:
1. It's implemented by point_sample
        2. It pools features across all levels and concatenates them, while typically
            RoIAlign selects one level for every box. However, in the config we only use
one level (p2) so there is no difference.
Returns:
Tensor of shape (R, C, pooler_size, pooler_size) where R is the total number of boxes
"""
features_list = [features[k] for k in self.roi_pooler_in_features]
features_scales = [self._feature_scales[k] for k in self.roi_pooler_in_features]
num_boxes = sum(x.tensor.size(0) for x in boxes)
output_size = self.roi_pooler_size
point_coords = generate_regular_grid_point_coords(num_boxes, output_size, boxes[0].device)
        # For regular grids of points, this function is equivalent to `len(features_list)` calls
        # of `ROIAlign` (with `SAMPLING_RATIO=1`), and concatenating the results.
roi_features, _ = point_sample_fine_grained_features(
features_list, features_scales, boxes, point_coords
)
return roi_features.view(num_boxes, roi_features.shape[1], output_size, output_size)
def _sample_train_points(self, coarse_mask, instances):
assert self.training
gt_classes = cat([x.gt_classes for x in instances])
with torch.no_grad():
# sample point_coords
point_coords = get_uncertain_point_coords_with_randomness(
coarse_mask,
lambda logits: calculate_uncertainty(logits, gt_classes),
self.mask_point_train_num_points,
self.mask_point_oversample_ratio,
self.mask_point_importance_sample_ratio,
)
# sample point_labels
proposal_boxes = [x.proposal_boxes for x in instances]
cat_boxes = Boxes.cat(proposal_boxes)
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
point_labels = sample_point_labels(instances, point_coords_wrt_image)
return point_coords, point_labels
def _point_pooler(self, features, proposal_boxes, point_coords):
point_features_list = [features[k] for k in self.mask_point_in_features]
point_features_scales = [self._feature_scales[k] for k in self.mask_point_in_features]
# sample image-level features
point_fine_grained_features, _ = point_sample_fine_grained_features(
point_features_list, point_features_scales, proposal_boxes, point_coords
)
return point_fine_grained_features
def _get_point_logits(self, point_fine_grained_features, point_coords, coarse_mask):
coarse_features = point_sample(coarse_mask, point_coords, align_corners=False)
point_logits = self.point_head(point_fine_grained_features, coarse_features)
return point_logits
def _subdivision_inference(self, features, mask_representations, instances):
assert not self.training
pred_boxes = [x.pred_boxes for x in instances]
pred_classes = cat([x.pred_classes for x in instances])
mask_logits = None
# +1 here to include an initial step to generate the coarsest mask
# prediction with init_resolution, when mask_logits is None.
# We compute initial mask by sampling on a regular grid. coarse_mask
# can be used as initial mask as well, but it's typically very low-res
# so it will be completely overwritten during subdivision anyway.
for _ in range(self.mask_point_subdivision_steps + 1):
if mask_logits is None:
point_coords = generate_regular_grid_point_coords(
pred_classes.size(0),
self.mask_point_subdivision_init_resolution,
pred_boxes[0].device,
)
else:
mask_logits = interpolate(
mask_logits, scale_factor=2, mode="bilinear", align_corners=False
)
uncertainty_map = calculate_uncertainty(mask_logits, pred_classes)
point_indices, point_coords = get_uncertain_point_coords_on_grid(
uncertainty_map, self.mask_point_subdivision_num_points
)
# Run the point head for every point in point_coords
fine_grained_features = self._point_pooler(features, pred_boxes, point_coords)
point_logits = self._get_point_logits(
fine_grained_features, point_coords, mask_representations
)
if mask_logits is None:
# Create initial mask_logits using point_logits on this regular grid
R, C, _ = point_logits.shape
mask_logits = point_logits.reshape(
R,
C,
self.mask_point_subdivision_init_resolution,
self.mask_point_subdivision_init_resolution,
)
                # The subdivision code will fail with an empty list of boxes
if len(pred_classes) == 0:
mask_rcnn_inference(mask_logits, instances)
return instances
else:
# Put point predictions to the right places on the upsampled grid.
R, C, H, W = mask_logits.shape
point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
mask_logits = (
mask_logits.reshape(R, C, H * W)
.scatter_(2, point_indices, point_logits)
.view(R, C, H, W)
)
mask_rcnn_inference(mask_logits, instances)
return instances
@ROI_MASK_HEAD_REGISTRY.register()
class ImplicitPointRendMaskHead(PointRendMaskHead):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__(cfg, input_shape)
def _init_roi_head(self, cfg, input_shape):
assert hasattr(self, "num_params"), "Please initialize point_head first!"
self.parameter_head = ConvFCHead(cfg, input_shape, output_shape=(self.num_params,))
self.regularizer = cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER
def _init_point_head(self, cfg, input_shape):
# fmt: off
self.mask_point_on = True # always on
assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
        # next two parameters are used in the adaptive subdivision inference procedure
self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
self.num_params = self.point_head.num_params
# inference parameters
self.mask_point_subdivision_init_resolution = int(
math.sqrt(self.mask_point_subdivision_num_points)
)
assert (
self.mask_point_subdivision_init_resolution
* self.mask_point_subdivision_init_resolution
== self.mask_point_subdivision_num_points
)
def forward(self, features, instances):
"""
Args:
features (dict[str, Tensor]): a dict of image-level features
instances (list[Instances]): proposals in training; detected
instances in inference
"""
if self.training:
proposal_boxes = [x.proposal_boxes for x in instances]
parameters = self.parameter_head(self._roi_pooler(features, proposal_boxes))
losses = {"loss_l2": self.regularizer * (parameters ** 2).mean()}
point_coords, point_labels = self._uniform_sample_train_points(instances)
point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords)
point_logits = self._get_point_logits(
point_fine_grained_features, point_coords, parameters
)
losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels)
return losses
else:
pred_boxes = [x.pred_boxes for x in instances]
parameters = self.parameter_head(self._roi_pooler(features, pred_boxes))
return self._subdivision_inference(features, parameters, instances)
def _uniform_sample_train_points(self, instances):
assert self.training
proposal_boxes = [x.proposal_boxes for x in instances]
cat_boxes = Boxes.cat(proposal_boxes)
# uniform sample
point_coords = torch.rand(
len(cat_boxes), self.mask_point_train_num_points, 2, device=cat_boxes.tensor.device
)
# sample point_labels
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
point_labels = sample_point_labels(instances, point_coords_wrt_image)
return point_coords, point_labels
def _get_point_logits(self, fine_grained_features, point_coords, parameters):
return self.point_head(fine_grained_features, point_coords, parameters)
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/mask_head.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_pointrend_config(cfg):
"""
Add config for PointRend.
"""
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
    # Color augmentation from the SSD paper for the semantic segmentation model during training.
cfg.INPUT.COLOR_AUG_SSD = False
# Names of the input feature maps to be used by a coarse mask head.
cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",)
cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
# The side size of a coarse mask head prediction.
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
# True if point head is used.
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False
cfg.MODEL.POINT_HEAD = CN()
cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
# Names of the input feature maps to be used by a mask point head.
cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",)
# Number of points sampled during training for a mask point head.
cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
# original paper.
cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
    # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
# the original paper.
cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
# Number of subdivision steps during inference.
cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
# Maximum number of points selected at each subdivision step (N).
cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
cfg.MODEL.POINT_HEAD.FC_DIM = 256
cfg.MODEL.POINT_HEAD.NUM_FC = 3
cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
    # If True, then coarse prediction features are used as input to each layer in PointRend's MLP.
cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead"
"""
Add config for Implicit PointRend.
"""
cfg.MODEL.IMPLICIT_POINTREND = CN()
cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED = True
cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED = True
cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER = 0.00001
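# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# The usual pattern: extend the default config with the PointRend keys before merging a
# project yaml or setting options directly.
def _demo_add_pointrend_config():
    from detectron2.config import get_cfg
    cfg = get_cfg()
    add_pointrend_config(cfg)
    cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
    cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
    return cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS  # 196 (= 14 * 14) by default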
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .config import add_pointrend_config
from .mask_head import PointRendMaskHead, ImplicitPointRendMaskHead
from .semantic_seg import PointRendSemSegHead
from .color_augmentation import ColorAugSSDTransform
from . import roi_heads as _ # only registration
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
POINT_HEAD_REGISTRY = Registry("POINT_HEAD")
POINT_HEAD_REGISTRY.__doc__ = """
Registry for point heads, which make predictions for a given set of per-point features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def roi_mask_point_loss(mask_logits, instances, point_labels):
"""
Compute the point-based loss for instance segmentation mask predictions
given point-wise mask prediction and its corresponding point-wise labels.
Args:
mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images, C is the
number of foreground classes, and P is the number of points sampled for each mask.
The values are logits.
instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, the i-th
            element of the list contains R_i objects and R_1 + ... + R_N is equal to R.
The ground-truth labels (class, box, mask, ...) associated with each instance are stored
in fields.
point_labels (Tensor): A tensor of shape (R, P), where R is the total number of
predicted masks and P is the number of points for each mask.
Labels with value of -1 will be ignored.
Returns:
point_loss (Tensor): A scalar tensor containing the loss.
"""
with torch.no_grad():
cls_agnostic_mask = mask_logits.size(1) == 1
total_num_masks = mask_logits.size(0)
gt_classes = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
if not cls_agnostic_mask:
gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
gt_classes.append(gt_classes_per_image)
gt_mask_logits = point_labels
point_ignores = point_labels == -1
if gt_mask_logits.shape[0] == 0:
return mask_logits.sum() * 0
assert gt_mask_logits.numel() > 0, gt_mask_logits.shape
if cls_agnostic_mask:
mask_logits = mask_logits[:, 0]
else:
indices = torch.arange(total_num_masks)
gt_classes = cat(gt_classes, dim=0)
mask_logits = mask_logits[indices, gt_classes]
# Log the training accuracy (using gt classes and 0.0 threshold for the logits)
mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8)
mask_accurate = mask_accurate[~point_ignores]
mask_accuracy = mask_accurate.nonzero().size(0) / mask_accurate.numel()
get_event_storage().put_scalar("point/accuracy", mask_accuracy)
point_loss = F.binary_cross_entropy_with_logits(
mask_logits, gt_mask_logits.to(dtype=torch.float32), weight=~point_ignores, reduction="mean"
)
return point_loss
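# --- Illustrative sketch (added for documentation; not part of the original file) ---
# Computes the point loss for two instances with class-specific logits. The function logs
# an accuracy scalar, so it must run inside an EventStorage context; the labels here are
# random 0/1 values standing in for the output of sample_point_labels.
def _demo_roi_mask_point_loss():
    from detectron2.structures import Instances
    from detectron2.utils.events import EventStorage
    instances = Instances((16, 16))
    instances.gt_classes = torch.tensor([1, 0])
    mask_logits = torch.randn(2, 3, 10)                  # (R, C, P)
    point_labels = torch.randint(0, 2, (2, 10)).float()  # (R, P)
    with EventStorage():
        return roi_mask_point_loss(mask_logits, [instances], point_labels)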
@POINT_HEAD_REGISTRY.register()
class StandardPointHead(nn.Module):
"""
A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head
takes both fine-grained and coarse prediction features as its input.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
            fc_dim: the output dimension of each FC layer
num_fc: the number of FC layers
coarse_pred_each_layer: if True, coarse prediction features are concatenated to each
layer's input
"""
super(StandardPointHead, self).__init__()
# fmt: off
num_classes = cfg.MODEL.POINT_HEAD.NUM_CLASSES
fc_dim = cfg.MODEL.POINT_HEAD.FC_DIM
num_fc = cfg.MODEL.POINT_HEAD.NUM_FC
cls_agnostic_mask = cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK
self.coarse_pred_each_layer = cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER
input_channels = input_shape.channels
# fmt: on
fc_dim_in = input_channels + num_classes
self.fc_layers = []
for k in range(num_fc):
fc = nn.Conv1d(fc_dim_in, fc_dim, kernel_size=1, stride=1, padding=0, bias=True)
self.add_module("fc{}".format(k + 1), fc)
self.fc_layers.append(fc)
fc_dim_in = fc_dim
fc_dim_in += num_classes if self.coarse_pred_each_layer else 0
num_mask_classes = 1 if cls_agnostic_mask else num_classes
self.predictor = nn.Conv1d(fc_dim_in, num_mask_classes, kernel_size=1, stride=1, padding=0)
for layer in self.fc_layers:
weight_init.c2_msra_fill(layer)
# use normal distribution initialization for mask prediction layer
nn.init.normal_(self.predictor.weight, std=0.001)
if self.predictor.bias is not None:
nn.init.constant_(self.predictor.bias, 0)
def forward(self, fine_grained_features, coarse_features):
x = torch.cat((fine_grained_features, coarse_features), dim=1)
for layer in self.fc_layers:
x = F.relu(layer(x))
if self.coarse_pred_each_layer:
x = cat((x, coarse_features), dim=1)
return self.predictor(x)
@POINT_HEAD_REGISTRY.register()
class ImplicitPointHead(nn.Module):
"""
A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head
takes both fine-grained features and instance-wise MLP parameters as its input.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
            channels: the output dimension of each FC layer
num_layers: the number of FC layers (including the final prediction layer)
image_feature_enabled: if True, fine-grained image-level features are used
positional_encoding_enabled: if True, positional encoding is used
"""
super(ImplicitPointHead, self).__init__()
# fmt: off
self.num_layers = cfg.MODEL.POINT_HEAD.NUM_FC + 1
self.channels = cfg.MODEL.POINT_HEAD.FC_DIM
self.image_feature_enabled = cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED
self.positional_encoding_enabled = cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED
self.num_classes = (
cfg.MODEL.POINT_HEAD.NUM_CLASSES if not cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK else 1
)
self.in_channels = input_shape.channels
# fmt: on
if not self.image_feature_enabled:
self.in_channels = 0
if self.positional_encoding_enabled:
self.in_channels += 256
self.register_buffer("positional_encoding_gaussian_matrix", torch.randn((2, 128)))
assert self.in_channels > 0
num_weight_params, num_bias_params = [], []
assert self.num_layers >= 2
for l in range(self.num_layers):
if l == 0:
# input layer
num_weight_params.append(self.in_channels * self.channels)
num_bias_params.append(self.channels)
elif l == self.num_layers - 1:
# output layer
num_weight_params.append(self.channels * self.num_classes)
num_bias_params.append(self.num_classes)
else:
# intermediate layer
num_weight_params.append(self.channels * self.channels)
num_bias_params.append(self.channels)
self.num_weight_params = num_weight_params
self.num_bias_params = num_bias_params
self.num_params = sum(num_weight_params) + sum(num_bias_params)
def forward(self, fine_grained_features, point_coords, parameters):
# features: [R, channels, K]
# point_coords: [R, K, 2]
num_instances = fine_grained_features.size(0)
num_points = fine_grained_features.size(2)
if num_instances == 0:
return torch.zeros((0, 1, num_points), device=fine_grained_features.device)
if self.positional_encoding_enabled:
# locations: [R*K, 2]
locations = 2 * point_coords.reshape(num_instances * num_points, 2) - 1
locations = locations @ self.positional_encoding_gaussian_matrix.to(locations.device)
locations = 2 * np.pi * locations
locations = torch.cat([torch.sin(locations), torch.cos(locations)], dim=1)
# locations: [R, C, K]
locations = locations.reshape(num_instances, num_points, 256).permute(0, 2, 1)
if not self.image_feature_enabled:
fine_grained_features = locations
else:
fine_grained_features = torch.cat([locations, fine_grained_features], dim=1)
# features [R, C, K]
mask_feat = fine_grained_features.reshape(num_instances, self.in_channels, num_points)
weights, biases = self._parse_params(
parameters,
self.in_channels,
self.channels,
self.num_classes,
self.num_weight_params,
self.num_bias_params,
)
point_logits = self._dynamic_mlp(mask_feat, weights, biases, num_instances)
point_logits = point_logits.reshape(-1, self.num_classes, num_points)
return point_logits
@staticmethod
def _dynamic_mlp(features, weights, biases, num_instances):
assert features.dim() == 3, features.dim()
n_layers = len(weights)
x = features
for i, (w, b) in enumerate(zip(weights, biases)):
x = torch.einsum("nck,ndc->ndk", x, w) + b
if i < n_layers - 1:
x = F.relu(x)
return x
@staticmethod
def _parse_params(
pred_params,
in_channels,
channels,
num_classes,
num_weight_params,
num_bias_params,
):
assert pred_params.dim() == 2
assert len(num_weight_params) == len(num_bias_params)
assert pred_params.size(1) == sum(num_weight_params) + sum(num_bias_params)
num_instances = pred_params.size(0)
num_layers = len(num_weight_params)
params_splits = list(
torch.split_with_sizes(pred_params, num_weight_params + num_bias_params, dim=1)
)
weight_splits = params_splits[:num_layers]
bias_splits = params_splits[num_layers:]
for l in range(num_layers):
if l == 0:
# input layer
weight_splits[l] = weight_splits[l].reshape(num_instances, channels, in_channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1)
elif l < num_layers - 1:
# intermediate layer
weight_splits[l] = weight_splits[l].reshape(num_instances, channels, channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1)
else:
# output layer
weight_splits[l] = weight_splits[l].reshape(num_instances, num_classes, channels)
bias_splits[l] = bias_splits[l].reshape(num_instances, num_classes, 1)
return weight_splits, bias_splits
def build_point_head(cfg, input_channels):
"""
Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`.
"""
head_name = cfg.MODEL.POINT_HEAD.NAME
return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
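# --- Illustrative usage sketch (added for documentation; not part of the original file) ---
# Builds the default StandardPointHead from a config extended with the PointRend keys
# (via this package's add_pointrend_config) and runs it on random per-point features.
def _demo_build_point_head():
    from detectron2.config import get_cfg
    from .config import add_pointrend_config  # sibling module of this package
    cfg = get_cfg()
    add_pointrend_config(cfg)
    head = build_point_head(cfg, ShapeSpec(channels=256, width=1, height=1))
    fine_grained_features = torch.randn(2, 256, 7)  # (R, C, P)
    coarse_features = torch.randn(2, cfg.MODEL.POINT_HEAD.NUM_CLASSES, 7)
    return head(fine_grained_features, coarse_features).shape  # (2, 80, 7)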
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/point_head.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
@ROI_HEADS_REGISTRY.register()
class PointRendROIHeads(StandardROIHeads):
"""
Identical to StandardROIHeads, except for some weights conversion code to
handle old models.
"""
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
logger = logging.getLogger(__name__)
logger.warning(
"Weight format of PointRend models have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
for k in list(state_dict.keys()):
newk = k
if k.startswith(prefix + "mask_point_head"):
newk = k.replace(prefix + "mask_point_head", prefix + "mask_head.point_head")
if k.startswith(prefix + "mask_coarse_head"):
newk = k.replace(prefix + "mask_coarse_head", prefix + "mask_head.coarse_head")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
@classmethod
def _init_mask_head(cls, cfg, input_shape):
if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.NAME != "PointRendMaskHead":
logger = logging.getLogger(__name__)
logger.warning(
"Config of PointRend models have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
assert cfg.MODEL.ROI_MASK_HEAD.NAME == "CoarseMaskHead"
cfg.defrost()
cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead"
cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ""
cfg.freeze()
return super()._init_mask_head(cfg, input_shape)
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/roi_heads.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import random
import cv2
from fvcore.transforms.transform import Transform
class ColorAugSSDTransform(Transform):
"""
    A color-related data augmentation used in Single Shot MultiBox Detector (SSD).
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Implementation based on:
https://github.com/weiliu89/caffe/blob
/4817bf8b4200b35ada8ed0dc378dceaf38c539e4
/src/caffe/util/im_transforms.cpp
https://github.com/chainer/chainercv/blob
/7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv
/links/model/ssd/transforms.py
"""
def __init__(
self,
img_format,
brightness_delta=32,
contrast_low=0.5,
contrast_high=1.5,
saturation_low=0.5,
saturation_high=1.5,
hue_delta=18,
):
super().__init__()
assert img_format in ["BGR", "RGB"]
self.is_rgb = img_format == "RGB"
del img_format
self._set_attributes(locals())
def apply_coords(self, coords):
return coords
def apply_segmentation(self, segmentation):
return segmentation
def apply_image(self, img, interp=None):
if self.is_rgb:
img = img[:, :, [2, 1, 0]]
img = self.brightness(img)
if random.randrange(2):
img = self.contrast(img)
img = self.saturation(img)
img = self.hue(img)
else:
img = self.saturation(img)
img = self.hue(img)
img = self.contrast(img)
if self.is_rgb:
img = img[:, :, [2, 1, 0]]
return img
def convert(self, img, alpha=1, beta=0):
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
if random.randrange(2):
return self.convert(
img, beta=random.uniform(-self.brightness_delta, self.brightness_delta)
)
return img
def contrast(self, img):
if random.randrange(2):
return self.convert(img, alpha=random.uniform(self.contrast_low, self.contrast_high))
return img
def saturation(self, img):
if random.randrange(2):
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img[:, :, 1] = self.convert(
img[:, :, 1], alpha=random.uniform(self.saturation_low, self.saturation_high)
)
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
return img
def hue(self, img):
if random.randrange(2):
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img[:, :, 0] = (
img[:, :, 0].astype(int) + random.randint(-self.hue_delta, self.hue_delta)
) % 180
return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
return img
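# Minimal usage sketch (illustrative): apply the SSD color jitter to a random
# HxWx3 uint8 image; coordinates and segmentations are left untouched by design.
def _example_color_aug_ssd():
    aug = ColorAugSSDTransform(img_format="BGR")
    img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    out = aug.apply_image(img)
    assert out.shape == img.shape and out.dtype == np.uint8
    return out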
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/color_augmentation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Dict
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .point_features import (
get_uncertain_point_coords_on_grid,
get_uncertain_point_coords_with_randomness,
point_sample,
)
from .point_head import build_point_head
def calculate_uncertainty(sem_seg_logits):
"""
    For each location of the prediction `sem_seg_logits` we estimate uncertainty as the
    difference between the top first and top second predicted logits.
    Args:
        sem_seg_logits (Tensor): A tensor of shape (N, C, ...), where N is the minibatch size and
            C is the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (N, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
top2_scores = torch.topk(sem_seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
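# Worked micro-example (illustrative): for per-class logits [2.0, 1.9, -3.0] the score
# is 1.9 - 2.0 = -0.1, while for [4.0, -1.0, -3.0] it is -1.0 - 4.0 = -5.0; the location
# whose score is closest to zero (the first one) is the most uncertain.
def _example_calculate_uncertainty():
    # shape (N=1, C=3, L=2): two spatial locations with three class logits each
    logits = torch.tensor([[2.0, 4.0], [1.9, -1.0], [-3.0, -3.0]]).unsqueeze(0)
    return calculate_uncertainty(logits)  # tensor([[[-0.1000, -5.0000]]])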
@SEM_SEG_HEADS_REGISTRY.register()
class PointRendSemSegHead(nn.Module):
"""
    A semantic segmentation head that combines a coarse head set in
    `MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME` and a point head set in `MODEL.POINT_HEAD.NAME`.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
self.coarse_sem_seg_head = SEM_SEG_HEADS_REGISTRY.get(
cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME
)(cfg, input_shape)
self._init_point_head(cfg, input_shape)
def _init_point_head(self, cfg, input_shape: Dict[str, ShapeSpec]):
# fmt: off
assert cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
feature_channels = {k: v.channels for k, v in input_shape.items()}
self.in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
self.train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
self.oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
self.importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
self.subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
self.subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
# fmt: on
in_channels = int(np.sum([feature_channels[f] for f in self.in_features]))
self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1))
def forward(self, features, targets=None):
coarse_sem_seg_logits = self.coarse_sem_seg_head.layers(features)
if self.training:
losses = self.coarse_sem_seg_head.losses(coarse_sem_seg_logits, targets)
with torch.no_grad():
point_coords = get_uncertain_point_coords_with_randomness(
coarse_sem_seg_logits,
calculate_uncertainty,
self.train_num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
coarse_features = point_sample(coarse_sem_seg_logits, point_coords, align_corners=False)
fine_grained_features = cat(
[
point_sample(features[in_feature], point_coords, align_corners=False)
for in_feature in self.in_features
],
dim=1,
)
point_logits = self.point_head(fine_grained_features, coarse_features)
point_targets = (
point_sample(
targets.unsqueeze(1).to(torch.float),
point_coords,
mode="nearest",
align_corners=False,
)
.squeeze(1)
.to(torch.long)
)
losses["loss_sem_seg_point"] = F.cross_entropy(
point_logits, point_targets, reduction="mean", ignore_index=self.ignore_value
)
return None, losses
else:
sem_seg_logits = coarse_sem_seg_logits.clone()
for _ in range(self.subdivision_steps):
sem_seg_logits = F.interpolate(
sem_seg_logits, scale_factor=2, mode="bilinear", align_corners=False
)
uncertainty_map = calculate_uncertainty(sem_seg_logits)
point_indices, point_coords = get_uncertain_point_coords_on_grid(
uncertainty_map, self.subdivision_num_points
)
                fine_grained_features = cat(
                    [
                        point_sample(features[in_feature], point_coords, align_corners=False)
                        for in_feature in self.in_features
                    ],
                    dim=1,
                )
coarse_features = point_sample(
coarse_sem_seg_logits, point_coords, align_corners=False
)
point_logits = self.point_head(fine_grained_features, coarse_features)
# put sem seg point predictions to the right places on the upsampled grid.
N, C, H, W = sem_seg_logits.shape
point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
sem_seg_logits = (
sem_seg_logits.reshape(N, C, H * W)
.scatter_(2, point_indices, point_logits)
.view(N, C, H, W)
)
return sem_seg_logits, {}
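# Shape sketch (illustrative): at inference, with SUBDIVISION_STEPS = k the coarse
# logits of shape (N, C, H, W) are bilinearly upsampled k times by a factor of 2, and
# at each step the SUBDIVISION_NUM_POINTS most uncertain locations are re-predicted by
# the point head, so the returned logits have shape (N, C, H * 2**k, W * 2**k).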
|
banmo-main
|
third_party/detectron2_old/projects/PointRend/point_rend/semantic_seg.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
import os
from collections import defaultdict
# This mapping is extracted from the official LVIS mapping:
# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
COCO_SYNSET_CATEGORIES = [
{"synset": "person.n.01", "coco_cat_id": 1},
{"synset": "bicycle.n.01", "coco_cat_id": 2},
{"synset": "car.n.01", "coco_cat_id": 3},
{"synset": "motorcycle.n.01", "coco_cat_id": 4},
{"synset": "airplane.n.01", "coco_cat_id": 5},
{"synset": "bus.n.01", "coco_cat_id": 6},
{"synset": "train.n.01", "coco_cat_id": 7},
{"synset": "truck.n.01", "coco_cat_id": 8},
{"synset": "boat.n.01", "coco_cat_id": 9},
{"synset": "traffic_light.n.01", "coco_cat_id": 10},
{"synset": "fireplug.n.01", "coco_cat_id": 11},
{"synset": "stop_sign.n.01", "coco_cat_id": 13},
{"synset": "parking_meter.n.01", "coco_cat_id": 14},
{"synset": "bench.n.01", "coco_cat_id": 15},
{"synset": "bird.n.01", "coco_cat_id": 16},
{"synset": "cat.n.01", "coco_cat_id": 17},
{"synset": "dog.n.01", "coco_cat_id": 18},
{"synset": "horse.n.01", "coco_cat_id": 19},
{"synset": "sheep.n.01", "coco_cat_id": 20},
{"synset": "beef.n.01", "coco_cat_id": 21},
{"synset": "elephant.n.01", "coco_cat_id": 22},
{"synset": "bear.n.01", "coco_cat_id": 23},
{"synset": "zebra.n.01", "coco_cat_id": 24},
{"synset": "giraffe.n.01", "coco_cat_id": 25},
{"synset": "backpack.n.01", "coco_cat_id": 27},
{"synset": "umbrella.n.01", "coco_cat_id": 28},
{"synset": "bag.n.04", "coco_cat_id": 31},
{"synset": "necktie.n.01", "coco_cat_id": 32},
{"synset": "bag.n.06", "coco_cat_id": 33},
{"synset": "frisbee.n.01", "coco_cat_id": 34},
{"synset": "ski.n.01", "coco_cat_id": 35},
{"synset": "snowboard.n.01", "coco_cat_id": 36},
{"synset": "ball.n.06", "coco_cat_id": 37},
{"synset": "kite.n.03", "coco_cat_id": 38},
{"synset": "baseball_bat.n.01", "coco_cat_id": 39},
{"synset": "baseball_glove.n.01", "coco_cat_id": 40},
{"synset": "skateboard.n.01", "coco_cat_id": 41},
{"synset": "surfboard.n.01", "coco_cat_id": 42},
{"synset": "tennis_racket.n.01", "coco_cat_id": 43},
{"synset": "bottle.n.01", "coco_cat_id": 44},
{"synset": "wineglass.n.01", "coco_cat_id": 46},
{"synset": "cup.n.01", "coco_cat_id": 47},
{"synset": "fork.n.01", "coco_cat_id": 48},
{"synset": "knife.n.01", "coco_cat_id": 49},
{"synset": "spoon.n.01", "coco_cat_id": 50},
{"synset": "bowl.n.03", "coco_cat_id": 51},
{"synset": "banana.n.02", "coco_cat_id": 52},
{"synset": "apple.n.01", "coco_cat_id": 53},
{"synset": "sandwich.n.01", "coco_cat_id": 54},
{"synset": "orange.n.01", "coco_cat_id": 55},
{"synset": "broccoli.n.01", "coco_cat_id": 56},
{"synset": "carrot.n.01", "coco_cat_id": 57},
{"synset": "frank.n.02", "coco_cat_id": 58},
{"synset": "pizza.n.01", "coco_cat_id": 59},
{"synset": "doughnut.n.02", "coco_cat_id": 60},
{"synset": "cake.n.03", "coco_cat_id": 61},
{"synset": "chair.n.01", "coco_cat_id": 62},
{"synset": "sofa.n.01", "coco_cat_id": 63},
{"synset": "pot.n.04", "coco_cat_id": 64},
{"synset": "bed.n.01", "coco_cat_id": 65},
{"synset": "dining_table.n.01", "coco_cat_id": 67},
{"synset": "toilet.n.02", "coco_cat_id": 70},
{"synset": "television_receiver.n.01", "coco_cat_id": 72},
{"synset": "laptop.n.01", "coco_cat_id": 73},
{"synset": "mouse.n.04", "coco_cat_id": 74},
{"synset": "remote_control.n.01", "coco_cat_id": 75},
{"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
{"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
{"synset": "microwave.n.02", "coco_cat_id": 78},
{"synset": "oven.n.01", "coco_cat_id": 79},
{"synset": "toaster.n.02", "coco_cat_id": 80},
{"synset": "sink.n.01", "coco_cat_id": 81},
{"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
{"synset": "book.n.01", "coco_cat_id": 84},
{"synset": "clock.n.01", "coco_cat_id": 85},
{"synset": "vase.n.01", "coco_cat_id": 86},
{"synset": "scissors.n.01", "coco_cat_id": 87},
{"synset": "teddy.n.01", "coco_cat_id": 88},
{"synset": "hand_blower.n.01", "coco_cat_id": 89},
{"synset": "toothbrush.n.01", "coco_cat_id": 90},
]
def cocofy_lvis(input_filename, output_filename):
"""
Filter LVIS instance segmentation annotations to remove all categories that are not included in
COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
    the output json are the original (non-contiguous) COCO dataset ids.
Args:
input_filename (str): path to the LVIS json file.
output_filename (str): path to the COCOfied json file.
"""
with open(input_filename, "r") as f:
lvis_json = json.load(f)
lvis_annos = lvis_json.pop("annotations")
cocofied_lvis = copy.deepcopy(lvis_json)
lvis_json["annotations"] = lvis_annos
# Mapping from lvis cat id to coco cat id via synset
lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
# Synsets that we will keep in the dataset
synsets_to_keep = set(synset_to_coco_cat_id.keys())
coco_cat_id_with_instances = defaultdict(int)
new_annos = []
ann_id = 1
for ann in lvis_annos:
lvis_cat_id = ann["category_id"]
synset = lvis_cat_id_to_synset[lvis_cat_id]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
new_ann = copy.deepcopy(ann)
new_ann["category_id"] = coco_cat_id
new_ann["id"] = ann_id
ann_id += 1
new_annos.append(new_ann)
coco_cat_id_with_instances[coco_cat_id] += 1
cocofied_lvis["annotations"] = new_annos
for image in cocofied_lvis["images"]:
for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
new_category_list = []
for lvis_cat_id in image[key]:
synset = lvis_cat_id_to_synset[lvis_cat_id]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
new_category_list.append(coco_cat_id)
coco_cat_id_with_instances[coco_cat_id] += 1
image[key] = new_category_list
coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
new_categories = []
for cat in lvis_json["categories"]:
synset = cat["synset"]
if synset not in synsets_to_keep:
continue
coco_cat_id = synset_to_coco_cat_id[synset]
if coco_cat_id not in coco_cat_id_with_instances:
continue
new_cat = copy.deepcopy(cat)
new_cat["id"] = coco_cat_id
new_categories.append(new_cat)
cocofied_lvis["categories"] = new_categories
with open(output_filename, "w") as f:
json.dump(cocofied_lvis, f)
print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
if __name__ == "__main__":
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
print("Start COCOfing {}.".format(s))
cocofy_lvis(
os.path.join(dataset_dir, "{}.json".format(s)),
os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
)
|
banmo-main
|
third_party/detectron2_old/datasets/prepare_cocofied_lvis.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import multiprocessing as mp
import numpy as np
import os
import time
from fvcore.common.download import download
from panopticapi.utils import rgb2id
from PIL import Image
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
panoptic = rgb2id(panoptic)
output = np.zeros_like(panoptic, dtype=np.uint8) + 255
for seg in segments:
cat_id = seg["category_id"]
new_cat_id = id_map[cat_id]
output[panoptic == seg["id"]] = new_cat_id
Image.fromarray(output).save(output_semantic)
def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
"""
Create semantic segmentation annotations from panoptic segmentation
annotations, to be used by PanopticFPN.
It maps all thing categories to class 0, and maps all unlabeled pixels to class 255.
It maps all stuff categories to contiguous ids starting from 1.
Args:
panoptic_json (str): path to the panoptic json file, in COCO's format.
panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
sem_seg_root (str): a directory to output semantic annotation files
categories (list[dict]): category metadata. Each dict needs to have:
"id": corresponds to the "category_id" in the json annotations
"isthing": 0 or 1
"""
os.makedirs(sem_seg_root, exist_ok=True)
stuff_ids = [k["id"] for k in categories if k["isthing"] == 0]
thing_ids = [k["id"] for k in categories if k["isthing"] == 1]
id_map = {} # map from category id to id in the output semantic annotation
assert len(stuff_ids) <= 254
for i, stuff_id in enumerate(stuff_ids):
id_map[stuff_id] = i + 1
for thing_id in thing_ids:
id_map[thing_id] = 0
id_map[0] = 255
with open(panoptic_json) as f:
obj = json.load(f)
pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
def iter_annotations():
for anno in obj["annotations"]:
file_name = anno["file_name"]
segments = anno["segments_info"]
input = os.path.join(panoptic_root, file_name)
output = os.path.join(sem_seg_root, file_name)
yield input, output, segments
print("Start writing to {} ...".format(sem_seg_root))
start = time.time()
pool.starmap(
functools.partial(_process_panoptic_to_semantic, id_map=id_map),
iter_annotations(),
chunksize=100,
)
print("Finished. time: {:.2f}s".format(time.time() - start))
if __name__ == "__main__":
dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
for s in ["val2017", "train2017"]:
separate_coco_semantic_from_panoptic(
os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
os.path.join(dataset_dir, "panoptic_{}".format(s)),
os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)),
COCO_CATEGORIES,
)
# Prepare val2017_100 for quick testing:
dest_dir = os.path.join(dataset_dir, "annotations/")
URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir)
with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f:
obj = json.load(f)
def link_val100(dir_full, dir_100):
print("Creating " + dir_100 + " ...")
os.makedirs(dir_100, exist_ok=True)
for img in obj["images"]:
basename = os.path.splitext(img["file_name"])[0]
src = os.path.join(dir_full, basename + ".png")
dst = os.path.join(dir_100, basename + ".png")
src = os.path.relpath(src, start=dir_100)
os.symlink(src, dst)
link_val100(
os.path.join(dataset_dir, "panoptic_val2017"),
os.path.join(dataset_dir, "panoptic_val2017_100"),
)
link_val100(
os.path.join(dataset_dir, "panoptic_stuff_val2017"),
os.path.join(dataset_dir, "panoptic_stuff_val2017_100"),
)
|
banmo-main
|
third_party/detectron2_old/datasets/prepare_panoptic_fpn.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import os
from pathlib import Path
import tqdm
from PIL import Image
def convert(input, output):
img = np.asarray(Image.open(input))
assert img.dtype == np.uint8
img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1
Image.fromarray(img).save(output)
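# Worked micro-example (illustrative): uint8 arithmetic wraps around, so
#   np.asarray([0, 1, 150], dtype=np.uint8) - 1  ->  array([255, 0, 149], dtype=np.uint8)
# i.e. the ADE20k "ignore" label 0 becomes 255 and class labels shift down by one.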
if __name__ == "__main__":
dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
for name in ["training", "validation"]:
annotation_dir = dataset_dir / "annotations" / name
output_dir = dataset_dir / "annotations_detectron2" / name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file)
|
banmo-main
|
third_party/detectron2_old/datasets/prepare_ade20k_sem_seg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.utils.registry import _convert_target_to_string, locate
class A:
class B:
pass
class TestLocate(unittest.TestCase):
def _test_obj(self, obj):
name = _convert_target_to_string(obj)
newobj = locate(name)
self.assertIs(obj, newobj)
def test_basic(self):
self._test_obj(GeneralizedRCNN)
def test_inside_class(self):
# requires using __qualname__ instead of __name__
self._test_obj(A.B)
def test_builtin(self):
self._test_obj(len)
self._test_obj(dict)
def test_pytorch_optim(self):
# pydoc.locate does not work for it
self._test_obj(torch.optim.SGD)
def test_failure(self):
with self.assertRaises(ImportError):
locate("asdf")
def test_compress_target(self):
from detectron2.data.transforms import RandomCrop
name = _convert_target_to_string(RandomCrop)
# name shouldn't contain 'augmentation_impl'
self.assertEqual(name, "detectron2.data.transforms.RandomCrop")
self.assertIs(RandomCrop, locate(name))
|
banmo-main
|
third_party/detectron2_old/tests/test_registry.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import time
import unittest
from unittest import mock
import torch
from fvcore.common.checkpoint import Checkpointer
from torch import nn
from detectron2.config import configurable, get_cfg
from detectron2.engine import DefaultTrainer, SimpleTrainer, hooks
from detectron2.modeling.meta_arch import META_ARCH_REGISTRY
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
@META_ARCH_REGISTRY.register()
class _SimpleModel(nn.Module):
@configurable
def __init__(self, sleep_sec=0):
super().__init__()
self.mod = nn.Linear(10, 20)
self.sleep_sec = sleep_sec
@classmethod
def from_config(cls, cfg):
return {}
def forward(self, x):
if self.sleep_sec > 0:
time.sleep(self.sleep_sec)
return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])}
class TestTrainer(unittest.TestCase):
def _data_loader(self, device):
device = torch.device(device)
while True:
yield torch.rand(3, 3).to(device)
def test_simple_trainer(self, device="cpu"):
model = _SimpleModel().to(device=device)
trainer = SimpleTrainer(
model, self._data_loader(device), torch.optim.SGD(model.parameters(), 0.1)
)
trainer.train(0, 10)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_simple_trainer_cuda(self):
self.test_simple_trainer(device="cuda")
def test_writer_hooks(self):
model = _SimpleModel(sleep_sec=0.1)
trainer = SimpleTrainer(
model, self._data_loader("cpu"), torch.optim.SGD(model.parameters(), 0.1)
)
max_iter = 50
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
json_file = os.path.join(d, "metrics.json")
writers = [CommonMetricPrinter(max_iter), JSONWriter(json_file)]
trainer.register_hooks(
[hooks.EvalHook(0, lambda: {"metric": 100}), hooks.PeriodicWriter(writers)]
)
with self.assertLogs(writers[0].logger) as logs:
trainer.train(0, max_iter)
with open(json_file, "r") as f:
data = [json.loads(line.strip()) for line in f]
self.assertEqual([x["iteration"] for x in data], [19, 39, 49, 50])
# the eval metric is in the last line with iter 50
self.assertIn("metric", data[-1], "Eval metric must be in last line of JSON!")
# test logged messages from CommonMetricPrinter
self.assertEqual(len(logs.output), 3)
for log, iter in zip(logs.output, [19, 39, 49]):
self.assertIn(f"iter: {iter}", log)
self.assertIn("eta: 0:00:00", logs.output[-1], "Last ETA must be 0!")
@unittest.skipIf(os.environ.get("CI"), "Require COCO data.")
def test_default_trainer(self):
# TODO: this test requires manifold access, so changed device to CPU. see: T88318502
cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "_SimpleModel"
cfg.DATASETS.TRAIN = ("coco_2017_val_100",)
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
cfg.OUTPUT_DIR = d
trainer = DefaultTrainer(cfg)
# test property
self.assertIs(trainer.model, trainer._trainer.model)
trainer.model = _SimpleModel()
self.assertIs(trainer.model, trainer._trainer.model)
def test_checkpoint_resume(self):
model = _SimpleModel()
dataloader = self._data_loader("cpu")
opt = torch.optim.SGD(model.parameters(), 0.1)
scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
trainer = SimpleTrainer(model, dataloader, opt)
checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
trainer.register_hooks(
[
hooks.LRScheduler(scheduler=scheduler),
# checkpoint after scheduler to properly save the state of scheduler
hooks.PeriodicCheckpointer(checkpointer, 10),
]
)
trainer.train(0, 12)
self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
self.assertEqual(scheduler.last_epoch, 12)
del trainer
opt = torch.optim.SGD(model.parameters(), 999) # lr will be loaded
trainer = SimpleTrainer(model, dataloader, opt)
scheduler = torch.optim.lr_scheduler.StepLR(opt, 3)
trainer.register_hooks(
[
hooks.LRScheduler(scheduler=scheduler),
]
)
checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer)
checkpointer.resume_or_load("non_exist.pth")
self.assertEqual(trainer.iter, 11) # last finished iter number (0-based in Trainer)
# number of times `scheduler.step()` was called (1-based)
self.assertEqual(scheduler.last_epoch, 12)
self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5)
def test_eval_hook(self):
model = _SimpleModel()
dataloader = self._data_loader("cpu")
opt = torch.optim.SGD(model.parameters(), 0.1)
for total_iter, period, eval_count in [(30, 15, 2), (31, 15, 3), (20, 0, 1)]:
test_func = mock.Mock(return_value={"metric": 3.0})
trainer = SimpleTrainer(model, dataloader, opt)
trainer.register_hooks([hooks.EvalHook(period, test_func)])
trainer.train(0, total_iter)
self.assertEqual(test_func.call_count, eval_count)
|
banmo-main
|
third_party/detectron2_old/tests/test_engine.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import os
import tempfile
import unittest
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode, Instances, RotatedBoxes
from detectron2.utils.visualizer import ColorMode, Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
        mask = np.zeros_like(img[:, :, 0], dtype=bool)  # np.bool was removed from modern NumPy
mask[:40, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {
"annotations": [
{
"bbox": [
368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685,
],
"bbox_mode": BoxMode.XYWH_ABS,
"category_id": 0,
"iscrowd": 1,
"segmentation": {
"counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2",
"size": [512, 512],
},
}
],
"height": 512,
"image_id": 1,
"width": 512,
}
v = Visualizer(img)
v.draw_dataset_dict(dic)
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_draw_rotated_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {
"annotations": [
{
"bbox": [
368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685,
45.0,
],
"bbox_mode": BoxMode.XYWHA_ABS,
"category_id": 0,
"iscrowd": 1,
}
],
"height": 512,
"image_id": 1,
"width": 512,
}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
# Test 2x scaling
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
# Test overlay masks
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img)
v.draw_instance_predictions(inst)
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_BWmode_nomask(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
def test_draw_binary_mask(self):
img, boxes, _, _, masks = self._random_data()
img[:, :, 0] = 0 # remove red color
mask = masks[0]
mask_with_hole = np.zeros_like(mask).astype("uint8")
mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5)
for m in [mask, mask_with_hole]:
for save in [True, False]:
v = Visualizer(img)
o = v.draw_binary_mask(m, color="red", text="test")
if save:
with tempfile.TemporaryDirectory(prefix="detectron2_viz") as d:
path = os.path.join(d, "output.png")
o.save(path)
o = cv2.imread(path)[:, :, ::-1]
else:
o = o.get_image().astype("float32")
# red color is drawn on the image
self.assertTrue(o[:, :, 0].sum() > 0)
def test_border_mask_with_holes(self):
H, W = 200, 200
img = np.zeros((H, W, 3))
img[:, :, 0] = 255.0
v = Visualizer(img, scale=3)
mask = np.zeros((H, W))
mask[:, 100:150] = 1
# create a hole, to trigger imshow
mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1)
output = v.draw_binary_mask(mask, color="blue")
output = output.get_image()[:, :, ::-1]
first_row = {tuple(x.tolist()) for x in output[0]}
last_row = {tuple(x.tolist()) for x in output[-1]}
# Check quantization / off-by-1 error: the first and last row must have two colors
self.assertEqual(len(last_row), 2)
self.assertEqual(len(first_row), 2)
self.assertIn((0, 0, 255), last_row)
self.assertIn((0, 0, 255), first_row)
def test_border_polygons(self):
H, W = 200, 200
img = np.zeros((H, W, 3))
img[:, :, 0] = 255.0
v = Visualizer(img, scale=3)
mask = np.zeros((H, W))
mask[:, 100:150] = 1
output = v.draw_binary_mask(mask, color="blue")
output = output.get_image()[:, :, ::-1]
first_row = {tuple(x.tolist()) for x in output[0]}
last_row = {tuple(x.tolist()) for x in output[-1]}
# Check quantization / off-by-1 error:
# the first and last row must have >=2 colors, because the polygon
# touches both rows
self.assertGreaterEqual(len(last_row), 2)
self.assertGreaterEqual(len(first_row), 2)
self.assertIn((0, 0, 255), last_row)
self.assertIn((0, 0, 255), first_row)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/test_visualizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from detectron2.utils.collect_env import collect_env_info
class TestProjects(unittest.TestCase):
def test_import(self):
from detectron2.projects import point_rend
_ = point_rend.add_pointrend_config
import detectron2.projects.deeplab as deeplab
_ = deeplab.add_deeplab_config
# import detectron2.projects.panoptic_deeplab as panoptic_deeplab
# _ = panoptic_deeplab.add_panoptic_deeplab_config
class TestCollectEnv(unittest.TestCase):
def test(self):
_ = collect_env_info()
|
banmo-main
|
third_party/detectron2_old/tests/test_packaging.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.utils.analysis import flop_count_operators, parameter_count
from detectron2.utils.testing import get_model_no_weights
class RetinaNetTest(unittest.TestCase):
def setUp(self):
self.model = get_model_no_weights("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
def test_flop(self):
# RetinaNet supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800), "test_unused": "abcd"}]
res = flop_count_operators(self.model, inputs)
self.assertTrue(int(res["conv"]), 146) # 146B flops
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 37915572)
self.assertTrue(res["backbone"], 31452352)
class FasterRCNNTest(unittest.TestCase):
def setUp(self):
self.model = get_model_no_weights("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
def test_flop(self):
# Faster R-CNN supports flop-counting with random inputs
inputs = [{"image": torch.rand(3, 800, 800)}]
res = flop_count_operators(self.model, inputs)
# This only checks flops for backbone & proposal generator
# Flops for box head is not conv, and depends on #proposals, which is
# almost 0 for random inputs.
self.assertTrue(int(res["conv"]), 117)
def test_param_count(self):
res = parameter_count(self.model)
self.assertTrue(res[""], 41699936)
self.assertTrue(res["backbone"], 26799296)
|
banmo-main
|
third_party/detectron2_old/tests/test_model_analysis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
|
banmo-main
|
third_party/detectron2_old/tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from unittest import TestCase
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from torch import nn
from detectron2.solver import LRMultiplier, WarmupParamScheduler
class TestScheduler(TestCase):
def test_warmup_multistep(self):
p = nn.Parameter(torch.zeros(0))
opt = torch.optim.SGD([p], lr=5)
multiplier = WarmupParamScheduler(
MultiStepParamScheduler(
[1, 0.1, 0.01, 0.001],
milestones=[10, 15, 20],
num_updates=30,
),
0.001,
5 / 30,
)
sched = LRMultiplier(opt, multiplier, 30)
# This is an equivalent of:
# sched = WarmupMultiStepLR(
# opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5)
p.sum().backward()
opt.step()
lrs = [0.005]
for _ in range(30):
sched.step()
lrs.append(opt.param_groups[0]["lr"])
self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001]))
self.assertTrue(np.allclose(lrs[5:10], 5.0))
self.assertTrue(np.allclose(lrs[10:15], 0.5))
self.assertTrue(np.allclose(lrs[15:20], 0.05))
self.assertTrue(np.allclose(lrs[20:], 0.005))
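        # Derivation sketch (illustrative): warmup covers 5/30 of the 30 updates, i.e. 5
        # steps; the multiplier ramps linearly from warmup_factor = 0.001 to 1, so with
        # base lr 5 the first lrs interpolate between 0.005 and 5.0 (step 1 gives
        # 0.8 * 0.005 + 0.2 * 5 = 1.004), and the milestones at 10/15/20 then scale the
        # lr by 0.1 each time: 5.0 -> 0.5 -> 0.05 -> 0.005, matching the assertions above.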
def test_warmup_cosine(self):
p = nn.Parameter(torch.zeros(0))
opt = torch.optim.SGD([p], lr=5)
multiplier = WarmupParamScheduler(
CosineParamScheduler(1, 0),
0.001,
5 / 30,
)
sched = LRMultiplier(opt, multiplier, 30)
p.sum().backward()
opt.step()
self.assertEqual(opt.param_groups[0]["lr"], 0.005)
lrs = [0.005]
for _ in range(30):
sched.step()
lrs.append(opt.param_groups[0]["lr"])
for idx, lr in enumerate(lrs):
expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30))
if idx >= 5:
self.assertAlmostEqual(lr, expected_cosine)
else:
self.assertNotAlmostEqual(lr, expected_cosine)
|
banmo-main
|
third_party/detectron2_old/tests/test_scheduler.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-
import copy
import os
import tempfile
import unittest
import torch
from detectron2 import model_zoo
from detectron2.utils.logger import setup_logger
from detectron2.utils.testing import get_sample_coco_image
@unittest.skipIf(os.environ.get("CI"), "Require COCO data and model zoo.")
class TestCaffe2Export(unittest.TestCase):
def setUp(self):
setup_logger()
def _test_model(self, config_path, device="cpu"):
# requires extra dependencies
from detectron2.export import Caffe2Model, add_export_config, Caffe2Tracer
cfg = model_zoo.get_config(config_path)
add_export_config(cfg)
cfg.MODEL.DEVICE = device
model = model_zoo.get(config_path, trained=True, device=device)
inputs = [{"image": get_sample_coco_image()}]
tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs))
c2_model = tracer.export_caffe2()
with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
c2_model.save_protobuf(d)
c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))
c2_model = Caffe2Model.load_protobuf(d)
c2_model(inputs)[0]["instances"]
ts_model = tracer.export_torchscript()
ts_model.save(os.path.join(d, "model.ts"))
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNNGPU(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
def testPanopticFPN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_model("COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml")
|
banmo-main
|
third_party/detectron2_old/tests/test_export_caffe2.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import unittest
from detectron2.utils.events import CommonMetricPrinter, EventStorage, JSONWriter
class TestEventWriter(unittest.TestCase):
def testScalar(self):
with tempfile.TemporaryDirectory(
prefix="detectron2_tests"
) as dir, EventStorage() as storage:
json_file = os.path.join(dir, "test.json")
writer = JSONWriter(json_file)
for k in range(60):
storage.put_scalar("key", k, smoothing_hint=False)
if (k + 1) % 20 == 0:
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue([int(k["key"]) for k in data] == [19, 39, 59])
def testScalarMismatchedPeriod(self):
with tempfile.TemporaryDirectory(
prefix="detectron2_tests"
) as dir, EventStorage() as storage:
json_file = os.path.join(dir, "test.json")
writer = JSONWriter(json_file)
for k in range(60):
                if k % 17 == 0:  # write in a different period
storage.put_scalar("key2", k, smoothing_hint=False)
storage.put_scalar("key", k, smoothing_hint=False)
if (k + 1) % 20 == 0:
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue([int(k.get("key2", 0)) for k in data] == [17, 0, 34, 0, 51, 0])
self.assertTrue([int(k.get("key", 0)) for k in data] == [0, 19, 0, 39, 0, 59])
self.assertTrue([int(k["iteration"]) for k in data] == [17, 19, 34, 39, 51, 59])
def testPrintETA(self):
with EventStorage() as s:
p1 = CommonMetricPrinter(10)
p2 = CommonMetricPrinter()
s.put_scalar("time", 1.0)
s.step()
s.put_scalar("time", 1.0)
s.step()
with self.assertLogs("detectron2.utils.events") as logs:
p1.write()
self.assertIn("eta", logs.output[0])
with self.assertLogs("detectron2.utils.events") as logs:
p2.write()
self.assertNotIn("eta", logs.output[0])
|
banmo-main
|
third_party/detectron2_old/tests/test_events.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import tempfile
import unittest
import torch
from torch import Tensor, nn
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.config.instantiate import dump_dataclass, instantiate
from detectron2.export import dump_torchscript_IR, scripting_with_instances
from detectron2.export.flatten import TracingAdapter, flatten_to_tuple
from detectron2.export.torchscript_patch import patch_builtin_len
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.testing import (
assert_instances_allclose,
convert_scripted_instances,
get_sample_coco_image,
random_boxes,
)
"""
https://detectron2.readthedocs.io/tutorials/deployment.html
contains some explanations of this file.
"""
@unittest.skipIf(os.environ.get("CI") or TORCH_VERSION < (1, 8), "Insufficient Pytorch version")
class TestScripting(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
self._test_retinanet_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
def _test_rcnn_model(self, config_path):
model = model_zoo.get(config_path, trained=True)
model.eval()
fields = {
"proposal_boxes": Boxes,
"objectness_logits": Tensor,
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
"pred_masks": Tensor,
}
script_model = scripting_with_instances(model, fields)
inputs = [{"image": get_sample_coco_image()}] * 2
with torch.no_grad():
instance = model.inference(inputs, do_postprocess=False)[0]
scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
assert_instances_allclose(instance, scripted_instance)
def _test_retinanet_model(self, config_path):
model = model_zoo.get(config_path, trained=True)
model.eval()
fields = {
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
}
script_model = scripting_with_instances(model, fields)
img = get_sample_coco_image()
inputs = [{"image": img}] * 2
with torch.no_grad():
instance = model(inputs)[0]["instances"]
scripted_instance = convert_scripted_instances(script_model(inputs)[0])
scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2])
assert_instances_allclose(instance, scripted_instance)
# Note that the model currently cannot be saved and loaded into a new process:
# https://github.com/pytorch/pytorch/issues/46944
@unittest.skipIf(os.environ.get("CI") or TORCH_VERSION < (1, 8), "Insufficient Pytorch version")
class TestTracing(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testMaskRCNN(self):
# TODO: this test requires manifold access, see: T88318502
def inference_func(model, image):
inputs = [{"image": image}]
return model.inference(inputs, do_postprocess=False)[0]
self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def testRetinaNet(self):
# TODO: this test requires manifold access, see: T88318502
def inference_func(model, image):
return model.forward([{"image": image}])[0]["instances"]
self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml", inference_func)
def _test_model(self, config_path, inference_func):
model = model_zoo.get(config_path, trained=True)
image = get_sample_coco_image()
wrapper = TracingAdapter(model, image, inference_func)
wrapper.eval()
with torch.no_grad():
small_image = nn.functional.interpolate(image, scale_factor=0.5)
# trace with a different image, and the trace must still work
traced_model = torch.jit.trace(wrapper, (small_image,))
output = inference_func(model, image)
traced_output = wrapper.outputs_schema(traced_model(image))
assert_instances_allclose(output, traced_output, size_as_tensor=True)
def testKeypointHead(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.model = KRCNNConvDeconvUpsampleHead(
ShapeSpec(channels=4, height=14, width=14), num_keypoints=17, conv_dims=(4,)
)
def forward(self, x, predbox1, predbox2):
inst = [
Instances((100, 100), pred_boxes=Boxes(predbox1)),
Instances((100, 100), pred_boxes=Boxes(predbox2)),
]
ret = self.model(x, inst)
return tuple(x.pred_keypoints for x in ret)
model = M()
model.eval()
def gen_input(num1, num2):
feat = torch.randn((num1 + num2, 4, 14, 14))
box1 = random_boxes(num1)
box2 = random_boxes(num2)
return feat, box1, box2
with torch.no_grad(), patch_builtin_len():
trace = torch.jit.trace(model, gen_input(15, 15), check_trace=False)
inputs = gen_input(12, 10)
trace_outputs = trace(*inputs)
true_outputs = model(*inputs)
for trace_output, true_output in zip(trace_outputs, true_outputs):
self.assertTrue(torch.allclose(trace_output, true_output))
class TestTorchscriptUtils(unittest.TestCase):
# TODO: add test to dump scripting
def test_dump_IR_tracing(self):
cfg = get_cfg()
cfg.MODEL.RESNETS.DEPTH = 18
cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64
class Mod(nn.Module):
def forward(self, x):
return tuple(self.m(x).values())
model = Mod()
model.m = build_backbone(cfg)
model.eval()
with torch.no_grad():
ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224),))
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
dump_torchscript_IR(ts_model, d)
# check that the files are created
for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined", "model"]:
fname = os.path.join(d, name + ".txt")
self.assertTrue(os.stat(fname).st_size > 0, fname)
def test_dump_IR_function(self):
@torch.jit.script
def gunc(x, y):
return x + y
def func(x, y):
return x + y + gunc(x, y)
ts_model = torch.jit.trace(func, (torch.rand(3), torch.rand(3)))
with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
dump_torchscript_IR(ts_model, d)
for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined"]:
fname = os.path.join(d, name + ".txt")
self.assertTrue(os.stat(fname).st_size > 0, fname)
def test_flatten_basic(self):
obj = [3, ([5, 6], {"name": [7, 9], "name2": 3})]
res, schema = flatten_to_tuple(obj)
self.assertEqual(res, (3, 5, 6, 7, 9, 3))
new_obj = schema(res)
self.assertEqual(new_obj, obj)
_, new_schema = flatten_to_tuple(new_obj)
self.assertEqual(schema, new_schema) # test __eq__
self._check_schema(schema)
def _check_schema(self, schema):
dumped_schema = dump_dataclass(schema)
# Check that the schema is json-serializable
# Although in reality you might want to use yaml because it often has many levels
json.dumps(dumped_schema)
# Check that the schema can be deserialized
new_schema = instantiate(dumped_schema)
self.assertEqual(schema, new_schema)
def test_flatten_instances_boxes(self):
inst = Instances(
torch.tensor([5, 8]), pred_masks=torch.tensor([3]), pred_boxes=Boxes(torch.ones((1, 4)))
)
obj = [3, ([5, 6], inst)]
res, schema = flatten_to_tuple(obj)
self.assertEqual(res[:3], (3, 5, 6))
for r, expected in zip(res[3:], (inst.pred_boxes.tensor, inst.pred_masks, inst.image_size)):
self.assertIs(r, expected)
new_obj = schema(res)
assert_instances_allclose(new_obj[1][1], inst, rtol=0.0, size_as_tensor=True)
self._check_schema(schema)
|
banmo-main
|
third_party/detectron2_old/tests/test_export_torchscript.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import unittest
from detectron2 import model_zoo
from detectron2.config import instantiate
from detectron2.modeling import FPN, GeneralizedRCNN
logger = logging.getLogger(__name__)
class TestModelZoo(unittest.TestCase):
def test_get_returns_model(self):
model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False)
self.assertIsInstance(model, GeneralizedRCNN)
self.assertIsInstance(model.backbone, FPN)
def test_get_invalid_model(self):
self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml")
def test_get_url(self):
url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
self.assertEqual(
url,
"https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa
)
url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py")
self.assertEqual(url, url2)
def _build_lazy_model(self, name):
cfg = model_zoo.get_config("common/models/" + name)
instantiate(cfg.model)
def test_mask_rcnn_fpn(self):
self._build_lazy_model("mask_rcnn_fpn.py")
def test_mask_rcnn_c4(self):
self._build_lazy_model("mask_rcnn_c4.py")
def test_panoptic_fpn(self):
self._build_lazy_model("panoptic_fpn.py")
def test_schedule(self):
cfg = model_zoo.get_config("common/coco_schedule.py")
for _, v in cfg.items():
instantiate(v)
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/test_model_zoo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from collections import OrderedDict
import torch
from torch import nn
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.utils.logger import setup_logger
class TestCheckpointer(unittest.TestCase):
def setUp(self):
setup_logger()
def create_complex_model(self):
m = nn.Module()
m.block1 = nn.Module()
m.block1.layer1 = nn.Linear(2, 3)
m.layer2 = nn.Linear(3, 2)
m.res = nn.Module()
m.res.layer2 = nn.Linear(3, 2)
state_dict = OrderedDict()
state_dict["layer1.weight"] = torch.rand(3, 2)
state_dict["layer1.bias"] = torch.rand(3)
state_dict["layer2.weight"] = torch.rand(2, 3)
state_dict["layer2.bias"] = torch.rand(2)
state_dict["res.layer2.weight"] = torch.rand(2, 3)
state_dict["res.layer2.bias"] = torch.rand(2)
return m, state_dict
def test_complex_model_loaded(self):
for add_data_parallel in [False, True]:
model, state_dict = self.create_complex_model()
if add_data_parallel:
model = nn.DataParallel(model)
model_sd = model.state_dict()
sd_to_load = align_and_update_state_dicts(model_sd, state_dict)
model.load_state_dict(sd_to_load)
for loaded, stored in zip(model_sd.values(), state_dict.values()):
# different tensor references
self.assertFalse(id(loaded) == id(stored))
# same content
self.assertTrue(loaded.to(stored).equal(stored))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/test_checkpoint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from detectron2.layers import batched_nms
from detectron2.utils.testing import random_boxes
class TestNMS(unittest.TestCase):
def _create_tensors(self, N):
boxes = random_boxes(N, 200)
scores = torch.rand(N)
return boxes, scores
def test_nms_scriptability(self):
N = 2000
num_classes = 50
boxes, scores = self._create_tensors(N)
idxs = torch.randint(0, num_classes, (N,))
scripted_batched_nms = torch.jit.script(batched_nms)
err_msg = "NMS is incompatible with jit-scripted NMS for IoU={}"
for iou in [0.2, 0.5, 0.8]:
keep_ref = batched_nms(boxes, scores, idxs, iou)
backup = boxes.clone()
scripted_keep = scripted_batched_nms(boxes, scores, idxs, iou)
assert torch.allclose(boxes, backup), "boxes modified by jit-scripted batched_nms"
self.assertTrue(torch.equal(keep_ref, scripted_keep), err_msg.format(iou))
if __name__ == "__main__":
unittest.main()
|
banmo-main
|
third_party/detectron2_old/tests/layers/test_nms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from torch import nn
from detectron2.layers import ASPP, DepthwiseSeparableConv2d, FrozenBatchNorm2d
from detectron2.modeling.backbone.resnet import BasicStem, ResNet
"""
Test for misc layers.
"""
class TestBlocks(unittest.TestCase):
def test_separable_conv(self):
DepthwiseSeparableConv2d(3, 10, norm1="BN", activation1=nn.PReLU())
def test_aspp(self):
m = ASPP(3, 10, [2, 3, 4], norm="", activation=nn.PReLU())
self.assertIsNot(m.convs[0].activation.weight, m.convs[1].activation.weight)
self.assertIsNot(m.convs[0].activation.weight, m.project.activation.weight)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_frozen_batchnorm_fp16(self):
from torch.cuda.amp import autocast
C = 10
input = torch.rand(1, C, 10, 10).cuda()
m = FrozenBatchNorm2d(C).cuda()
with autocast():
output = m(input.half())
self.assertEqual(output.dtype, torch.float16)
# requires_grad triggers a different codepath
input.requires_grad_()
with autocast():
output = m(input.half())
self.assertEqual(output.dtype, torch.float16)
def test_resnet_unused_stages(self):
resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2"])
self.assertTrue(hasattr(resnet, "res2"))
self.assertFalse(hasattr(resnet, "res3"))
self.assertFalse(hasattr(resnet, "res5"))
resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2", "res5"])
self.assertTrue(hasattr(resnet, "res2"))
self.assertTrue(hasattr(resnet, "res4"))
self.assertTrue(hasattr(resnet, "res5"))
|
banmo-main
|
third_party/detectron2_old/tests/layers/test_blocks.py
|