# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
See "Data Augmentation" tutorial for an overview of the system:
https://detectron2.readthedocs.io/tutorials/augmentation.html
"""
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = [
"ExtentTransform",
"ResizeTransform",
"RotationTransform",
"ColorTransform",
"PILColorTransform",
]
class ExtentTransform(Transform):
"""
Extracts a subregion from the source image and scales it to the output size.
The fill color is used for pixels of the source rect that fall outside
the source image.
See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
"""
def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
"""
Args:
src_rect (x0, y0, x1, y1): src coordinates
output_size (h, w): dst image size
interp: PIL interpolation methods
fill: Fill color used when src_rect extends outside image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img, interp=None):
h, w = self.output_size
if len(img.shape) > 2 and img.shape[2] == 1:
pil_image = Image.fromarray(img[:, :, 0], mode="L")
else:
pil_image = Image.fromarray(img)
pil_image = pil_image.transform(
size=(w, h),
method=Image.EXTENT,
data=self.src_rect,
resample=interp if interp else self.interp,
fill=self.fill,
)
ret = np.asarray(pil_image)
if len(img.shape) > 2 and img.shape[2] == 1:
ret = np.expand_dims(ret, -1)
return ret
def apply_coords(self, coords):
# Transform image center from source coordinates into output coordinates
# and then map the new origin to the corner of the output image.
h, w = self.output_size
x0, y0, x1, y1 = self.src_rect
new_coords = coords.astype(np.float32)
new_coords[:, 0] -= 0.5 * (x0 + x1)
new_coords[:, 1] -= 0.5 * (y0 + y1)
new_coords[:, 0] *= w / (x1 - x0)
new_coords[:, 1] *= h / (y1 - y0)
new_coords[:, 0] += 0.5 * w
new_coords[:, 1] += 0.5 * h
return new_coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
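# Illustrative sketch (not part of the original file): a quick numeric check of
# the apply_coords mapping above, with hypothetical values. The corners of
# src_rect should land on the corners of the output image:
#
#   t = ExtentTransform(src_rect=(10, 20, 110, 220), output_size=(100, 50))
#   corners = np.array([[10, 20], [110, 220]], dtype=np.float32)
#   t.apply_coords(corners)  # -> [[0., 0.], [50., 100.]], i.e. (0, 0) and (w, h)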
class ResizeTransform(Transform):
"""
Resize the image to a target size.
"""
def __init__(self, h, w, new_h, new_w, interp=None):
"""
Args:
h, w (int): original image size
new_h, new_w (int): new image size
interp: PIL interpolation methods, defaults to bilinear.
"""
# TODO decide on PIL vs opencv
super().__init__()
if interp is None:
interp = Image.BILINEAR
self._set_attributes(locals())
def apply_image(self, img, interp=None):
assert img.shape[:2] == (self.h, self.w)
assert len(img.shape) <= 4
interp_method = interp if interp is not None else self.interp
if img.dtype == np.uint8:
if len(img.shape) > 2 and img.shape[2] == 1:
pil_image = Image.fromarray(img[:, :, 0], mode="L")
else:
pil_image = Image.fromarray(img)
pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
ret = np.asarray(pil_image)
if len(img.shape) > 2 and img.shape[2] == 1:
ret = np.expand_dims(ret, -1)
else:
# PIL only supports uint8
if any(x < 0 for x in img.strides):
img = np.ascontiguousarray(img)
img = torch.from_numpy(img)
shape = list(img.shape)
shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
_PIL_RESIZE_TO_INTERPOLATE_MODE = {
Image.NEAREST: "nearest",
Image.BILINEAR: "bilinear",
Image.BICUBIC: "bicubic",
}
mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
align_corners = None if mode == "nearest" else False
img = F.interpolate(
img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
)
shape[:2] = (self.new_h, self.new_w)
ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
return ret
def apply_coords(self, coords):
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
return segmentation
def inverse(self):
return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
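# Illustrative sketch (not part of the original file): coordinates are scaled by
# new_size / old_size, and inverse() swaps the two sizes, so a resize followed
# by its inverse is the identity on coordinates (values below are hypothetical):
#
#   t = ResizeTransform(h=480, w=640, new_h=240, new_w=320)
#   t.apply_coords(np.array([[640.0, 480.0]]))  # -> [[320., 240.]] (mutates its argument)
#   t.inverse().apply_coords(np.array([[320.0, 240.0]]))  # -> [[640., 480.]]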
class RotationTransform(Transform):
"""
Rotate the image the given number of degrees counter clockwise
around its center.
"""
def __init__(self, h, w, angle, expand=True, center=None, interp=None):
"""
Args:
h, w (int): original image size
angle (float): degrees for rotation
expand (bool): choose whether the image should be resized to fit the whole
rotated image (default), or simply cropped to the original size
center (tuple (x, y)): coordinates of the rotation center.
If left as None, the center of the image will be used.
center has no effect if expand=True because it only affects shifting
interp: cv2 interpolation method, default cv2.INTER_LINEAR
"""
super().__init__()
image_center = np.array((w / 2, h / 2))
if center is None:
center = image_center
if interp is None:
interp = cv2.INTER_LINEAR
abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
if expand:
# find the new width and height bounds
bound_w, bound_h = np.rint(
[h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
).astype(int)
else:
bound_w, bound_h = w, h
self._set_attributes(locals())
self.rm_coords = self.create_rotation_matrix()
# Needed because of this problem https://github.com/opencv/opencv/issues/11784
self.rm_image = self.create_rotation_matrix(offset=-0.5)
def apply_image(self, img, interp=None):
"""
img should be a numpy array, formatted as Height * Width * Nchannels
"""
if len(img) == 0 or self.angle % 360 == 0:
return img
assert img.shape[:2] == (self.h, self.w)
interp = interp if interp is not None else self.interp
return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
def apply_coords(self, coords):
"""
coords should be a N * 2 array-like, containing N pairs of (x, y) coordinates
"""
coords = np.asarray(coords, dtype=float)
if len(coords) == 0 or self.angle % 360 == 0:
return coords
return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
def apply_segmentation(self, segmentation):
segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
return segmentation
def create_rotation_matrix(self, offset=0):
center = (self.center[0] + offset, self.center[1] + offset)
rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
if self.expand:
# Find the coordinates of the center of rotation in the new image
# The only point for which we know the future coordinates is the center of the image
rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
# shift the rotation center to the new coordinates
rm[:, 2] += new_center
return rm
def inverse(self):
"""
The inverse is to rotate it back with expand, and crop to get the original shape.
"""
if not self.expand:  # Not possible to invert if a part of the image is lost
raise NotImplementedError()
rotation = RotationTransform(
self.bound_h, self.bound_w, -self.angle, True, None, self.interp
)
crop = CropTransform(
(rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
)
return TransformList([rotation, crop])
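# Illustrative sketch (not part of the original file): with expand=True and a
# 90 degree rotation, the output bounds swap width and height, and the image
# center maps to the center of the expanded image (hypothetical values):
#
#   t = RotationTransform(h=100, w=200, angle=90)
#   (t.bound_w, t.bound_h)                     # -> (100, 200)
#   t.apply_coords(np.array([[100.0, 50.0]]))  # -> approximately [[50., 100.]]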
class ColorTransform(Transform):
"""
Generic wrapper for any photometric transforms.
These transformations should only affect the color space and
not the coordinate space of the image (e.g. annotation
coordinates such as bounding boxes should not be changed)
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in an ndarray and returns an ndarray.
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__()
self._set_attributes(locals())
def apply_image(self, img):
return self.op(img)
def apply_coords(self, coords):
return coords
def inverse(self):
return NoOpTransform()
def apply_segmentation(self, segmentation):
return segmentation
class PILColorTransform(ColorTransform):
"""
Generic wrapper for PIL Photometric image transforms,
which affect the color space and not the coordinate
space of the image
"""
def __init__(self, op):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in a PIL Image and returns a transformed
PIL Image.
For reference on possible operations see:
- https://pillow.readthedocs.io/en/stable/
"""
if not callable(op):
raise ValueError("op parameter should be callable")
super().__init__(op)
def apply_image(self, img):
img = Image.fromarray(img)
return np.asarray(super().apply_image(img))
def HFlip_rotated_box(transform, rotated_boxes):
"""
Apply the horizontal flip transform on rotated boxes.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
# Transform x_center
rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
# Transform angle
rotated_boxes[:, 4] = -rotated_boxes[:, 4]
return rotated_boxes
def Resize_rotated_box(transform, rotated_boxes):
"""
Apply the resizing transform on rotated boxes. For details of how these (approximation)
formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
Args:
rotated_boxes (ndarray): Nx5 floating point array of
(x_center, y_center, width, height, angle_degrees) format
in absolute coordinates.
"""
scale_factor_x = transform.new_w * 1.0 / transform.w
scale_factor_y = transform.new_h * 1.0 / transform.h
rotated_boxes[:, 0] *= scale_factor_x
rotated_boxes[:, 1] *= scale_factor_y
theta = rotated_boxes[:, 4] * np.pi / 180.0
c = np.cos(theta)
s = np.sin(theta)
rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
return rotated_boxes
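# Illustrative worked example (not part of the original file): for an
# axis-aligned box (angle_degrees = 0), the formulas above reduce to plain
# per-axis scaling with an unchanged angle, since cos(0) = 1 and sin(0) = 0:
#
#   w' = w * sqrt((sx * 1)^2 + (sy * 0)^2) = w * sx
#   h' = h * sqrt((sx * 0)^2 + (sy * 1)^2) = h * sy
#   angle' = arctan2(sx * 0, sy * 1) * 180 / pi = 0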
HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
ResizeTransform.register_type("rotated_box", Resize_rotated_box)
# not necessary any more with latest fvcore
NoOpTransform.register_type("rotated_box", lambda t, x: x)
# [end of file: banmo-main / third_party/detectron2_old/detectron2/data/transforms/transform.py]
# Copyright (c) Facebook, Inc. and its affiliates.
from .distributed_sampler import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler
from .grouped_batch_sampler import GroupedBatchSampler
__all__ = [
"GroupedBatchSampler",
"TrainingSampler",
"InferenceSampler",
"RepeatFactorTrainingSampler",
]
# [end of file: banmo-main / third_party/detectron2_old/detectron2/data/samplers/__init__.py]
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from torch.utils.data.sampler import BatchSampler, Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that each batch only contains elements from the same group.
It also tries to yield mini-batches in an ordering that is as close as
possible to the ordering of the original sampler.
"""
def __init__(self, sampler, group_ids, batch_size):
"""
Args:
sampler (Sampler): Base sampler.
group_ids (list[int]): If the sampler produces indices in range [0, N),
`group_ids` must be a list of `N` ints which contains the group id of each sample.
The group ids must be a set of integers in the range [0, num_groups).
batch_size (int): Size of mini-batch.
"""
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = np.asarray(group_ids)
assert self.group_ids.ndim == 1
self.batch_size = batch_size
groups = np.unique(self.group_ids).tolist()
# buffer the indices of each group until batch size is reached
self.buffer_per_group = {k: [] for k in groups}
def __iter__(self):
for idx in self.sampler:
group_id = self.group_ids[idx]
group_buffer = self.buffer_per_group[group_id]
group_buffer.append(idx)
if len(group_buffer) == self.batch_size:
yield group_buffer[:] # yield a copy of the list
del group_buffer[:]
def __len__(self):
raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
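# Illustrative sketch (not part of the original file): grouping by image
# aspect ratio, a common use in detection, with hypothetical group ids:
#
#   from torch.utils.data.sampler import SequentialSampler
#   group_ids = [0, 1, 0, 0, 1, 0]  # e.g. 0 = landscape, 1 = portrait
#   sampler = GroupedBatchSampler(SequentialSampler(range(6)), group_ids, batch_size=2)
#   list(sampler)  # -> [[0, 2], [1, 4], [3, 5]]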
# [end of file: banmo-main / third_party/detectron2_old/detectron2/data/samplers/grouped_batch_sampler.py]
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import math
from collections import defaultdict
from typing import Optional
import torch
from torch.utils.data.sampler import Sampler
from detectron2.utils import comm
class TrainingSampler(Sampler):
"""
In training, we only care about the "infinite stream" of training data.
So this sampler produces an infinite stream of indices and
all workers cooperate to correctly shuffle the indices and sample different indices.
The sampler in each worker effectively produces `indices[worker_id::num_workers]`
where `indices` is an infinite stream of indices consisting of
`shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
or `range(size) + range(size) + ...` (if shuffle is False)
"""
def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
"""
Args:
size (int): the total number of samples in the underlying dataset to sample from
shuffle (bool): whether to shuffle the indices or not
seed (int): the initial seed of the shuffle. Must be the same
across all workers. If None, will use a random seed shared
among workers (require synchronization among all workers).
"""
self._size = size
assert size > 0
self._shuffle = shuffle
if seed is None:
seed = comm.shared_random_seed()
self._seed = int(seed)
self._rank = comm.get_rank()
self._world_size = comm.get_world_size()
def __iter__(self):
start = self._rank
yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
def _infinite_indices(self):
g = torch.Generator()
g.manual_seed(self._seed)
while True:
if self._shuffle:
yield from torch.randperm(self._size, generator=g).tolist()
else:
yield from torch.arange(self._size).tolist()
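# Illustrative sketch (not part of the original file): each worker slices the
# same infinite stream with its own offset. With size=5, shuffle=False and
# world_size=2, rank 0 effectively sees indices[0::2]:
#
#   import itertools
#   stream = itertools.cycle(range(5))  # range(5) + range(5) + ...
#   list(itertools.islice(stream, 0, 10, 2))  # rank 0 -> [0, 2, 4, 1, 3]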
class RepeatFactorTrainingSampler(Sampler):
"""
Similar to TrainingSampler, but a sample may appear more often than others,
based on its "repeat factor". This is suitable for training on class-imbalanced datasets such as LVIS.
"""
def __init__(self, repeat_factors, *, shuffle=True, seed=None):
"""
Args:
repeat_factors (Tensor): a float vector, the repeat factor for each index. When it is
full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
shuffle (bool): whether to shuffle the indices or not
seed (int): the initial seed of the shuffle. Must be the same
across all workers. If None, will use a random seed shared
among workers (require synchronization among all workers).
"""
self._shuffle = shuffle
if seed is None:
seed = comm.shared_random_seed()
self._seed = int(seed)
self._rank = comm.get_rank()
self._world_size = comm.get_world_size()
# Split into whole number (_int_part) and fractional (_frac_part) parts.
self._int_part = torch.trunc(repeat_factors)
self._frac_part = repeat_factors - self._int_part
@staticmethod
def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
"""
Compute (fractional) per-image repeat factors based on category frequency.
The repeat factor for an image is a function of the frequency of the rarest
category labeled in that image. The "frequency of category c" in [0, 1] is defined
as the fraction of images in the training set (without repeats) in which category c
appears.
See :paper:`lvis` (>= v2) Appendix B.2.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
repeat_thresh (float): frequency threshold below which data is repeated.
If the frequency is a quarter of `repeat_thresh`, the image gets a
repeat factor of 2 (since r = sqrt(t / f)).
Returns:
torch.Tensor:
the i-th element is the repeat factor for the dataset image at index i.
"""
# 1. For each category c, compute the fraction of images that contain it: f(c)
category_freq = defaultdict(int)
for dataset_dict in dataset_dicts: # For each image (without repeats)
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
for cat_id in cat_ids:
category_freq[cat_id] += 1
num_images = len(dataset_dicts)
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t / f(c)))
category_rep = {
cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
rep_factors = []
for dataset_dict in dataset_dicts:
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
rep_factors.append(rep_factor)
return torch.tensor(rep_factors, dtype=torch.float32)
def _get_epoch_indices(self, generator):
"""
Create a list of dataset indices (with repeats) to use for one epoch.
Args:
generator (torch.Generator): pseudo random number generator used for
stochastic rounding.
Returns:
torch.Tensor: list of dataset indices to use in one epoch. Each index
is repeated based on its calculated repeat factor.
"""
# Since repeat factors are fractional, we use stochastic rounding so
# that the target repeat factor is achieved in expectation over the
# course of training
rands = torch.rand(len(self._frac_part), generator=generator)
rep_factors = self._int_part + (rands < self._frac_part).float()
# Construct a list of indices in which we repeat images as specified
indices = []
for dataset_index, rep_factor in enumerate(rep_factors):
indices.extend([dataset_index] * int(rep_factor.item()))
return torch.tensor(indices, dtype=torch.int64)
def __iter__(self):
start = self._rank
yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
def _infinite_indices(self):
g = torch.Generator()
g.manual_seed(self._seed)
while True:
# Sample indices with repeats determined by stochastic rounding; each
# "epoch" may have a slightly different size due to the rounding.
indices = self._get_epoch_indices(g)
if self._shuffle:
randperm = torch.randperm(len(indices), generator=g)
yield from indices[randperm].tolist()
else:
yield from indices.tolist()
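# Illustrative worked example (not part of the original file): with
# repeat_thresh t = 0.001, a category appearing in 1 of 10000 images has
# f(c) = 0.0001, so r(c) = max(1, sqrt(0.001 / 0.0001)) = sqrt(10) ~ 3.16.
# An image containing only that category gets repeat factor ~3.16: it appears
# 3 times in every epoch, plus a 4th time with probability ~0.16 (the
# stochastic rounding in _get_epoch_indices), achieving the target factor
# in expectation.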
class InferenceSampler(Sampler):
"""
Produce indices for inference across all workers.
Inference needs to run on the __exact__ set of samples,
therefore when the total number of samples is not divisible by the number of workers,
this sampler produces a different number of samples on different workers.
"""
def __init__(self, size: int):
"""
Args:
size (int): the total number of samples in the underlying dataset to sample from
"""
self._size = size
assert size > 0
self._rank = comm.get_rank()
self._world_size = comm.get_world_size()
shard_size = (self._size - 1) // self._world_size + 1
begin = shard_size * self._rank
end = min(shard_size * (self._rank + 1), self._size)
self._local_indices = range(begin, end)
def __iter__(self):
yield from self._local_indices
def __len__(self):
return len(self._local_indices)
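# Illustrative worked example (not part of the original file): the shards are
# contiguous and cover the dataset exactly once. With size=10 and 4 workers,
# shard_size = (10 - 1) // 4 + 1 = 3, so the workers receive
#   rank 0: range(0, 3)   rank 1: range(3, 6)
#   rank 2: range(6, 9)   rank 3: range(9, 10)  # the last shard may be smaller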
# [end of file: banmo-main / third_party/detectron2_old/detectron2/data/samplers/distributed_sampler.py]
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import datetime
import itertools
import logging
import os
import tempfile
import time
from collections import Counter
import torch
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from fvcore.common.param_scheduler import ParamScheduler
from fvcore.common.timer import Timer
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import detectron2.utils.comm as comm
from detectron2.evaluation.testing import flatten_results_dict
from detectron2.solver import LRMultiplier
from detectron2.utils.events import EventStorage, EventWriter
from detectron2.utils.file_io import PathManager
from .train_loop import HookBase
__all__ = [
"CallbackHook",
"IterationTimer",
"PeriodicWriter",
"PeriodicCheckpointer",
"LRScheduler",
"AutogradProfiler",
"EvalHook",
"PreciseBN",
]
"""
Implement some common hooks.
"""
class CallbackHook(HookBase):
"""
Create a hook using callback functions provided by the user.
"""
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
"""
Each argument is a function that takes one argument: the trainer.
"""
self._before_train = before_train
self._before_step = before_step
self._after_step = after_step
self._after_train = after_train
def before_train(self):
if self._before_train:
self._before_train(self.trainer)
def after_train(self):
if self._after_train:
self._after_train(self.trainer)
# The functions may be closures that hold reference to the trainer
# Therefore, delete them to avoid circular reference.
del self._before_train, self._after_train
del self._before_step, self._after_step
def before_step(self):
if self._before_step:
self._before_step(self.trainer)
def after_step(self):
if self._after_step:
self._after_step(self.trainer)
class IterationTimer(HookBase):
"""
Track the time spent for each iteration (each run_step call in the trainer).
Print a summary at the end of training.
This hook uses the time between the call to its :meth:`before_step`
and :meth:`after_step` methods.
Under the convention that :meth:`before_step` of all hooks should only
take a negligible amount of time, the :class:`IterationTimer` hook should be
placed at the beginning of the list of hooks to obtain accurate timing.
"""
def __init__(self, warmup_iter=3):
"""
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
"""
self._warmup_iter = warmup_iter
self._step_timer = Timer()
self._start_time = time.perf_counter()
self._total_timer = Timer()
def before_train(self):
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
def after_train(self):
logger = logging.getLogger(__name__)
total_time = time.perf_counter() - self._start_time
total_time_minus_hooks = self._total_timer.seconds()
hook_time = total_time - total_time_minus_hooks
num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
if num_iter > 0 and total_time_minus_hooks > 0:
# Speed is meaningful only after warmup
# NOTE this format is parsed by grep in some scripts
logger.info(
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
num_iter,
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
total_time_minus_hooks / num_iter,
)
)
logger.info(
"Total training time: {} ({} on hooks)".format(
str(datetime.timedelta(seconds=int(total_time))),
str(datetime.timedelta(seconds=int(hook_time))),
)
)
def before_step(self):
self._step_timer.reset()
self._total_timer.resume()
def after_step(self):
# +1 because we're in after_step, the current step is done
# but not yet counted
iter_done = self.trainer.iter - self.trainer.start_iter + 1
if iter_done >= self._warmup_iter:
sec = self._step_timer.seconds()
self.trainer.storage.put_scalars(time=sec)
else:
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
class PeriodicWriter(HookBase):
"""
Write events to EventStorage (by calling ``writer.write()``) periodically.
It is executed every ``period`` iterations and after the last iteration.
Note that ``period`` does not affect how data is smoothed by each writer.
"""
def __init__(self, writers, period=20):
"""
Args:
writers (list[EventWriter]): a list of EventWriter objects
period (int):
"""
self._writers = writers
for w in writers:
assert isinstance(w, EventWriter), w
self._period = period
def after_step(self):
if (self.trainer.iter + 1) % self._period == 0 or (
self.trainer.iter == self.trainer.max_iter - 1
):
for writer in self._writers:
writer.write()
def after_train(self):
for writer in self._writers:
# If any new data is found (e.g. produced by other after_train hooks),
# write it before closing
writer.write()
writer.close()
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
"""
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
Note that when used as a hook,
it is unable to save additional data other than what's defined
by the given `checkpointer`.
It is executed every ``period`` iterations and after the last iteration.
"""
def before_train(self):
self.max_iter = self.trainer.max_iter
def after_step(self):
# No way to use **kwargs
self.step(self.trainer.iter)
class LRScheduler(HookBase):
"""
A hook which executes a torch builtin LR scheduler and summarizes the LR.
It is executed after every iteration.
"""
def __init__(self, optimizer=None, scheduler=None):
"""
Args:
optimizer (torch.optim.Optimizer):
scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
in the optimizer.
If any argument is not given, will try to obtain it from the trainer.
"""
self._optimizer = optimizer
self._scheduler = scheduler
def before_train(self):
self._optimizer = self._optimizer or self.trainer.optimizer
if isinstance(self.scheduler, ParamScheduler):
self._scheduler = LRMultiplier(
self._optimizer,
self.scheduler,
self.trainer.max_iter,
last_iter=self.trainer.iter - 1,
)
# NOTE: some heuristics on what LR to summarize
# summarize the param group with the most parameters
largest_group = max(len(g["params"]) for g in self._optimizer.param_groups)
if largest_group == 1:
# If all groups have one parameter,
# then find the most common initial LR, and use it for summary
lr_count = Counter([g["lr"] for g in self._optimizer.param_groups])
lr = lr_count.most_common()[0][0]
for i, g in enumerate(self._optimizer.param_groups):
if g["lr"] == lr:
self._best_param_group_id = i
break
else:
for i, g in enumerate(self._optimizer.param_groups):
if len(g["params"]) == largest_group:
self._best_param_group_id = i
break
def after_step(self):
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
self.scheduler.step()
@property
def scheduler(self):
return self._scheduler or self.trainer.scheduler
def state_dict(self):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
return self.scheduler.state_dict()
return {}
def load_state_dict(self, state_dict):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
logger = logging.getLogger(__name__)
logger.info("Loading scheduler from state_dict ...")
self.scheduler.load_state_dict(state_dict)
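# Illustrative sketch (not part of the original file): the "LR to summarize"
# heuristic in before_train, with hypothetical param groups. When every group
# holds a single parameter, e.g.
#
#   [{"params": [p], "lr": 0.1}, {"params": [q], "lr": 0.01}, {"params": [r], "lr": 0.1}]
#
# the most common initial LR (0.1) wins, and the first group with that LR
# (index 0) is the one whose "lr" gets logged every step.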
class AutogradProfiler(HookBase):
"""
A hook which runs `torch.autograd.profiler.profile`.
Examples:
::
hooks.AutogradProfiler(
lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
)
The above example will run the profiler for iteration 10~20 and dump
results to ``OUTPUT_DIR``. We did not profile the first few iterations
because they are typically slower than the rest.
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
Note:
When used together with NCCL on older version of GPUs,
autograd profiler may cause deadlock because it unnecessarily allocates
memory on every device it sees. The memory management calls, if
interleaved with NCCL calls, lead to deadlock on GPUs that do not
support ``cudaLaunchCooperativeKernelMultiDevice``.
"""
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
"""
Args:
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
and returns whether to enable the profiler.
It will be called once every step, and can be used to select which steps to profile.
output_dir (str): the output directory to dump tracing files.
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
"""
self._enable_predicate = enable_predicate
self._use_cuda = use_cuda
self._output_dir = output_dir
def before_step(self):
if self._enable_predicate(self.trainer):
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
self._profiler.__enter__()
else:
self._profiler = None
def after_step(self):
if self._profiler is None:
return
self._profiler.__exit__(None, None, None)
PathManager.mkdirs(self._output_dir)
out_file = os.path.join(
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
)
if "://" not in out_file:
self._profiler.export_chrome_trace(out_file)
else:
# Support non-posix filesystems
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
tmp_file = os.path.join(d, "tmp.json")
self._profiler.export_chrome_trace(tmp_file)
with open(tmp_file) as f:
content = f.read()
with PathManager.open(out_file, "w") as f:
f.write(content)
class EvalHook(HookBase):
"""
Run an evaluation function periodically, and at the end of training.
It is executed every ``eval_period`` iterations and after the last iteration.
"""
def __init__(self, eval_period, eval_function):
"""
Args:
eval_period (int): the period to run `eval_function`. Set to 0 to
not evaluate periodically (but still after the last iteration).
eval_function (callable): a function which takes no arguments, and
returns a nested dict of evaluation metrics.
Note:
This hook must be enabled in all workers or none of them.
If you would like only certain workers to perform evaluation,
give other workers a no-op function (`eval_function=lambda: None`).
"""
self._period = eval_period
self._func = eval_function
def _do_eval(self):
results = self._func()
if results:
assert isinstance(
results, dict
), "Eval function must return a dict. Got {} instead.".format(results)
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
try:
v = float(v)
except Exception as e:
raise ValueError(
"[EvalHook] eval_function should return a nested dict of float. "
"Got '{}: {}' instead.".format(k, v)
) from e
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
# Evaluation may take a different amount of time among workers.
# A barrier makes them start the next iteration together.
comm.synchronize()
def after_step(self):
next_iter = self.trainer.iter + 1
if self._period > 0 and next_iter % self._period == 0:
# do the last eval in after_train
if next_iter != self.trainer.max_iter:
self._do_eval()
def after_train(self):
# This condition is to prevent the eval from running after a failed training
if self.trainer.iter + 1 >= self.trainer.max_iter:
self._do_eval()
# func is likely a closure that holds reference to the trainer
# therefore we clean it to avoid circular reference in the end
del self._func
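# Illustrative sketch (not part of the original file): a hypothetical
# eval_function returning a nested dict of floats. flatten_results_dict is
# assumed here to join nested keys with "/", so the result below would be
# logged as the scalars "bbox/AP" and "bbox/AP50":
#
#   def eval_function():
#       return {"bbox": {"AP": 40.0, "AP50": 60.0}}
#
#   hook = EvalHook(eval_period=5000, eval_function=eval_function)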
class PreciseBN(HookBase):
"""
The standard implementation of BatchNorm uses EMA in inference, which is
sometimes suboptimal.
This class computes the true average of statistics rather than the moving average,
and puts the true averages into every BN layer in the given model.
It is executed every ``period`` iterations and after the last iteration.
"""
def __init__(self, period, model, data_loader, num_iter):
"""
Args:
period (int): the period this hook is run, or 0 to not run during training.
The hook will always run at the end of training.
model (nn.Module): a module whose BN layers in training mode will be
updated by precise BN.
Note that user is responsible for ensuring the BN layers to be
updated are in training mode when this hook is triggered.
data_loader (iterable): it will produce data to be run by `model(data)`.
num_iter (int): number of iterations used to compute the precise
statistics.
"""
self._logger = logging.getLogger(__name__)
if len(get_bn_modules(model)) == 0:
self._logger.info(
"PreciseBN is disabled because model does not contain BN layers in training mode."
)
self._disabled = True
return
self._model = model
self._data_loader = data_loader
self._num_iter = num_iter
self._period = period
self._disabled = False
self._data_iter = None
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self.update_stats()
def update_stats(self):
"""
Update the model with precise statistics. Users can manually call this method.
"""
if self._disabled:
return
if self._data_iter is None:
self._data_iter = iter(self._data_loader)
def data_loader():
for num_iter in itertools.count(1):
if num_iter % 100 == 0:
self._logger.info(
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
)
# This way we can reuse the same iterator
yield next(self._data_iter)
with EventStorage(): # capture events in a new storage to discard them
self._logger.info(
"Running precise-BN for {} iterations... ".format(self._num_iter)
+ "Note that this could produce different statistics every time."
)
update_bn_stats(self._model, data_loader(), self._num_iter)
# [end of file: banmo-main / third_party/detectron2_old/detectron2/engine/hooks.py]
# Copyright (c) Facebook, Inc. and its affiliates.
from .launch import *
from .train_loop import *
__all__ = [k for k in globals().keys() if not k.startswith("_")]
# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__)
# but still make them available here
from .hooks import *
from .defaults import *
# [end of file: banmo-main / third_party/detectron2_old/detectron2/engine/__init__.py]
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import time
import weakref
from typing import Dict, List, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
from detectron2.utils.logger import _log_api_usage
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
::
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
iter += 1
hook.after_train()
Notes:
1. In the hook method, users can access ``self.trainer`` to access more
properties about the context (e.g., model, current iteration, or config
if using :class:`DefaultTrainer`).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
"""
trainer: "TrainerBase" = None
"""
A weak reference to the trainer object. Set by the trainer when the hook is registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
def state_dict(self):
"""
Hooks are stateless by default, but can be made checkpointable by
implementing `state_dict` and `load_state_dict`.
"""
return {}
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self) -> None:
self._hooks: List[HookBase] = []
self.iter: int = 0
self.start_iter: int = 0
self.max_iter: int
self.storage: EventStorage
_log_api_usage("trainer." + self.__class__.__name__)
def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage.iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == trainer.iter
# for the entire execution of each step
self.storage.iter = self.iter
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
def run_step(self):
raise NotImplementedError
def state_dict(self):
ret = {"iteration": self.iter}
hooks_state = {}
for h in self._hooks:
sd = h.state_dict()
if sd:
name = type(h).__qualname__
if name in hooks_state:
# TODO handle repetitive stateful hooks
continue
hooks_state[name] = sd
if hooks_state:
ret["hooks"] = hooks_state
return ret
def load_state_dict(self, state_dict):
logger = logging.getLogger(__name__)
self.iter = state_dict["iteration"]
for key, value in state_dict.get("hooks", {}).items():
for h in self._hooks:
try:
name = type(h).__qualname__
except AttributeError:
continue
if name == key:
h.load_state_dict(value)
break
else:
logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes data from the data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def _write_metrics(
self,
loss_dict: Dict[str, torch.Tensor],
data_time: float,
prefix: str = "",
):
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
"""
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(metrics_dict.values())
if not np.isfinite(total_losses_reduced):
raise FloatingPointError(
f"Loss became infinite or NaN at iteration={self.iter}!\n"
f"loss_dict = {metrics_dict}"
)
storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
if len(metrics_dict) > 1:
storage.put_scalars(**metrics_dict)
def state_dict(self):
ret = super().state_dict()
ret["optimizer"] = self.optimizer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.optimizer.load_state_dict(state_dict["optimizer"])
class AMPTrainer(SimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, grad_scaler=None):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
super().__init__(model, data_loader, optimizer)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
# [end of file: banmo-main / third_party/detectron2_old/detectron2/engine/train_loop.py]
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from datetime import timedelta
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils import comm
__all__ = ["DEFAULT_TIMEOUT", "launch"]
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch(
main_func,
num_gpus_per_machine,
num_machines=1,
machine_rank=0,
dist_url=None,
args=(),
timeout=DEFAULT_TIMEOUT,
):
"""
Launch multi-gpu or distributed training.
This function must be called on all machines involved in the training.
It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
Args:
main_func: a function that will be called by `main_func(*args)`
num_gpus_per_machine (int): number of GPUs per machine
num_machines (int): the total number of machines
machine_rank (int): the rank of this machine
dist_url (str): url to connect to for distributed jobs, including protocol
e.g. "tcp://127.0.0.1:8686".
Can be set to "auto" to automatically select a free port on localhost
timeout (timedelta): timeout of the distributed workers
args (tuple): arguments passed to main_func
"""
world_size = num_machines * num_gpus_per_machine
if world_size > 1:
# https://github.com/pytorch/pytorch/pull/14391
# TODO prctl in spawned processes
if dist_url == "auto":
assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
port = _find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
if num_machines > 1 and dist_url.startswith("file://"):
logger = logging.getLogger(__name__)
logger.warning(
"file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
)
mp.spawn(
_distributed_worker,
nprocs=num_gpus_per_machine,
args=(
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout,
),
daemon=False,
)
else:
main_func(*args)
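# Illustrative usage sketch (not part of the original file): launching a
# hypothetical main(args) on a single 8-GPU machine, letting launch() pick a
# free port:
#
#   def main(args):
#       ...  # build the model, trainer, etc.
#
#   launch(main, num_gpus_per_machine=8, num_machines=1, machine_rank=0,
#          dist_url="auto", args=(args,))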
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
main_func(*args)
# [end of file: banmo-main / third_party/detectron2_old/detectron2/engine/launch.py]
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic users may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from . import hooks
from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
"See config references at "
"https://detectron2.readthedocs.io/modules/config.html#config-references",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try to select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
parts = k.split(".")
# https://github.com/omry/omegaconf/issues/674
for p in parts:
if p not in cfg:
break
cfg = OmegaConf.select(cfg, p)
else:
return cfg
return default
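# Illustrative sketch (not part of the original file): _try_get_key lets the
# same code read both CfgNode-style and LazyConfig-style configs, e.g.
#
#   _try_get_key(cfg, "OUTPUT_DIR", "train.output_dir", default="./output")
#
# returns cfg.OUTPUT_DIR for a yacs config, cfg.train.output_dir for an
# omegaconf one, and "./output" when neither key exists.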
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# a typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class makes the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more fancy, please refer to its source code as examples
to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
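# --- Illustrative sketch (not part of the original module): running
# DefaultPredictor over a folder of images. The glob pattern, result handling,
# and function name are assumptions for demonstration only.
def _demo_predict_folder(cfg, pattern="datasets/demo/*.jpg"):
    import glob
    import cv2
    pred = DefaultPredictor(cfg)
    results = {}
    for path in sorted(glob.glob(pattern)):
        img = cv2.imread(path)    # BGR, as DefaultPredictor expects
        results[path] = pred(img) # dict, typically with an "instances" field
    return results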
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
    2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if it exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
    may easily become invalid in new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
        Otherwise, this is considered an independent training run. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
        # Do PreciseBN before checkpointer, because it updates the model and needs to
        # be saved by the checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`detectron2.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Returns:
DatasetEvaluator or None
It is not implemented by default.
"""
raise NotImplementedError(
"""
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
                    logger.warning(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
        When the config is defined for a certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
        * training steps and warmup steps are scaled inversely proportionally.
        * the learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
        It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
DefaultTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
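# --- Illustrative sketch (not part of the original module): the subclassing
# pattern that DefaultTrainer.build_evaluator() asks for in its
# NotImplementedError. COCOEvaluator is detectron2's standard COCO-style
# evaluator; its exact signature varies across detectron2 versions, and the
# "eval" output folder is an assumption for demonstration only.
class _DemoTrainerWithEval(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        from detectron2.evaluation import COCOEvaluator  # local import to avoid cycles
        return COCOEvaluator(dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, "eval"))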
|
banmo-main
|
third_party/detectron2_old/detectron2/engine/defaults.py
|
from __future__ import print_function
import sys
sys.path.insert(0,'../')
import cv2
import pdb
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import time
from flowutils.io import mkdir_p
from flowutils.util_flow import write_flow, save_pfm
from flowutils.flowlib import point_vec
from flowutils.dydepth import warp_flow
import glob
cudnn.benchmark = False
parser = argparse.ArgumentParser(description='VCN+expansion')
parser.add_argument('--datapath', default='/ssd/kitti_scene/training/',
help='dataset path')
parser.add_argument('--loadmodel', default=None,
help='model path')
parser.add_argument('--testres', type=float, default=1,
help='resolution')
parser.add_argument('--maxdisp', type=int, default=256,
                    help='maximum disparity. Only affects the coarsest cost volume size')
parser.add_argument('--fac', type=float, default=1,
                    help='controls the shape of the search grid. Only affects the coarse cost volume size')
parser.add_argument('--dframe', type=int ,default=1,
help='how many frames to skip')
args = parser.parse_args()
mean_L = [[0.33,0.33,0.33]]
mean_R = [[0.33,0.33,0.33]]
# construct model, VCN-expansion
from models.VCNplus import VCN
from models.VCNplus import WarpModule, flow_reg
model = VCN([1, 256, 256], md=[int(4*(args.maxdisp/256)),4,4,4,4], fac=args.fac)
model = nn.DataParallel(model, device_ids=[0])
model.cuda()
if args.loadmodel is not None:
pretrained_dict = torch.load(args.loadmodel)
mean_L=pretrained_dict['mean_L']
mean_R=pretrained_dict['mean_R']
pretrained_dict['state_dict'] = {k:v for k,v in pretrained_dict['state_dict'].items()}
model.load_state_dict(pretrained_dict['state_dict'],strict=False)
else:
print('dry run')
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
seqname = args.datapath.strip().split('/')[-2]
dframe = args.dframe
mkdir_p('./%s/FlowFW_%d' % (seqname,dframe))
mkdir_p('./%s/FlowBW_%d' % (seqname,dframe))
test_left_img = sorted(glob.glob('%s/*'%(args.datapath)))
silhouettes = sorted(glob.glob('%s/*'%(args.datapath.replace('JPEGImages', 'Annotations'))))
def flow_inference(imgL_o, imgR_o):
# for gray input images
if len(imgL_o.shape) == 2:
imgL_o = np.tile(imgL_o[:,:,np.newaxis],(1,1,3))
imgR_o = np.tile(imgR_o[:,:,np.newaxis],(1,1,3))
# resize
# set test res
if args.testres == -1:
testres = np.sqrt(2*1e6/(imgL_o.shape[0]*imgL_o.shape[1]))
#testres = np.sqrt(1e6/(imgL_o.shape[0]*imgL_o.shape[1]))
else:
testres = args.testres
maxh = imgL_o.shape[0]*testres
maxw = imgL_o.shape[1]*testres
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
input_size = imgL_o.shape
imgL = cv2.resize(imgL_o,(max_w, max_h))
imgR = cv2.resize(imgR_o,(max_w, max_h))
imgL_noaug = torch.Tensor(imgL/255.)[np.newaxis].float().cuda()
# flip channel, subtract mean
imgL = imgL[:,:,::-1].copy() / 255. - np.asarray(mean_L).mean(0)[np.newaxis,np.newaxis,:]
imgR = imgR[:,:,::-1].copy() / 255. - np.asarray(mean_R).mean(0)[np.newaxis,np.newaxis,:]
imgL = np.transpose(imgL, [2,0,1])[np.newaxis]
imgR = np.transpose(imgR, [2,0,1])[np.newaxis]
# modify module according to inputs
for i in range(len(model.module.reg_modules)):
model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))],
ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\
maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\
fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()
for i in range(len(model.module.warp_modules)):
model.module.warp_modules[i] = WarpModule([1,max_w//(2**(6-i)), max_h//(2**(6-i))]).cuda()
# get intrinsics
intr_list = [torch.Tensor(inxx).cuda() for inxx in [[1],[1],[1],[1],[1],[0],[0],[1],[0],[0]]]
fl_next = 1
intr_list.append(torch.Tensor([input_size[1] / max_w]).cuda())
intr_list.append(torch.Tensor([input_size[0] / max_h]).cuda())
intr_list.append(torch.Tensor([fl_next]).cuda())
disc_aux = [None,None,None,intr_list,imgL_noaug,None]
# forward
imgL = Variable(torch.FloatTensor(imgL).cuda())
imgR = Variable(torch.FloatTensor(imgR).cuda())
with torch.no_grad():
imgLR = torch.cat([imgL,imgR],0)
model.eval()
torch.cuda.synchronize()
start_time = time.time()
rts = model(imgLR, disc_aux)
torch.cuda.synchronize()
        ttime = (time.time() - start_time); print('time = %.2f ms' % (ttime*1000))
flow, occ, logmid, logexp = rts
# upsampling
occ = cv2.resize(occ.data.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logexp = cv2.resize(logexp.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
logmid = cv2.resize(logmid.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)
flow = torch.squeeze(flow).data.cpu().numpy()
flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],
cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)
flow[:,:,0] *= imgL_o.shape[1] / max_w
flow[:,:,1] *= imgL_o.shape[0] / max_h
# deal with unequal size
x0,y0 =np.meshgrid(range(input_size[1]),range(input_size[0]))
hp0 = np.stack([x0,y0],-1) # screen coord
hp1 = flow + hp0
hp1[:,:,0] = hp1[:,:,0]/float(imgL_o.shape[1])*float(imgR_o.shape[1])
hp1[:,:,1] = hp1[:,:,1]/float(imgL_o.shape[0])*float(imgR_o.shape[0])
flow = hp1 - hp0
flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)
return flow, occ
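# --- Illustrative sketch (not part of the original script): flow_inference()
# above pads the test resolution up to a multiple of 64, because VCN's coarsest
# pyramid level downsamples by 64. The helper below isolates that rounding;
# its name is an assumption for demonstration only.
def _round_up_to_64(x):
    import math
    return int(math.ceil(x / 64.) * 64)  # e.g. 540 -> 576, 512 -> 512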
def main():
model.eval()
inx=0;jnx=dframe
while True:
if jnx>=len(test_left_img):break
print('%s/%s'%(test_left_img[inx],test_left_img[jnx]))
if inx%dframe==0:
imgL_o = cv2.imread(test_left_img[inx])[:,:,::-1]
imgR_o = cv2.imread(test_left_img[jnx])[:,:,::-1]
mask =cv2.imread(silhouettes[inx],0)
maskR =cv2.imread(silhouettes[jnx],0)
masko = mask.copy()
maskRo = maskR.copy()
mask = mask/np.sort(np.unique(mask))[1]
occluder = mask==255
mask[occluder] = 0
mask =np.logical_and(mask>0, mask!=255)
maskR = maskR/np.sort(np.unique(maskR))[1]
occluder = maskR==255
maskR[occluder] = 0
maskR =np.logical_and(maskR>0,maskR!=255)
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
length = [ (xid.max()-xid.min())//2, (yid.max()-yid.min())//2]
flowfw, occfw = flow_inference(imgL_o, imgR_o)
flowfw_normed = np.concatenate( [flowfw[:,:,:1]/length[0], flowfw[:,:,1:2]/length[1]],-1 )
flowbw, occbw = flow_inference(imgR_o, imgL_o)
# save predictions
# downsample first
flowfw = resize_to_target(flowfw,is_flow=True)
flowbw = resize_to_target(flowbw,is_flow=True)
occfw = resize_to_target(occfw, is_flow=False)
occbw = resize_to_target(occbw, is_flow=False)
imgL_o = resize_to_target(imgL_o, is_flow=False)
imgR_o = resize_to_target(imgR_o, is_flow=False)
mask = resize_to_target(mask .astype(float), is_flow=False).astype(bool)
maskR = resize_to_target(maskR.astype(float), is_flow=False) .astype(bool)
with open('%s/FlowFW_%d/flo-%05d.pfm'% (seqname,dframe,inx),'w') as f:
save_pfm(f,flowfw[::-1].astype(np.float32))
with open('%s/FlowFW_%d/occ-%05d.pfm'% (seqname,dframe,inx),'w') as f:
save_pfm(f,occfw[::-1].astype(np.float32))
with open('%s/FlowBW_%d/flo-%05d.pfm'% (seqname,dframe,jnx),'w') as f:
save_pfm(f,flowbw[::-1].astype(np.float32))
with open('%s/FlowBW_%d/occ-%05d.pfm'% (seqname,dframe,jnx),'w') as f:
save_pfm(f,occbw[::-1].astype(np.float32))
imwarped = warp_flow(imgR_o, flowfw[:,:,:2])
cv2.imwrite('%s/FlowFW_%d/warp-%05d.jpg'% (seqname, dframe, inx),imwarped[:,:,::-1])
imwarped = warp_flow(imgL_o, flowbw[:,:,:2])
cv2.imwrite('%s/FlowBW_%d/warp-%05d.jpg'% (seqname, dframe, jnx),imwarped[:,:,::-1])
# visualize semi-dense flow for forward
x0,y0 =np.meshgrid(range(flowfw.shape[1]),range(flowfw.shape[0]))
hp0 = np.stack([x0,y0],-1)
dis = warp_flow(hp0+flowbw[...,:2], flowfw[...,:2]) - hp0
dis = np.linalg.norm(dis[:,:,:2],2,-1)
dis = dis / np.sqrt(flowfw.shape[0] * flowfw.shape[1]) * 2
fb_mask = np.exp(-25*dis) > 0.8
#mask = np.logical_and(mask, fb_mask)
mask = fb_mask # do not use object mask
flowvis = flowfw.copy(); flowvis[~mask]=0
flowvis = point_vec(imgL_o, flowvis,skip=10)
cv2.imwrite('%s/FlowFW_%d/visflo-%05d.jpg'% (seqname, dframe, inx),flowvis)
flowvis = flowbw.copy(); flowvis[~maskR]=0
flowvis = point_vec(imgR_o, flowvis)
cv2.imwrite('%s/FlowBW_%d/visflo-%05d.jpg'% (seqname, dframe, jnx),flowvis)
inx+=1
jnx+=1
torch.cuda.empty_cache()
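# --- Illustrative sketch (not part of the original script): the
# forward-backward consistency test used in main() to build `fb_mask`,
# rewritten standalone with cv2.remap instead of flowutils.dydepth.warp_flow.
# The helper name and default thresholds are assumptions for demonstration.
def _fb_consistency_mask(flowfw, flowbw, sharpness=25., keep=0.8):
    h, w = flowfw.shape[:2]
    x0, y0 = np.meshgrid(range(w), range(h))
    hp0 = np.stack([x0, y0], -1).astype(np.float32)
    # sample (hp0 + flowbw) at the forward-flow targets, then compare with hp0
    grid = hp0 + flowfw[..., :2].astype(np.float32)
    warped = cv2.remap((hp0 + flowbw[..., :2]).astype(np.float32),
                       grid[..., 0], grid[..., 1], interpolation=cv2.INTER_LINEAR)
    dis = np.linalg.norm(warped - hp0, axis=-1)
    dis = dis / np.sqrt(h * w) * 2          # normalize by image scale
    return np.exp(-sharpness * dis) > keep  # True where the cycle closes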
def resize_to_target(flowfw, is_flow=False):
h,w = flowfw.shape[:2]
factor = np.sqrt(250*1000 / (h*w) )
th,tw = int(h*factor), int(w*factor)
factor_h = th/h
factor_w = tw/w
flowfw_d = cv2.resize(flowfw, (tw,th))
if is_flow:
flowfw_d[...,0] *= factor_w
flowfw_d[...,1] *= factor_h
return flowfw_d
if __name__ == '__main__':
main()
|
banmo-main
|
third_party/vcnplus/auto_gen.py
|
"""
# ==============================
# flowlib.py
# library for optical flow processing
# Author: Ruoteng Li
# Date: 6th Aug 2016
# ==============================
"""
import png
from flowutils.util_flow import readPFM
import numpy as np
import matplotlib.colors as cl
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import pdb
UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8
"""
=============
Flow Section
=============
"""
def show_flow(filename):
"""
visualize optical flow map using matplotlib
:param filename: optical flow file
:return: None
"""
flow = read_flow(filename)
img = flow_to_image(flow)
plt.imshow(img)
plt.show()
def point_vec(img,flow,skip=40):
maxsize=1000.
extendfac=1.
resize_factor = 1
#resize_factor = max(1,int(max(maxsize/img.shape[0], maxsize/img.shape[1])))
meshgrid = np.meshgrid(range(img.shape[1]),range(img.shape[0]))
dispimg = cv2.resize(img[:,:,::-1].copy(), None,fx=resize_factor,fy=resize_factor)
colorflow = flow_to_image(flow).astype(int)
for i in range(img.shape[1]): # x
for j in range(img.shape[0]): # y
if flow[j,i,2] != 1: continue
if j%skip!=0 or i%skip!=0: continue
xend = int((meshgrid[0][j,i]+extendfac*flow[j,i,0])*resize_factor)
yend = int((meshgrid[1][j,i]+extendfac*flow[j,i,1])*resize_factor)
leng = np.linalg.norm(flow[j,i,:2]*extendfac)
if leng<1:continue
dispimg = cv2.arrowedLine(dispimg, (meshgrid[0][j,i]*resize_factor,meshgrid[1][j,i]*resize_factor),\
(xend,yend),
(int(colorflow[j,i,2]),int(colorflow[j,i,1]),int(colorflow[j,i,0])),1,tipLength=4/leng,line_type=cv2.LINE_AA)
return dispimg
def visualize_flow(flow, mode='Y'):
"""
    this function visualizes the input flow
    :param flow: input flow in array
    :param mode: color mode used to visualize the flow (Y: YCbCr, RGB: RGB color)
    :return: flow visualization image
"""
if mode == 'Y':
        # YCbCr color wheel
img = flow_to_image(flow)
elif mode == 'RGB':
(h, w) = flow.shape[0:2]
du = flow[:, :, 0]
dv = flow[:, :, 1]
valid = flow[:, :, 2]
max_flow = np.sqrt(du**2+dv**2).max()
img = np.zeros((h, w, 3), dtype=np.float64)
# angle layer
img[:, :, 0] = np.fmod(np.arctan2(dv, du) / (2 * np.pi)+1.,1.)
# magnitude layer, normalized to 1
img[:, :, 1] = np.sqrt(du * du + dv * dv) * 8 / max_flow
# phase layer
img[:, :, 2] = 8 - img[:, :, 1]
# clip to [0,1]
small_idx = img[:, :, 0:3] < 0
large_idx = img[:, :, 0:3] > 1
img[small_idx] = 0
img[large_idx] = 1
# convert to rgb
img = cl.hsv_to_rgb(img)
# remove invalid point
img[:, :, 0] = img[:, :, 0] * valid
img[:, :, 1] = img[:, :, 1] * valid
img[:, :, 2] = img[:, :, 2] * valid
return img
def read_flow(filename):
"""
read optical flow data from flow file
:param filename: name of the flow file
:return: optical flow data in numpy array
"""
if filename.endswith('.flo'):
flow = read_flo_file(filename)
elif filename.endswith('.png'):
flow = read_png_file(filename)
elif filename.endswith('.pfm'):
flow = read_pfm_file(filename)
else:
raise Exception('Invalid flow file format!')
return flow
def write_flo(flow, filename):
TAG_STRING = b'PIEH'
assert type(filename) is str, "file is not str %r" % str(filename)
    assert filename[-4:] == '.flo', "file ending is not .flo %r" % filename[-4:]
height, width, nBands = flow.shape
assert nBands == 2, "Number of bands = %r != 2" % nBands
u = flow[: , : , 0]
v = flow[: , : , 1]
assert u.shape == v.shape, "Invalid flow shape"
height, width = u.shape
f = open(filename,'wb')
f.write(TAG_STRING)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def write_flow(flow, filename):
"""
write optical flow in Middlebury .flo format
:param flow: optical flow map
:param filename: optical flow file path to be saved
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
(height, width) = flow.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
magic.tofile(f)
w.tofile(f)
h.tofile(f)
flow.tofile(f)
f.close()
def save_flow_image(flow, image_file):
"""
save flow visualization into image file
:param flow: optical flow data
    :param image_file: output image file path
:return: None
"""
flow_img = flow_to_image(flow)
img_out = Image.fromarray(flow_img)
img_out.save(image_file)
def flowfile_to_imagefile(flow_file, image_file):
"""
convert flowfile into image file
    :param flow_file: optical flow file path
    :param image_file: output image file path
:return: None
"""
flow = read_flow(flow_file)
save_flow_image(flow, image_file)
def segment_flow(flow):
h = flow.shape[0]
w = flow.shape[1]
u = flow[:, :, 0]
v = flow[:, :, 1]
idx = ((abs(u) > LARGEFLOW) | (abs(v) > LARGEFLOW))
idx2 = (abs(u) == SMALLFLOW)
class0 = (v == 0) & (u == 0)
u[idx2] = 0.00001
tan_value = v / u
class1 = (tan_value < 1) & (tan_value >= 0) & (u > 0) & (v >= 0)
class2 = (tan_value >= 1) & (u >= 0) & (v >= 0)
class3 = (tan_value < -1) & (u <= 0) & (v >= 0)
class4 = (tan_value < 0) & (tan_value >= -1) & (u < 0) & (v >= 0)
class8 = (tan_value >= -1) & (tan_value < 0) & (u > 0) & (v <= 0)
class7 = (tan_value < -1) & (u >= 0) & (v <= 0)
class6 = (tan_value >= 1) & (u <= 0) & (v <= 0)
class5 = (tan_value >= 0) & (tan_value < 1) & (u < 0) & (v <= 0)
seg = np.zeros((h, w))
seg[class1] = 1
seg[class2] = 2
seg[class3] = 3
seg[class4] = 4
seg[class5] = 5
seg[class6] = 6
seg[class7] = 7
seg[class8] = 8
seg[class0] = 0
seg[idx] = 0
return seg
def flow_error(tu, tv, u, v):
"""
Calculate average end point error
:param tu: ground-truth horizontal flow map
:param tv: ground-truth vertical flow map
:param u: estimated horizontal flow map
:param v: estimated vertical flow map
:return: End point error of the estimated flow
"""
smallflow = 0.0
'''
stu = tu[bord+1:end-bord,bord+1:end-bord]
stv = tv[bord+1:end-bord,bord+1:end-bord]
su = u[bord+1:end-bord,bord+1:end-bord]
sv = v[bord+1:end-bord,bord+1:end-bord]
'''
stu = tu[:]
stv = tv[:]
su = u[:]
sv = v[:]
idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | (abs(stv) > UNKNOWN_FLOW_THRESH)
stu[idxUnknow] = 0
stv[idxUnknow] = 0
su[idxUnknow] = 0
sv[idxUnknow] = 0
    ind2 = (np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)
index_su = su[ind2]
index_sv = sv[ind2]
an = 1.0 / np.sqrt(index_su ** 2 + index_sv ** 2 + 1)
un = index_su * an
vn = index_sv * an
index_stu = stu[ind2]
index_stv = stv[ind2]
tn = 1.0 / np.sqrt(index_stu ** 2 + index_stv ** 2 + 1)
tun = index_stu * tn
tvn = index_stv * tn
'''
angle = un * tun + vn * tvn + (an * tn)
index = [angle == 1.0]
angle[index] = 0.999
ang = np.arccos(angle)
mang = np.mean(ang)
mang = mang * 180 / np.pi
'''
epe = np.sqrt((stu - su) ** 2 + (stv - sv) ** 2)
epe = epe[ind2]
mepe = np.mean(epe)
return mepe
def flow_to_image(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
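# --- Illustrative usage (not part of the original library): render a tiny
# synthetic radial flow field with flow_to_image(). The grid size is arbitrary.
def _demo_flow_to_image():
    xx, yy = np.meshgrid(np.linspace(-1, 1, 64), np.linspace(-1, 1, 64))
    flow = np.stack([xx, yy], -1)   # (64, 64, 2) radial flow
    return flow_to_image(flow)      # (64, 64, 3) uint8 Middlebury color wheel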
def evaluate_flow_file(gt_file, pred_file):
"""
evaluate the estimated optical flow end point error according to ground truth provided
:param gt_file: ground truth file path
:param pred_file: estimated optical flow file path
:return: end point error, float32
"""
# Read flow files and calculate the errors
gt_flow = read_flow(gt_file) # ground truth flow
eva_flow = read_flow(pred_file) # predicted flow
# Calculate errors
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], eva_flow[:, :, 0], eva_flow[:, :, 1])
return average_pe
def evaluate_flow(gt_flow, pred_flow):
"""
gt: ground-truth flow
pred: estimated flow
"""
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], pred_flow[:, :, 0], pred_flow[:, :, 1])
return average_pe
"""
==============
Disparity Section
==============
"""
def read_disp_png(file_name):
"""
    Read disparity map from KITTI .png file
    :param file_name: name of the disparity file
    :return: disparity map in matrix
"""
image_object = png.Reader(filename=file_name)
image_direct = image_object.asDirect()
image_data = list(image_direct[2])
(w, h) = image_direct[3]['size']
    channel = len(image_data[0]) // w  # integer channel count (Python 3)
flow = np.zeros((h, w, channel), dtype=np.uint16)
for i in range(len(image_data)):
for j in range(channel):
flow[i, :, j] = image_data[i][j::channel]
return flow[:, :, 0] / 256
def disp_to_flowfile(disp, filename):
"""
    Convert a KITTI disparity map into a .flo flow file (disparity stored as horizontal flow)
:param disp: disparity matrix
:param filename: the flow file name to save
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
(height, width) = disp.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
empty_map = np.zeros((height, width), dtype=np.float32)
data = np.dstack((disp, empty_map))
magic.tofile(f)
w.tofile(f)
h.tofile(f)
data.tofile(f)
f.close()
"""
==============
Image Section
==============
"""
def read_image(filename):
"""
Read normal image of any format
:param filename: name of the image file
:return: image data in matrix uint8 type
"""
img = Image.open(filename)
im = np.array(img)
return im
def warp_image(im, flow):
"""
Use optical flow to warp image to the next
:param im: image to warp
:param flow: optical flow
:return: warped image
"""
from scipy import interpolate
image_height = im.shape[0]
image_width = im.shape[1]
flow_height = flow.shape[0]
flow_width = flow.shape[1]
n = image_height * image_width
(iy, ix) = np.mgrid[0:image_height, 0:image_width]
(fy, fx) = np.mgrid[0:flow_height, 0:flow_width]
fx = fx.astype(np.float64)
fy = fy.astype(np.float64)
fx += flow[:,:,0]
fy += flow[:,:,1]
mask = np.logical_or(fx <0 , fx > flow_width)
mask = np.logical_or(mask, fy < 0)
mask = np.logical_or(mask, fy > flow_height)
fx = np.minimum(np.maximum(fx, 0), flow_width)
fy = np.minimum(np.maximum(fy, 0), flow_height)
points = np.concatenate((ix.reshape(n,1), iy.reshape(n,1)), axis=1)
xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n,1)), axis=1)
warp = np.zeros((image_height, image_width, im.shape[2]))
for i in range(im.shape[2]):
channel = im[:, :, i]
values = channel.reshape(n, 1)
new_channel = interpolate.griddata(points, values, xi, method='cubic')
new_channel = np.reshape(new_channel, [flow_height, flow_width])
new_channel[mask] = 1
warp[:, :, i] = new_channel.astype(np.uint8)
return warp.astype(np.uint8)
"""
==============
Others
==============
"""
def pfm_to_flo(pfm_file):
flow_filename = pfm_file[0:pfm_file.find('.pfm')] + '.flo'
(data, scale) = readPFM(pfm_file)
flow = data[:, :, 0:2]
write_flow(flow, flow_filename)
def scale_image(image, new_range):
"""
Linearly scale the image into desired range
:param image: input image
:param new_range: the new range to be aligned
:return: image normalized in new range
"""
min_val = np.min(image).astype(np.float32)
max_val = np.max(image).astype(np.float32)
min_val_new = np.array(min(new_range), dtype=np.float32)
max_val_new = np.array(max(new_range), dtype=np.float32)
scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new
return scaled_image.astype(np.uint8)
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
    Generate color wheel according to the Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
    col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
def read_flo_file(filename):
"""
Read from Middlebury .flo file
    :param filename: name of the flow file
:return: optical flow data in matrix
"""
f = open(filename, 'rb')
magic = np.fromfile(f, np.float32, count=1)
data2d = None
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
#print("Reading %d x %d flow file in .flo format" % (h, w))
flow = np.ones((h[0],w[0],3))
data2d = np.fromfile(f, np.float32, count=2 * w[0] * h[0])
# reshape data into 3D array (columns, rows, channels)
data2d = np.resize(data2d, (h[0], w[0], 2))
flow[:,:,:2] = data2d
f.close()
return flow
def read_png_file(flow_file):
"""
Read from KITTI .png file
:param flow_file: name of the flow file
:return: optical flow data in matrix
"""
flow = cv2.imread(flow_file,-1)[:,:,::-1].astype(np.float64)
# flow_object = png.Reader(filename=flow_file)
# flow_direct = flow_object.asDirect()
# flow_data = list(flow_direct[2])
# (w, h) = flow_direct[3]['size']
# #print("Reading %d x %d flow file in .png format" % (h, w))
# flow = np.zeros((h, w, 3), dtype=np.float64)
# for i in range(len(flow_data)):
# flow[i, :, 0] = flow_data[i][0::3]
# flow[i, :, 1] = flow_data[i][1::3]
# flow[i, :, 2] = flow_data[i][2::3]
invalid_idx = (flow[:, :, 2] == 0)
flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
return flow
def read_pfm_file(flow_file):
"""
Read from .pfm file
:param flow_file: name of the flow file
:return: optical flow data in matrix
"""
(data, scale) = readPFM(flow_file)
return data
# fast resample layer
def resample(img, sz):
"""
img: flow map to be resampled
    sz: new flow map size. Must be [height, width]
"""
original_image_size = img.shape
in_height = img.shape[0]
in_width = img.shape[1]
out_height = sz[0]
out_width = sz[1]
out_flow = np.zeros((out_height, out_width, 2))
# find scale
height_scale = float(in_height) / float(out_height)
width_scale = float(in_width) / float(out_width)
[x,y] = np.meshgrid(range(out_width), range(out_height))
xx = x * width_scale
yy = y * height_scale
x0 = np.floor(xx).astype(np.int32)
x1 = x0 + 1
y0 = np.floor(yy).astype(np.int32)
y1 = y0 + 1
x0 = np.clip(x0,0,in_width-1)
x1 = np.clip(x1,0,in_width-1)
y0 = np.clip(y0,0,in_height-1)
y1 = np.clip(y1,0,in_height-1)
Ia = img[y0,x0,:]
Ib = img[y1,x0,:]
Ic = img[y0,x1,:]
Id = img[y1,x1,:]
wa = (y1-yy) * (x1-xx)
wb = (yy-y0) * (x1-xx)
wc = (y1-yy) * (xx-x0)
wd = (yy-y0) * (xx-x0)
out_flow[:,:,0] = (Ia[:,:,0]*wa + Ib[:,:,0]*wb + Ic[:,:,0]*wc + Id[:,:,0]*wd) * out_width / in_width
out_flow[:,:,1] = (Ia[:,:,1]*wa + Ib[:,:,1]*wb + Ic[:,:,1]*wc + Id[:,:,1]*wd) * out_height / in_height
return out_flow
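# --- Illustrative check (not part of the original library): resample() also
# rescales the flow vectors, so upsampling a constant flow field to twice the
# size roughly doubles its values. Shapes here are arbitrary.
def _demo_resample():
    flow = np.ones((32, 32, 2))
    return resample(flow, [64, 64])   # values ~2.0 in both channels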
|
banmo-main
|
third_party/vcnplus/flowutils/flowlib.py
|
"""
Taken from https://github.com/ClementPinard/FlowNetPytorch
"""
import pdb
import torch
import torch.nn.functional as F
def EPE(input_flow, target_flow, mask, sparse=False, mean=True):
#mask = target_flow[:,2]>0
target_flow = target_flow[:,:2]
EPE_map = torch.norm(target_flow-input_flow,2,1)
batch_size = EPE_map.size(0)
    if sparse:
        # invalid flow is defined with both flow coordinates being exactly 0;
        # fold those pixels into the valid mask instead of re-indexing EPE_map,
        # which would make the EPE_map[mask] lookups below inconsistent
        invalid = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
        mask = mask & ~invalid
    if mean:
        return EPE_map[mask].mean()
    else:
        return EPE_map[mask].sum()/batch_size
def rob_EPE(input_flow, target_flow, mask, sparse=False, mean=True):
#mask = target_flow[:,2]>0
target_flow = target_flow[:,:2]
#TODO
# EPE_map = torch.norm(target_flow-input_flow,2,1)
EPE_map = (torch.norm(target_flow-input_flow,1,1)+0.01).pow(0.4)
batch_size = EPE_map.size(0)
    if sparse:
        # invalid flow is defined with both flow coordinates being exactly 0;
        # fold those pixels into the valid mask instead of re-indexing EPE_map,
        # which would make the EPE_map[mask] lookups below inconsistent
        invalid = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
        mask = mask & ~invalid
    if mean:
        return EPE_map[mask].mean()
    else:
        return EPE_map[mask].sum()/batch_size
def sparse_max_pool(input, size):
'''Downsample the input by considering 0 values as invalid.
    Unfortunately, no generic interpolation mode can resize a sparse map correctly;
    the strategy here is to use max pooling for positive values and "min pooling"
    for negative values, then sum the two results.
    This technique allows sparsity to be minimized, contrary to nearest interpolation,
which could potentially lose information for isolated data points.'''
positive = (input > 0).float()
negative = (input < 0).float()
output = F.adaptive_max_pool2d(input * positive, size) - F.adaptive_max_pool2d(-input * negative, size)
return output
def multiscaleEPE(network_output, target_flow, mask, weights=None, sparse=False, rob_loss = False):
def one_scale(output, target, mask, sparse):
b, _, h, w = output.size()
if sparse:
target_scaled = sparse_max_pool(target, (h, w))
else:
target_scaled = F.interpolate(target, (h, w), mode='area')
mask = F.interpolate(mask.float().unsqueeze(1), (h, w), mode='bilinear').squeeze(1)==1
if rob_loss:
return rob_EPE(output, target_scaled, mask, sparse, mean=False)
else:
return EPE(output, target_scaled, mask, sparse, mean=False)
if type(network_output) not in [tuple, list]:
network_output = [network_output]
if weights is None:
weights = [0.005, 0.01, 0.02, 0.08, 0.32] # as in original article
assert(len(weights) == len(network_output))
loss = 0
for output, weight in zip(network_output, weights):
loss += weight * one_scale(output, target_flow, mask, sparse)
return loss
def realEPE(output, target, mask, sparse=False):
b, _, h, w = target.size()
upsampled_output = F.interpolate(output, (h,w), mode='bilinear', align_corners=False)
return EPE(upsampled_output, target,mask, sparse, mean=True)
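# --- Illustrative usage (not part of the original module): evaluate the
# multiscale loss on random tensors shaped like a two-level pyramid. The
# sizes and weights here are assumptions for demonstration only.
def _demo_multiscale_loss():
    b, h, w = 2, 64, 64
    target = torch.randn(b, 2, h, w)
    mask = torch.ones(b, h, w, dtype=torch.bool)
    outputs = [torch.randn(b, 2, h // 4, w // 4),   # coarse level
               torch.randn(b, 2, h // 2, w // 2)]   # finer level
    return multiscaleEPE(outputs, target, mask, weights=[0.32, 0.08])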
|
banmo-main
|
third_party/vcnplus/flowutils/multiscaleloss.py
|
import errno
import os
import shutil
import sys
import traceback
import zipfile
if sys.version_info[0] == 2:
import urllib2
else:
import urllib.request
def add_image(log,tag,img,step):
"""
for torch tensorboard
"""
timg = img[0]
timg = (timg-timg.min())/(timg.max()-timg.min())
if len(timg.shape)==2:
formats='HW'
elif timg.shape[0]==3:
formats='CHW'
else:
formats='HWC'
log.add_image(tag,timg,step,dataformats=formats)
# Converts a string to bytes (for writing the string into a file). Provided for
# compatibility with Python 2 and 3.
def StrToBytes(text):
if sys.version_info[0] == 2:
return text
else:
return bytes(text, 'UTF-8')
# Outputs the given text and lets the user input a response (submitted by
# pressing the return key). Provided for compatibility with Python 2 and 3.
def GetUserInput(text):
if sys.version_info[0] == 2:
return raw_input(text)
else:
return input(text)
# Creates the given directory (hierarchy), which may already exist. Provided for
# compatibility with Python 2 and 3.
def MakeDirsExistOk(directory_path):
try:
os.makedirs(directory_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Deletes all files and folders within the given folder.
def DeleteFolderContents(folder_path):
for file_name in os.listdir(folder_path):
file_path = os.path.join(folder_path, file_name)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
else: #if os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Exception in DeleteFolderContents():')
print(e)
print('Stack trace:')
print(traceback.format_exc())
# Creates the given directory, respectively deletes all content of the directory
# in case it already exists.
def MakeCleanDirectory(folder_path):
if os.path.isdir(folder_path):
DeleteFolderContents(folder_path)
else:
MakeDirsExistOk(folder_path)
# Downloads the given URL to a file in the given directory. Returns the
# path to the downloaded file.
# In part adapted from: https://stackoverflow.com/questions/22676
def DownloadFile(url, dest_dir_path):
file_name = url.split('/')[-1]
dest_file_path = os.path.join(dest_dir_path, file_name)
if os.path.isfile(dest_file_path):
print('The following file already exists:')
print(dest_file_path)
print('Please choose whether to re-download and overwrite the file [o] or to skip downloading this file [s] by entering o or s.')
while True:
response = GetUserInput("> ")
if response == 's':
return dest_file_path
elif response == 'o':
break
else:
print('Please enter o or s.')
url_object = None
if sys.version_info[0] == 2:
url_object = urllib2.urlopen(url)
else:
url_object = urllib.request.urlopen(url)
with open(dest_file_path, 'wb') as outfile:
meta = url_object.info()
file_size = 0
if sys.version_info[0] == 2:
file_size = int(meta.getheaders("Content-Length")[0])
else:
file_size = int(meta["Content-Length"])
print("Downloading: %s (size [bytes]: %s)" % (url, file_size))
file_size_downloaded = 0
block_size = 8192
while True:
buffer = url_object.read(block_size)
if not buffer:
break
file_size_downloaded += len(buffer)
outfile.write(buffer)
            sys.stdout.write("%d / %d (%.3f%%)\r" % (file_size_downloaded, file_size, file_size_downloaded * 100. / file_size))
sys.stdout.flush()
return dest_file_path
# Unzips the given zip file into the given directory.
def UnzipFile(file_path, unzip_dir_path, overwrite=True):
zip_ref = zipfile.ZipFile(open(file_path, 'rb'))
if not overwrite:
for f in zip_ref.namelist():
if not os.path.isfile(os.path.join(unzip_dir_path, f)):
zip_ref.extract(f, path=unzip_dir_path)
else:
print('Not overwriting {}'.format(f))
else:
zip_ref.extractall(unzip_dir_path)
zip_ref.close()
# Creates a zip file with the contents of the given directory.
# The archive_base_path must not include the extension .zip. The full, final
# path of the archive is returned by the function.
def ZipDirectory(archive_base_path, root_dir_path):
# return shutil.make_archive(archive_base_path, 'zip', root_dir_path) # THIS WILL ALWAYS HAVE ./ FOLDER INCLUDED
with zipfile.ZipFile(archive_base_path+'.zip', "w", compression=zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(root_dir_path)
for dirpath, dirnames, filenames in os.walk(root_dir_path):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path))
return archive_base_path+'.zip'
# Downloads a zip file and directly unzips it.
def DownloadAndUnzipFile(url, archive_dir_path, unzip_dir_path, overwrite=True):
archive_path = DownloadFile(url, archive_dir_path)
UnzipFile(archive_path, unzip_dir_path, overwrite=overwrite)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
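# --- Illustrative usage (not part of the original module): fetch an archive
# and unpack it into a working directory using the helpers above. The URL is
# a placeholder, not a real endpoint; substitute an actual dataset archive.
def _demo_fetch(url="https://example.com/data.zip", workdir="./downloads"):
    MakeDirsExistOk(workdir)
    DownloadAndUnzipFile(url, workdir, workdir, overwrite=False)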
|
banmo-main
|
third_party/vcnplus/flowutils/io.py
|
import math
import png
import struct
import array
import numpy as np
import cv2
import pdb
from io import *
UNKNOWN_FLOW_THRESH = 1e9;
UNKNOWN_FLOW = 1e10;
# Middlebury checks
TAG_STRING = 'PIEH' # use this when WRITING the file
TAG_FLOAT = 202021.25 # check for this when READING the file
def readPFM(file):
import re
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b'PF':
color = True
elif header == b'Pf':
color = False
else:
raise Exception('Not a PFM file.')
    dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def save_pfm(file, image, scale = 1):
import sys
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n' if color else 'Pf\n')
file.write('%d %d\n' % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
file.write('%f\n' % scale)
image.tofile(file)
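# --- Illustrative round-trip (not part of the original module): PFM stores
# rows bottom-up and readPFM() flips with np.flipud, so callers pass save_pfm()
# pre-flipped maps (see auto_gen.py's `flowfw[::-1]`). The path is a
# placeholder for demonstration only.
def _demo_pfm_roundtrip(path='/tmp/_demo.pfm'):
    img = np.random.rand(4, 5).astype(np.float32)
    with open(path, 'w') as f:
        save_pfm(f, img[::-1])       # write bottom-up, as callers do
    data, _scale = readPFM(path)     # readPFM flips back via np.flipud
    assert np.allclose(data, img)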
def ReadMiddleburyFloFile(path):
""" Read .FLO file as specified by Middlebury.
Returns tuple (width, height, u, v, mask), where u, v, mask are flat
arrays of values.
"""
with open(path, 'rb') as fil:
tag = struct.unpack('f', fil.read(4))[0]
width = struct.unpack('i', fil.read(4))[0]
height = struct.unpack('i', fil.read(4))[0]
assert tag == TAG_FLOAT
#data = np.fromfile(path, dtype=np.float, count=-1)
#data = data[3:]
fmt = 'f' * width*height*2
data = struct.unpack(fmt, fil.read(4*width*height*2))
u = data[::2]
v = data[1::2]
mask = map(lambda x,y: abs(x)<UNKNOWN_FLOW_THRESH and abs(y) < UNKNOWN_FLOW_THRESH, u, v)
mask = list(mask)
u_masked = map(lambda x,y: x if y else 0, u, mask)
v_masked = map(lambda x,y: x if y else 0, v, mask)
return width, height, list(u_masked), list(v_masked), list(mask)
def ReadKittiPngFile(path):
""" Read 16-bit .PNG file as specified by KITTI-2015 (flow).
Returns a tuple, (width, height, u, v, mask), where u, v, mask
are flat arrays of values.
"""
# Read .png file.
png_reader = png.Reader(path)
data = png_reader.read()
if data[3]['bitdepth'] != 16:
raise Exception('bitdepth of ' + path + ' is not 16')
width = data[0]
height = data[1]
# Get list of rows.
rows = list(data[2])
u = array.array('f', [0]) * width*height
v = array.array('f', [0]) * width*height
mask = array.array('f', [0]) * width*height
for y, row in enumerate(rows):
for x in range(width):
ind = width*y+x
u[ind] = (row[3*x] - 2**15) / 64.0
v[ind] = (row[3*x+1] - 2**15) / 64.0
mask[ind] = row[3*x+2]
# if mask[ind] > 0:
# print(u[ind], v[ind], mask[ind], row[3*x], row[3*x+1], row[3*x+2])
#png_reader.close()
return (width, height, u, v, mask)
def WriteMiddleburyFloFile(path, width, height, u, v, mask=None):
""" Write .FLO file as specified by Middlebury.
"""
if mask is not None:
u_masked = map(lambda x,y: x if y else UNKNOWN_FLOW, u, mask)
v_masked = map(lambda x,y: x if y else UNKNOWN_FLOW, v, mask)
else:
u_masked = u
v_masked = v
fmt = 'f' * width*height*2
# Interleave lists
data = [x for t in zip(u_masked,v_masked) for x in t]
with open(path, 'wb') as fil:
fil.write(str.encode(TAG_STRING))
fil.write(struct.pack('i', width))
fil.write(struct.pack('i', height))
fil.write(struct.pack(fmt, *data))
def write_flow(path,flow):
invalid_idx = (flow[:, :, 2] == 0)
flow[:, :, 0:2] = flow[:, :, 0:2]*64.+ 2 ** 15
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
flow = flow.astype(np.uint16)
flow = cv2.imwrite(path, flow[:,:,::-1])
#WriteKittiPngFile(path,
# flow.shape[1], flow.shape[0], flow[:,:,0].flatten(),
# flow[:,:,1].flatten(), flow[:,:,2].flatten())
def WriteKittiPngFile(path, width, height, u, v, mask=None):
""" Write 16-bit .PNG file as specified by KITTI-2015 (flow).
u, v are lists of float values
mask is a list of floats, denoting the *valid* pixels.
"""
    data = array.array('H',[0])*width*height*3
    if mask is None:
        mask = [1] * (width*height)  # treat all pixels as valid
    for i,(u_,v_,mask_) in enumerate(zip(u,v,mask)):
data[3*i] = int(u_*64.0+2**15)
data[3*i+1] = int(v_*64.0+2**15)
data[3*i+2] = int(mask_)
# if mask_ > 0:
# print(data[3*i], data[3*i+1],data[3*i+2])
with open(path, 'wb') as png_file:
png_writer = png.Writer(width=width, height=height, bitdepth=16, compression=3, greyscale=False)
png_writer.write_array(png_file, data)
def ConvertMiddleburyFloToKittiPng(src_path, dest_path):
width, height, u, v, mask = ReadMiddleburyFloFile(src_path)
WriteKittiPngFile(dest_path, width, height, u, v, mask=mask)
def ConvertKittiPngToMiddleburyFlo(src_path, dest_path):
width, height, u, v, mask = ReadKittiPngFile(src_path)
WriteMiddleburyFloFile(dest_path, width, height, u, v, mask=mask)
def ParseFilenameKitti(filename):
# Parse kitti filename (seq_frameno.xx),
# return seq, frameno, ext.
# Be aware that seq might contain the dataset name (if contained as prefix)
ext = filename[filename.rfind('.'):]
frameno = filename[filename.rfind('_')+1:filename.rfind('.')]
frameno = int(frameno)
seq = filename[:filename.rfind('_')]
return seq, frameno, ext
def read_calib_file(filepath):
"""Read in a calibration file and parse into a dictionary."""
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def load_calib_cam_to_cam(cam_to_cam_file):
# We'll return the camera calibration as a dictionary
data = {}
# Load and parse the cam-to-cam calibration data
filedata = read_calib_file(cam_to_cam_file)
# Create 3x4 projection matrices
P_rect_00 = np.reshape(filedata['P_rect_00'], (3, 4))
P_rect_10 = np.reshape(filedata['P_rect_01'], (3, 4))
P_rect_20 = np.reshape(filedata['P_rect_02'], (3, 4))
P_rect_30 = np.reshape(filedata['P_rect_03'], (3, 4))
# Compute the camera intrinsics
data['K_cam0'] = P_rect_00[0:3, 0:3]
data['K_cam1'] = P_rect_10[0:3, 0:3]
data['K_cam2'] = P_rect_20[0:3, 0:3]
data['K_cam3'] = P_rect_30[0:3, 0:3]
data['b00'] = P_rect_00[0, 3] / P_rect_00[0, 0]
data['b10'] = P_rect_10[0, 3] / P_rect_10[0, 0]
data['b20'] = P_rect_20[0, 3] / P_rect_20[0, 0]
data['b30'] = P_rect_30[0, 3] / P_rect_30[0, 0]
return data
|
banmo-main
|
third_party/vcnplus/flowutils/util_flow.py
|
banmo-main
|
third_party/vcnplus/flowutils/__init__.py
|
|
gpuid = 1
import pdb
import sys
import torch
import numpy as np
import cv2
def write_calib(K,bl,shape,maxd,path):
str1 = 'camera.A=[%f 0 %f; 0 %f %f; 0 0 1]'%(K[0,0], K[0,2], K[1,1],K[1,2])
str2 = 'camera.height=%d'%(shape[0])
str3 = 'camera.width=%d' %(shape[1])
str4 = 'camera.zmax=%f'%(maxd)
str5 = 'rho=%f'%(bl*K[0,0])
with open(path,'w') as f:
f.write('%s\n%s\n%s\n%s\n%s'%(str1,str2,str3,str4,str5))
def create_ade20k_label_colormap():
"""Creates a label colormap used in ADE20K segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
return np.asarray([
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
])
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): pathto file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n".encode() if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
def triangulation(disp, xcoord, ycoord, bl=1, fl = 450, cx = 479.5, cy = 269.5):
mask = (disp<=0).flatten()
depth = bl*fl / (disp) # 450px->15mm focal length
X = (xcoord - cx) * depth / fl
Y = (ycoord - cy) * depth / fl
Z = depth
P = np.concatenate((X[np.newaxis],Y[np.newaxis],Z[np.newaxis]),0).reshape(3,-1)
P = np.concatenate((P,np.ones((1,P.shape[-1]))),0)
P[:,mask]=0
return P
def midpoint_triangulate(x, cam):
"""
Args:
x: Set of 2D points in homogeneous coords, (3 x n x N) matrix
        cam: Collection of n camera projection matrices; each cam[i] is
             indexed below as a 3x4 (or 4x4) array, e.g. cam[i][:3,:3] is
             the rotation/intrinsics block and cam[i][:3,-1:] the camera
             translation column
Returns:
midpoint: 3D point in homogeneous coords, (4 x 1) matrix
"""
n = len(cam) # No. of cameras
N = x.shape[-1]
I = np.eye(3) # 3x3 identity matrix
A = np.zeros((3,n))
B = np.zeros((3,n,N))
sigma2 = np.zeros((3,N))
for i in range(n):
a = -np.linalg.inv(cam[i][:3,:3]).dot(cam[i][:3,-1:]) # ith camera position #
A[:,i,None] = a
if i==0:
            b = np.linalg.pinv(cam[i][:3,:3]).dot(x[:,i]) # ray direction for the reference camera # 3, N
else:
b = np.linalg.pinv(cam[i]).dot(x[:,i]) # Directional vector # 4, N
b = b / b[3:]
b = b[:3,:] - a # 3,N
b = b / np.linalg.norm(b,2,0)[np.newaxis]
B[:,i,:] = b
sigma2 = sigma2 + b * (b.T.dot(a).reshape(-1,N)) # 3,N
Bo = B.transpose([2,0,1])
Bt = B.transpose([2,1,0])
Bo = torch.DoubleTensor(Bo)
Bt = torch.DoubleTensor(Bt)
A = torch.DoubleTensor(A)
sigma2 = torch.DoubleTensor(sigma2)
I = torch.DoubleTensor(I)
BoBt = torch.matmul(Bo, Bt)
C = (n * I)[np.newaxis] - BoBt# N,3,3
Cinv = C.inverse()
sigma1 = torch.sum(A, axis=1)[:,None]
m1 = I[np.newaxis] + torch.matmul(BoBt,Cinv)
m2 = torch.matmul(Cinv,sigma2.T[:,:,np.newaxis])
midpoint = (1/n) * torch.matmul(m1,sigma1[np.newaxis]) - m2
midpoint = np.asarray(midpoint)
return midpoint[:,:,0].T, np.asarray(Bo)
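# Closed form implemented above, written out for reference: with camera
# centers a_i and unit ray directions b_i, let B = sum_i b_i b_i^T and
# C = n*I - B. The least-squares midpoint of the n rays for each track is
#   p = (1/n) * (I + B C^{-1}) * sum_i a_i  -  C^{-1} * sum_i b_i (b_i . a_i),
# evaluated batched over all N tracks in double precision.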
def register_disp_fast(id_flow, id_mono, mask, inlier_th=0.01,niters=100):
"""
input: disp_flow, disp_mono, mask
output: inlier_mask, registered
register up-to-scale rough depth to motion-based depth
"""
shape = id_mono.shape
id_mono = id_mono.flatten()
disp_flow = id_flow[mask] # register to flow with mono
disp_mono = id_mono[mask]
num_samp = min(3000,len(disp_flow))
np.random.seed(0)
submask = np.random.choice(range(len(disp_flow)), num_samp)
disp_flow = disp_flow[submask]
disp_mono = disp_mono[submask]
n = len(disp_flow)
sample_size=niters
rand_idx = np.random.choice(range(n),sample_size)
scale_cand = (disp_flow/disp_mono)[rand_idx]
dis_cand = np.abs(np.log(disp_mono[:,np.newaxis]*scale_cand[np.newaxis])-np.log(disp_flow[:,np.newaxis]))
rank_metric = (dis_cand<inlier_th).sum(0)
scale_idx = np.argmax(rank_metric)
scale = scale_cand[scale_idx]
# # another way to align scale
# from scipy.optimize import minimize
# def cost_function(alpha, K):
# return np.mean(np.abs(alpha*K - 1))
#
# # MRE minimize
# output = minimize(cost_function, 1., args=(disp_mono/disp_flow),method='Nelder-Mead')
# if output.success:
# scale = output.x
dis = np.abs(np.log(disp_mono*scale)-np.log(disp_flow))
ninliers = (dis<inlier_th).sum()/n
registered_flow=(id_flow.reshape(shape))/scale
return registered_flow, scale, ninliers
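# The scale search above is a one-parameter RANSAC: candidate scales
# s = d_flow / d_mono are drawn at random samples, each candidate is scored
# by how many samples satisfy |log(s * d_mono) - log(d_flow)| < inlier_th,
# and the winning s rescales the flow-based map into the monocular units.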
def testEss(K0,K1,R,T,p1,p2):
testP = cv2.triangulatePoints(K0.dot(np.concatenate( (np.eye(3),np.zeros((3,1))), -1)),
K1.dot(np.concatenate( (R,T), -1)),
p1[:2],p2[:2])
Z1 = testP[2,:]/testP[-1,:]
Z2 = (R.dot(Z1*np.linalg.inv(K0).dot(p1))+T)[-1,:]
if ((Z1>0).sum() > (Z1<=0).sum()) and ((Z2>0).sum() > (Z2<=0).sum()):
#print(Z1)
#print(Z2)
return True
else:
return False
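# Cheirality test: a candidate (R, T) from the essential-matrix
# decomposition is accepted only if the majority of triangulated points
# have positive depth in both cameras (Z1 > 0 and Z2 > 0).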
def pose_estimate(K0,K1,hp0,hp1,strict_mask,rot,th=0.0001):
# # epipolar geometry
# from models.submodule import F_ngransac
# tmphp0 = hp0[:,strict_mask]
# tmphp1 = hp1[:,strict_mask]
# #num_samp = min(300000,tmphp0.shape[1])
# num_samp = min(30000,tmphp0.shape[1])
# #num_samp = min(3000,tmphp0.shape[1])
# submask = np.random.choice(range(tmphp0.shape[1]), num_samp)
# tmphp0 = tmphp0[:,submask]
# tmphp1 = tmphp1[:,submask]
#
# rotx,transx,Ex = F_ngransac(torch.Tensor(tmphp0.T[np.newaxis]).cuda(),
# torch.Tensor(tmphp1.T[np.newaxis]).cuda(),
# torch.Tensor(K0[np.newaxis]).cuda(),
# False,0,
# Kn = torch.Tensor(K1[np.newaxis]).cuda())
# R01 = cv2.Rodrigues(np.asarray(rotx[0]))[0]
# T01 = np.asarray(transx[0])
# E = np.asarray(Ex[0])
# _,R01,T01,_ = cv2.recoverPose(E.astype(float), tmphp0[:2].T, tmphp1[:2].T, K0) # RT are 0->1 points transform
# T01 = T01[:,0]
# R01=R01.T
# T01=-R01.dot(T01) # now are 1->0 points transform
E, maskk = cv2.findEssentialMat(np.linalg.inv(K0).dot(hp0[:,strict_mask])[:2].T,
np.linalg.inv(K1).dot(hp1[:,strict_mask])[:2].T, np.eye(3),
cv2.LMEDS,threshold=th)
valid_points = np.ones((strict_mask.sum())).astype(bool)
valid_points[~maskk[:,0].astype(bool)]=False
fmask = strict_mask.copy()
fmask[strict_mask]=valid_points
R1, R2, T = cv2.decomposeEssentialMat(E)
for rott in [(R1,T),(R2,T),(R1,-T),(R2,-T)]:
if testEss(K0,K1,rott[0],rott[1],hp0[:,fmask], hp1[:,fmask]):
R01=rott[0].T
T01=-R01.dot(rott[1][:,0])
    if 'T01' not in locals():
T01 = np.asarray([0,0,1])
R01 = np.eye(3)
T01t = T01.copy()
# compensate R
H01 = K0.dot(R01).dot(np.linalg.inv(K1)) # plane at infinity
comp_hp1 = H01.dot(hp1)
comp_hp1 = comp_hp1/comp_hp1[-1:]
return R01,T01,H01,comp_hp1,E
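# Pipeline above: (1) fit an essential matrix with LMedS on normalized
# image coordinates, (2) decompose it into the four (R, T) candidates,
# (3) keep the candidate that passes the cheirality test, and (4) build
# the plane-at-infinity homography H01 = K0 R01 K1^{-1}, so that
# comp_hp1 - hp0 is rotation-compensated parallax.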
def evaluate_tri(t10,R01,K0,K1,hp0,hp1,disp0,ent,bl,inlier_th=0.1,select_th=0.4, valid_mask=None):
if valid_mask is not None:
hp0 = hp0[:,valid_mask]
hp1 = hp1[:,valid_mask]
disp0 = disp0.flatten()[valid_mask]
ent = ent.flatten()[valid_mask]
    # triangulation
#import time; beg = time.time()
cams = [K0.dot(np.concatenate( (np.eye(3),np.zeros((3,1))), -1)),
K1.dot(np.concatenate( (R01.T,-R01.T.dot(t10[:,np.newaxis])), -1)) ]
P_pred,_ = midpoint_triangulate( np.concatenate([hp0[:,np.newaxis],hp1[:,np.newaxis]],1),cams)
#print(1000*(time.time()-beg))
idepth_p3d = np.clip(K0[0,0]*bl/P_pred[2], 1e-6, np.inf)
# discard points with small disp
entmask = np.logical_and(idepth_p3d>1e-12, ~np.isinf(idepth_p3d))
entmask_tmp = entmask[entmask].copy()
entmask_tmp[np.argsort(-idepth_p3d[entmask])[entmask.sum()//2:]]=False # remove sky
entmask[entmask] = entmask_tmp
med = np.median(idepth_p3d[entmask])
entmask = np.logical_and(entmask, np.logical_and(idepth_p3d>med/5., idepth_p3d<med*5))
if entmask.sum()<10:
return None,None,None
registered_p3d,scale,ninliers = register_disp_fast(idepth_p3d, disp0, entmask,
inlier_th=inlier_th,niters=100)
print('size/inlier ratio: %d/%.2f'%(entmask.sum(),ninliers))
disp_ratio = np.abs(np.log(registered_p3d.flatten()/disp0.flatten()))
agree_mask = disp_ratio<np.log(select_th)
rank = np.argsort(disp_ratio)
return agree_mask,t10*scale,rank
def rb_fitting(bgmask_pred,mask_pred,idepth,flow,ent,K0,K1,bl,parallax_th=2,mono=True,sintel=False,tranpred=None,quatpred=None):
if sintel: parallax_th = parallax_th*0.25
# prepare data
shape = flow.shape[:2]
x0,y0=np.meshgrid(range(shape[1]),range(shape[0]))
x0=x0.astype(np.float32)
y0=y0.astype(np.float32)
x1=x0+flow[:,:,0]
y1=y0+flow[:,:,1]
hp0 = np.concatenate((x0[np.newaxis],y0[np.newaxis],np.ones(x1.shape)[np.newaxis]),0).reshape((3,-1))
hp1 = np.concatenate((x1[np.newaxis],y1[np.newaxis],np.ones(x1.shape)[np.newaxis]),0).reshape((3,-1))
# use bg + valid pixels to compute R/t
valid_mask = np.logical_and(bgmask_pred, ent<0).flatten()
R01,T01,H01,comp_hp1,E = pose_estimate(K0,K1,hp0,hp1,valid_mask,[0,0,0])
parallax = np.transpose((comp_hp1[:2]-hp0[:2]),[1,0]).reshape(x1.shape+(2,))
parallax_mag = np.linalg.norm(parallax[:,:,:2],2,2)
flow_mag = np.linalg.norm(flow[:,:,:2],2,2)
print('[BG Fitting] mean pp/flow: %.1f/%.1f px'%(parallax_mag[bgmask_pred].mean(), flow_mag[bgmask_pred].mean()))
reg_flow_P = triangulation(idepth, x0, y0, bl=bl, fl = K0[0,0], cx = K0[0,2], cy = K0[1,2])[:3]
if parallax_mag[bgmask_pred].mean()<parallax_th:
# static camera
print("static")
scene_type = 'H'
T01_c = [0,0,0]
else:
scene_type = 'F'
# determine scale of translation / reconstruction
aligned_mask,T01_c,ranked_p = evaluate_tri(T01,R01,K0,K1,hp0,hp1,idepth,ent,bl,inlier_th=0.01,select_th=1.2,valid_mask=valid_mask)
if not mono:
# PnP refine
aligned_mask[ranked_p[50000:]]=False
tmp = valid_mask.copy()
tmp[tmp] = aligned_mask
aligned_mask = tmp
_,rvec, T01=cv2.solvePnP(reg_flow_P.T[aligned_mask.flatten(),np.newaxis],
hp1[:2].T[aligned_mask.flatten(),np.newaxis], K0, 0,
flags=cv2.SOLVEPNP_DLS)
_,rvec, T01,=cv2.solvePnP(reg_flow_P.T[aligned_mask,np.newaxis],
hp1[:2].T[aligned_mask,np.newaxis], K0, 0,rvec, T01,useExtrinsicGuess=True,
flags=cv2.SOLVEPNP_ITERATIVE)
R01 = cv2.Rodrigues(rvec)[0].T
T01_c = -R01.dot(T01)[:,0]
RTs = []
for i in range(0,mask_pred.max()):
obj_mask = (mask_pred==i+1).flatten()
valid_mask = np.logical_and(obj_mask, ent.reshape(obj_mask.shape)<0)
if valid_mask.sum()<10 or (valid_mask.sum() / obj_mask.sum() < 0.3):
RT01 = None
else:
if tranpred is None:
R01x,T01_cx,_,comp_hp1,_ = pose_estimate(K0,K1,hp0,hp1,valid_mask,[0,0,0])
parallax = np.transpose((comp_hp1[:2]-hp0[:2]),[1,0])
parallax_mag = np.linalg.norm(parallax,2,-1)
center_coord = hp0[:,obj_mask].mean(-1)
print('[FG-%03d Fitting] center/mean pp/flow: (%d,%d)/%.1f/%.1f px'%(i,
center_coord[0], center_coord[1], parallax_mag[obj_mask].mean(),
flow_mag.flatten()[obj_mask].mean()))
if parallax_mag[obj_mask].mean()<parallax_th: RTs.append(None);continue
else:
R01x = quatpred[i].T
T01_cx = -quatpred[i].T.dot(tranpred[i][:,None])[:,0]
T01_cx = T01_cx / np.linalg.norm(T01_cx)
aligned_mask,T01_cx,ranked_p = evaluate_tri(T01_cx,R01x,K0,K1,hp0,hp1,idepth,ent,bl,inlier_th=0.01,select_th=1.2,valid_mask=valid_mask)
if T01_cx is None: RTs.append(None); continue
if not mono:
aligned_mask[ranked_p[50000:]]=False
tmp = valid_mask.copy()
tmp[tmp] = aligned_mask
obj_mask = tmp
if tranpred is None:
_,rvec, T01_cx=cv2.solvePnP(reg_flow_P.T[obj_mask,np.newaxis],
hp1[:2].T[obj_mask,np.newaxis], K0, 0,
flags=cv2.SOLVEPNP_DLS)
else:
rvec = cv2.Rodrigues(R01x.T)[0]
T01_cx = -R01x.T.dot(T01_cx[:,None])
_,rvec, T01_cx=cv2.solvePnP(reg_flow_P.T[obj_mask,np.newaxis],
hp1[:2].T[obj_mask,np.newaxis], K0, 0,rvec, T01_cx,useExtrinsicGuess=True,
flags=cv2.SOLVEPNP_ITERATIVE)
R01x = cv2.Rodrigues(rvec)[0].T
T01_cx = -R01x.dot(T01_cx)[:,0]
if T01_cx is None:
RT01=None
else:
RT01 = [R01x, T01_cx]
RTs.append(RT01)
return scene_type, T01_c, R01,RTs
def mod_flow(bgmask,mask_pred, idepth,disp1,flow,ent,bl,K0,K1,scene_type, T01_c,R01, RTs, segs_unc, oracle=None, mono=True,sintel=False):
# prepare data
idepth = idepth.copy()
flow = flow.copy()
shape = flow.shape[:2]
x0,y0=np.meshgrid(range(shape[1]),range(shape[0]))
x0=x0.astype(np.float32)
y0=y0.astype(np.float32)
x1=x0+flow[:,:,0]
y1=y0+flow[:,:,1]
hp0 = np.concatenate((x0[np.newaxis],y0[np.newaxis],np.ones(x1.shape)[np.newaxis]),0).reshape((3,-1))
hp1 = np.concatenate((x1[np.newaxis],y1[np.newaxis],np.ones(x1.shape)[np.newaxis]),0).reshape((3,-1))
reg_flow_P = triangulation(idepth, x0, y0, bl=bl, fl = K0[0,0], cx = K0[0,2], cy = K0[1,2])[:3]
# modify motion fields
if scene_type == 'H':
H,maskh = cv2.findHomography(hp0.T[ent.flatten()<0], hp1.T[ent.flatten()<0], cv2.FM_RANSAC,ransacReprojThreshold=5)
mod_mask = np.logical_and(bgmask,ent>0)
comp_hp0 = H.dot(hp0); comp_hp0 = comp_hp0/comp_hp0[-1:]
flow[mod_mask] = np.transpose((comp_hp0-hp0).reshape((3,)+shape), (1,2,0))[mod_mask]
elif scene_type == 'F':
mod_mask = bgmask
# modify disp0 | if monocular
if not (T01_c is None or np.isinf(np.linalg.norm(T01_c))):
print('[BG Update] cam trans mag: %.2f'%np.linalg.norm(T01_c))
if mono:
cams = [K0.dot(np.concatenate( (np.eye(3),np.zeros((3,1))), -1)),
K1.dot(np.concatenate( (R01.T,-R01.T.dot(T01_c[:,np.newaxis])), -1)) ]
pts = np.concatenate([hp0[:,np.newaxis,mod_mask.flatten()],
hp1[:,np.newaxis,mod_mask.flatten()]],1)
P_flow,cray = midpoint_triangulate(pts ,cams)
cflow = 1-(1/(1 + np.exp(-ent)) )
cmotion = 1-segs_unc
angle_th = 0.2
cangle = np.clip(np.arccos(np.abs(np.sum(cray[:,:,0] * cray[:,:,1],-1))) / np.pi * 180, 0,angle_th) # N,3,2
cangle = 1-np.power((cangle-angle_th)/angle_th,2)
cangle_tmp = np.zeros(shape)
cangle_tmp[mod_mask] = cangle
conf_depth = (cmotion*cflow*cangle_tmp)
lflow = (cmotion*cangle_tmp)
dcmask = np.logical_or(lflow[mod_mask]<0.25, P_flow[-1]<1e-12)
P_flow[:,dcmask] = reg_flow_P[:,mod_mask.flatten()][:,dcmask] # dont change
reg_flow_P[:,mod_mask.flatten()] = P_flow
# disp 1
reg_flow_PP = R01.T.dot(reg_flow_P)-R01.T.dot(T01_c)[:,np.newaxis]
hpp1 = K0.dot(reg_flow_PP)
hpp1 = hpp1/hpp1[-1:]
if not mono:
flow[mod_mask] = (hpp1 - hp0).T.reshape(shape+(3,))[mod_mask]
disp1[mod_mask] = bl*K0[0,0]/reg_flow_PP[-1].reshape(shape)[mod_mask]
# obj
for i in range(0,mask_pred.max()):
if sintel:break
obj_mask = mask_pred==i+1
if oracle is not None:
if (obj_mask).sum()>0:
# use midas depth
if np.median(idepth[obj_mask])==0: continue
reg_flow_P[2,obj_mask.flatten()] = bl*K0[0,0] / (np.median(oracle[obj_mask]) / np.median(idepth[obj_mask]) * idepth[obj_mask])
else:
if RTs[i] is not None:
mod_mask = obj_mask
T01_c_sub = RTs[i][1]
if not np.isinf(np.linalg.norm(T01_c_sub)):
R01_sub = RTs[i][0]
print('[FG-%03d Update] ins trans norm: %.2f'%(i,np.linalg.norm(T01_c_sub)))
if mono:
# mono replace
cams = [K0.dot(np.concatenate( (np.eye(3),np.zeros((3,1))), -1)),
K1.dot(np.concatenate( (R01_sub.T,-R01_sub.T.dot(T01_c_sub[:,np.newaxis])), -1)) ]
pts = np.concatenate([hp0[:,np.newaxis,mod_mask.flatten()],
hp1[:,np.newaxis,mod_mask.flatten()]],1)
P_flow,det = midpoint_triangulate(pts ,cams)
med = np.median(P_flow[2])
reg_flow_P[:,mod_mask.flatten()] = P_flow # modify disp0 | if monocular
print('[FG-%03d Update] size:%d/center:%.1f,%.1f/med:%.1f'%(i, P_flow.shape[1],pts[:,0].mean(-1)[0],pts[:,0].mean(-1)[1], med))
# disp 1
reg_flow_PP = R01_sub.T.dot(reg_flow_P)-R01_sub.T.dot(T01_c_sub)[:,np.newaxis]
hpp1 = K0.dot(reg_flow_PP)
hpp1 = hpp1/hpp1[-1:]
if not mono:
flow[mod_mask] = (hpp1 - hp0).T.reshape(shape+(3,))[mod_mask]
disp1[mod_mask] = bl*K0[0,0]/reg_flow_PP[-1].reshape(shape)[mod_mask]
idepth = bl*K0[0,0] / reg_flow_P[-1].reshape(shape)
return idepth,flow, disp1
def bilinear_interpolate(im, x, y):
x = np.asarray(x)
y = np.asarray(y)
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, im.shape[1]-1);
x1 = np.clip(x1, 0, im.shape[1]-1);
y0 = np.clip(y0, 0, im.shape[0]-1);
y1 = np.clip(y1, 0, im.shape[0]-1);
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
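# Standard bilinear weights: wa..wd sum to 1 for in-range queries. A quick
# sanity check (a sketch, not part of the original):
#   bilinear_interpolate(np.array([[0., 1.], [2., 3.]]), 0.5, 0.5) == 1.5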
def extract_trajectory(cams_gt):
# world matrix of the camera object: point from world to current frame
cam_traj_gt = []
for cam in cams_gt:
cam_pos_gt = cams_gt[0].dot(np.linalg.inv(cam))[:3,-1]
cam_traj_gt.append(cam_pos_gt)
cam_traj_gt = np.stack(cam_traj_gt)
return cam_traj_gt
def extract_delta(cams_gt):
# world matrix of the camera object: point from world to current frame
cam_traj_gt = [np.zeros(3)]
for i,cam in enumerate(cams_gt):
if i==0:continue
cam_traj_gt.append(cams_gt[i-1].dot(np.linalg.inv(cam))[:3,-1])
cam_traj_gt = np.stack(cam_traj_gt)
return cam_traj_gt
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = flow.copy().astype(np.float32)
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
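# Backward warping: cv2.remap samples img at (x + u, y + v), so `res` is
# the input image pulled back onto the reference pixel grid along the flow.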
def lin_interp(shape, xyd):
    from scipy.interpolate import LinearNDInterpolator
# taken from https://github.com/hunse/kitti
m, n = shape
ij, d = xyd[:, 1::-1], xyd[:, 2]
f = LinearNDInterpolator(ij, d, fill_value=0)
J, I = np.meshgrid(np.arange(n), np.arange(m))
IJ = np.vstack([I.flatten(), J.flatten()]).T
disparity = f(IJ).reshape(shape)
return disparity
def colmap_cam_read(auxdir,framename):
K = np.eye(3)
with open(auxdir, 'r') as f:
lines = f.readlines()
if len(lines) == 4:
# shared intrinsics
_,_,_,_,fl, cx, cy, _ = lines[-1].split(' ')
K[0,0] = fl
K[1,1] = fl
K[0,2] = cx
K[1,2] = cy
return K
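# Parsing note (an assumption inferred from the field layout, not stated in
# the original): a 4-line cameras.txt whose last line has 8 fields matches
# COLMAP's SIMPLE_RADIAL model, "CAM_ID MODEL W H f cx cy k", i.e. a single
# set of intrinsics shared by all frames.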
|
banmo-main
|
third_party/vcnplus/flowutils/dydepth.py
|
import pdb
import math
import numpy as np
import cv2
import torch
import torch.nn.functional as F
import torch.nn as nn
import kornia  # required by rigid_transform / pose_reg / ctdet_decode below
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
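# Produces an unnormalized h x w Gaussian peaked at the center (peak value
# 1), with tiny tail values clipped to zero -- akin to MATLAB's
# fspecial('gaussian'), as used for CornerNet/CenterNet-style heatmaps.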
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
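# Following CornerNet's derivation: each quadratic solves for the largest
# corner offset r such that a box whose corners are shifted by r still has
# IoU >= min_overlap with the ground-truth box, under one of three overlap
# configurations; the safe radius is the minimum of the three roots.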
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
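# Shape walkthrough: feat (B, C, H, W) -> permute/view -> (B, H*W, C);
# ind (B, K) holds flattened pixel indices, so the gather picks the C-dim
# feature vector at each of the K locations, returning (B, K, C).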
def get_polarmask(mask):
# single mask
mask = np.asarray(mask.cpu()).astype(np.uint8)
contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) # cv 4.x
#_,contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) # cv 3.x
#contour = [i for i in contour if len(i)>50]
img = np.zeros(mask.shape+(3,))
#import pdb; pdb.set_trace()
img = cv2.drawContours(img, contour, -1, (0, 255, 0), 3)
#cv2.imwrite('/data/gengshay/3.png',mask)
#cv2.imwrite('/data/gengshay/4.png',img)
contour.sort(key=lambda x: cv2.contourArea(x), reverse=True) #only save the biggest one
    # guard against empty contours (IndexError: list index out of range)
    try:
        count = contour[0][:, 0, :]
    except IndexError:
        pdb.set_trace()
    try:
        center = get_centerpoint(count)
    except Exception:
        # degenerate (zero-area) contour: fall back to the mean point
        x, y = count.mean(axis=0)
        center = [int(x), int(y)]
contour = contour[0]
contour = torch.Tensor(contour).float()
dists, coords = get_36_coordinates(center[0], center[1], contour)
return dists, np.asarray(center)
def get_centerpoint(lis):
area = 0.0
x, y = 0.0, 0.0
a = len(lis)
for i in range(a):
lat = lis[i][0]
lng = lis[i][1]
if i == 0:
lat1 = lis[-1][0]
lng1 = lis[-1][1]
else:
lat1 = lis[i - 1][0]
lng1 = lis[i - 1][1]
fg = (lat * lng1 - lng * lat1) / 2.0
area += fg
x += fg * (lat + lat1) / 3.0
y += fg * (lng + lng1) / 3.0
x = x / area
y = y / area
return [int(x), int(y)]
def get_36_coordinates(c_x, c_y, pos_mask_contour):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
new_coordinate = {}
for i in range(0, 360, 10):
if i in angle:
d = dist[angle==i].max()
new_coordinate[i] = d
elif i + 1 in angle:
d = dist[angle == i+1].max()
new_coordinate[i] = d
elif i - 1 in angle:
d = dist[angle == i-1].max()
new_coordinate[i] = d
elif i + 2 in angle:
d = dist[angle == i+2].max()
new_coordinate[i] = d
elif i - 2 in angle:
d = dist[angle == i-2].max()
new_coordinate[i] = d
elif i + 3 in angle:
d = dist[angle == i+3].max()
new_coordinate[i] = d
elif i - 3 in angle:
d = dist[angle == i-3].max()
new_coordinate[i] = d
distances = torch.zeros(36)
for a in range(0, 360, 10):
        if a not in new_coordinate:
new_coordinate[a] = torch.tensor(1e-6)
distances[a//10] = 1e-6
else:
distances[a//10] = new_coordinate[a]
# for idx in range(36):
# dist = new_coordinate[idx * 10]
# distances[idx] = dist
return distances, new_coordinate
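# Polar mask encoding: the contour is sampled every 10 degrees around the
# center; for each target angle the farthest contour point within +/- 3
# degrees is taken, and angles with no contour hit fall back to 1e-6.
# Returns the 36 distances plus the angle -> distance dictionary.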
def polar_reg(output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
    loss = F.l1_loss(pred * mask, target * mask, reduction='sum')  # was size_average=False
loss = loss / (mask.sum() + 1e-4)
return loss,pred
def rigid_transform(p03d,p13d,quat, tran,mask):
mask = torch.Tensor(mask).cuda()
for it in range(mask.max().int()):
obj_mask = mask==(it+1)
# compute rigid transform
quatx = torch.nn.functional.normalize(quat[it],2,-1)
quatx = kornia.quaternion_to_rotation_matrix(quatx)
p13d[obj_mask] = quatx.matmul(p03d[obj_mask][:,:,None])[:,:,0]+tran[it]
return p03d,p13d
def pose_reg(quat, tran, pose_px_ind, ind, gt_p03d, gt_p13d, gt_depth, max_obj, p03d_feat,img):
# solve the scale
alpha = torch.ones(quat.shape[0]).cuda()
for i in range(quat.shape[0]):
d1 = p03d_feat[i,-1]
d2 = gt_p03d[i,-1].view(-1)
alpha[i] = (d1*d2).sum()/(d1*d1).sum()
#pdb.set_trace()
#from utils.fusion import pcwrite
#pc1 = np.asarray(p03d_feat[0].T.cpu())
#pc2 = np.asarray(gt_p03d[0].view(3,-1).T.cpu())
#pc1 = pc1*np.asarray(alpha[i].cpu())
#pcwrite('/data/gengshay/0.ply',np.concatenate([pc1,pc1],-1))
#pcwrite('/data/gengshay/1.ply',np.concatenate([pc2,pc2],-1))
alpha = alpha.detach()
vis = torch.zeros_like(gt_depth)
quat = _transpose_and_gather_feat(quat, ind).view(-1,4)
tran = _transpose_and_gather_feat(tran, ind).view(-1,3)
gt_p03d = gt_p03d.permute(0,2,3,1)
gt_p13d = gt_p13d.permute(0,2,3,1)
gt_depth = gt_depth.permute(0,2,3,1)
loss = []
for it,obj_mask in enumerate(pose_px_ind):
imgid = it//max_obj
if len(obj_mask)>0:
p03d = gt_p03d[imgid][obj_mask]
p13d = gt_p13d[imgid][obj_mask]
depth =gt_depth[imgid][obj_mask]
# compute rigid transform
quatx = torch.nn.functional.normalize(quat[it],2,-1)
quatx = kornia.quaternion_to_rotation_matrix(quatx)
pred_p13d = quatx.matmul(p03d[:,:,None])[:,:,0]+tran[it] * alpha[imgid]
#pdb.set_trace()
#from utils.fusion import pcwrite
#pc1 = np.asarray(p03d.cpu())
#pc2 = np.asarray(pred_p13d.detach().cpu())
#pc3 = np.asarray(p13d.cpu())
#rgb = img[imgid][obj_mask].cpu()*255
#pcwrite('/data/gengshay/0.ply',np.concatenate([pc1,rgb],-1))
#pcwrite('/data/gengshay/1.ply',np.concatenate([pc2,rgb],-1))
#pcwrite('/data/gengshay/2.ply',np.concatenate([pc3,rgb],-1))
sub_loss = ((p13d - pred_p13d)/depth).abs()
loss.append( sub_loss.mean() )
# vis
sub_vis = torch.zeros_like(vis[0,0])
sub_vis[obj_mask] = sub_loss.mean(-1)
vis[imgid,0] += sub_vis
if len(loss)>0:
loss = torch.stack(loss).mean()
else:
loss = 0
return loss, vis
def distance2mask(points, distances, angles, max_shape=None):
    '''Decode distance predictions to 36 contour points.
    Args:
        points (Tensor): Shape (n, 2), [x, y] center coordinates.
        distances (Tensor): Distance from each center to the contour at the
            36 sampled angles (0 to 350 degrees, every 10 degrees).
        angles (Tensor): The 36 sample angles, in radians.
        max_shape (tuple): Shape of the image, used to clamp decoded points.
    Returns:
        Tensor: Decoded contour points, shape (n, 2, 36).
    '''
num_points = points.shape[0]
points = points[:, :, None].repeat(1, 1, 36)
c_x, c_y = points[:, 0], points[:, 1]
sin = torch.sin(angles)
cos = torch.cos(angles)
sin = sin[None, :].repeat(num_points, 1)
cos = cos[None, :].repeat(num_points, 1)
x = distances * sin + c_x
y = distances * cos + c_y
if max_shape is not None:
x = x.clamp(min=0, max=max_shape[1] - 1)
y = y.clamp(min=0, max=max_shape[0] - 1)
res = torch.cat([x[:, None, :], y[:, None, :]], dim=1)
return res
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100,quat=None,tran =None,p03d=None):
batch, cat, height, width = heat.size()
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
if reg is not None:
reg = _transpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1)
ys = ys.view(batch, K, 1)
scores = scores.view(batch, K, 1)
pdist_ct = torch.cat([xs,ys],-1)
pdist_ind=(ys*width+xs).long()
pdist_pred = _transpose_and_gather_feat(wh, pdist_ind[:,:,0])
if quat is not None:
quat_pred = _transpose_and_gather_feat(quat, pdist_ind[:,:,0])
tran_pred = _transpose_and_gather_feat(tran, pdist_ind[:,:,0])
pdist_mask = (scores>0.1)[:,:,0]
contour_pred = np.zeros(wh.shape[2:])
mask_pred = np.zeros(wh.shape[2:])
    angles = torch.arange(0, 360, 10).cuda() / 180 * math.pi  # torch.range is deprecated; same 36 angles
bboxs = np.zeros((0,4))
p03d = p03d[0].permute(1,2,0)
p13d = p03d.clone()
if pdist_mask.sum()>0:
contour = distance2mask(pdist_ct[0][pdist_mask[0]], pdist_pred[0][pdist_mask[0]], angles, wh.shape[2:])
contour = np.asarray(contour.permute(0,2,1).cpu()[:,:,None],dtype=int)
contour_pred = cv2.drawContours(contour_pred, contour, -1,1,3)
mask_pred,bboxs = draw_masks(mask_pred, np.asarray(pdist_ct[0][pdist_mask[0]].cpu()), contour)
#pdb.set_trace()
if quat is not None:
quat_pred = quat_pred[0][pdist_mask[0]]
tran_pred = tran_pred[0][pdist_mask[0]]
#p03d,p13d = rigid_transform(p03d,p13d,quat_pred,tran_pred, mask_pred)
pred = np.concatenate([contour_pred, mask_pred],0)
rt = {}
rt['mask'] = pred
scores = np.asarray(scores[scores>0.1].cpu())
rt['bbox'] = np.concatenate([bboxs.reshape((-1,4)), scores[:,None]],-1)
if quat is not None:
rt['quat'] = np.asarray(kornia.quaternion_to_rotation_matrix(quat_pred).cpu())
rt['tran'] = np.asarray(tran_pred.cpu())
#rt['p03d'] = np.asarray(p03d.cpu())
#rt['p13d'] = np.asarray(p13d.cpu())
return rt
def label_colormap():
"""Creates a label colormap used in CITYSCAPES segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=np.uint8)
colormap[0] = [128, 64, 128]
colormap[1] = [255, 0, 0]
colormap[2] = [0, 255, 0]
colormap[3] = [250, 250, 0]
colormap[4] = [0, 215, 230]
colormap[5] = [190, 153, 153]
colormap[6] = [250, 170, 30]
colormap[7] = [102, 102, 156]
colormap[8] = [107, 142, 35]
colormap[9] = [152, 251, 152]
colormap[10] = [70, 130, 180]
colormap[11] = [220, 20, 60]
colormap[12] = [0, 0, 230]
colormap[13] = [0, 0, 142]
colormap[14] = [0, 0, 70]
colormap[15] = [0, 60, 100]
colormap[16] = [0, 80, 100]
colormap[17] = [244, 35, 232]
colormap[18] = [119, 11, 32]
return colormap
def draw_masks(mask, ct, contour):
colormap = label_colormap()
bboxs = []
for i in np.argsort(ct[:,1]):
mask = cv2.drawContours(mask, contour[i:i+1], -1,float(i+1),-1) # x,y
bboxs.append(np.hstack( (contour[i,:,0].min(0), contour[i,:,0].max(0)) )[None])
#cv2.imwrite('/data/gengshay/0.png',mask)
return mask, np.concatenate(bboxs,0)
def _topk(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
    topk_ys = (topk_inds // width).int().float()
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    topk_clses = (topk_ind // K).int()
topk_inds = _gather_feat(
topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
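# Max-pool NMS: a heatmap location survives only if it equals the maximum
# of its kernel x kernel neighborhood, i.e. only local peaks keep their
# scores and everything else is zeroed.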
|
banmo-main
|
third_party/vcnplus/flowutils/detlib.py
|
#! /usr/bin/env python2
"""
I/O script to save and load the data coming with the MPI-Sintel low-level
computer vision benchmark.
For more details about the benchmark, please visit www.mpi-sintel.de
CHANGELOG:
v1.0 (2015/02/03): First release
Copyright (c) 2015 Jonas Wulff
Max Planck Institute for Intelligent Systems, Tuebingen, Germany
"""
# Requirements: Numpy and PIL/Pillow
import numpy as np
from PIL import Image
# Check for endianness, based on Daniel Scharstein's optical flow code.
# Using little-endian architecture, these two should be equal.
TAG_FLOAT = 202021.25
TAG_CHAR = 'PIEH'
def flow_read(filename):
""" Read optical flow from file, return (U,V) tuple.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
f = open(filename,'rb')
check = np.fromfile(f,dtype=np.float32,count=1)[0]
assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)
width = np.fromfile(f,dtype=np.int32,count=1)[0]
height = np.fromfile(f,dtype=np.int32,count=1)[0]
size = width*height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)
tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))
u = tmp[:,np.arange(width)*2]
v = tmp[:,np.arange(width)*2 + 1]
return u,v
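# .flo layout read above: a 4-byte float tag (202021.25, doubling as an
# endianness sanity check), int32 width and height, then H rows of 2*W
# float32 values with u and v interleaved per pixel.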
def flow_write(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def depth_read(filename):
""" Read depth data from file, return as numpy array. """
f = open(filename,'rb')
check = np.fromfile(f,dtype=np.float32,count=1)[0]
assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)
width = np.fromfile(f,dtype=np.int32,count=1)[0]
height = np.fromfile(f,dtype=np.int32,count=1)[0]
size = width*height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)
depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width))
return depth
def depth_write(filename, depth):
""" Write depth to file. """
height,width = depth.shape[:2]
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
depth.astype(np.float32).tofile(f)
f.close()
def disparity_write(filename,disparity,bitdepth=16):
""" Write disparity to file.
bitdepth can be either 16 (default) or 32.
The maximum disparity is 1024, since the image width in Sintel
is 1024.
"""
d = disparity.copy()
# Clip disparity.
d[d>1024] = 1024
d[d<0] = 0
d_r = (d / 4.0).astype('uint8')
d_g = ((d * (2.0**6)) % 256).astype('uint8')
out = np.zeros((d.shape[0],d.shape[1],3),dtype='uint8')
out[:,:,0] = d_r
out[:,:,1] = d_g
if bitdepth > 16:
d_b = (d * (2**14) % 256).astype('uint8')
out[:,:,2] = d_b
Image.fromarray(out,'RGB').save(filename,'PNG')
def disparity_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
d_r = f_in[:,:,0].astype('float64')
d_g = f_in[:,:,1].astype('float64')
d_b = f_in[:,:,2].astype('float64')
depth = d_r * 4 + d_g / (2**6) + d_b / (2**14)
return depth
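# Fixed-point decoding matching disparity_write: R stores floor(d/4), G
# refines d to 1/64 px and B (written only for 32-bit files) to 1/16384 px,
# so d = 4*R + G/64 + B/16384 covers 0..1024 px.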
#def cam_read(filename):
# """ Read camera data, return (M,N) tuple.
#
# M is the intrinsic matrix, N is the extrinsic matrix, so that
#
# x = M*N*X,
# with x being a point in homogeneous image pixel coordinates, X being a
# point in homogeneous world coordinates.
# """
# txtdata = np.loadtxt(filename)
# intrinsic = txtdata[0,:9].reshape((3,3))
# extrinsic = txtdata[1,:12].reshape((3,4))
# return intrinsic,extrinsic
#
#
#def cam_write(filename,M,N):
# """ Write intrinsic matrix M and extrinsic matrix N to file. """
# Z = np.zeros((2,12))
# Z[0,:9] = M.ravel()
# Z[1,:12] = N.ravel()
# np.savetxt(filename,Z)
def cam_read(filename):
""" Read camera data, return (M,N) tuple.
M is the intrinsic matrix, N is the extrinsic matrix, so that
x = M*N*X,
with x being a point in homogeneous image pixel coordinates, X being a
point in homogeneous world coordinates.
"""
f = open(filename,'rb')
check = np.fromfile(f,dtype=np.float32,count=1)[0]
assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)
M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))
N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))
return M,N
def cam_write(filename, M, N):
""" Write intrinsic matrix M and extrinsic matrix N to file. """
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
M.astype('float64').tofile(f)
N.astype('float64').tofile(f)
f.close()
def segmentation_write(filename,segmentation):
""" Write segmentation to file. """
segmentation_ = segmentation.astype('int32')
seg_r = np.floor(segmentation_ / (256**2)).astype('uint8')
seg_g = np.floor((segmentation_ % (256**2)) / 256).astype('uint8')
seg_b = np.floor(segmentation_ % 256).astype('uint8')
out = np.zeros((segmentation.shape[0],segmentation.shape[1],3),dtype='uint8')
out[:,:,0] = seg_r
out[:,:,1] = seg_g
out[:,:,2] = seg_b
Image.fromarray(out,'RGB').save(filename,'PNG')
def segmentation_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
seg_r = f_in[:,:,0].astype('int32')
seg_g = f_in[:,:,1].astype('int32')
seg_b = f_in[:,:,2].astype('int32')
segmentation = (seg_r * 256 + seg_g) * 256 + seg_b
return segmentation
|
banmo-main
|
third_party/vcnplus/flowutils/sintel_io.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torchvision.models as models
import torch
import torch.nn as nn
import os
from .networks.msra_resnet import get_pose_net
from .networks.dlav0 import get_pose_net as get_dlav0
from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn
from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn
from .networks.large_hourglass import get_large_hourglass_net
_model_factory = {
'res': get_pose_net, # default Resnet with deconv
'dlav0': get_dlav0, # default DLAup
'dla': get_dla_dcn,
'resdcn': get_pose_net_dcn,
'hourglass': get_large_hourglass_net,
}
def create_model(arch, heads, head_conv,num_input):
num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0
arch = arch[:arch.find('_')] if '_' in arch else arch
get_model = _model_factory[arch]
model = get_model(num_layers=num_layers, heads=heads, head_conv=head_conv,num_input=num_input)
return model
def load_model(model, model_path, optimizer=None, resume=False,
lr=None, lr_step=None):
start_epoch = 0
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert data_parallal to model
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
# check loaded parameters and created model parameters
msg = 'If you see this, your model does not fully load the ' + \
'pre-trained weight. Please make sure ' + \
'you have correctly specified --arch xxx ' + \
'or set the correct --num_classes for your own dataset.'
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
print('Skip loading parameter {}, required shape{}, '\
'loaded shape{}. {}'.format(
k, model_state_dict[k].shape, state_dict[k].shape, msg))
state_dict[k] = model_state_dict[k]
else:
print('Drop parameter {}.'.format(k) + msg)
for k in model_state_dict:
if not (k in state_dict):
print('No param {}.'.format(k) + msg)
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
# resume optimizer parameters
if optimizer is not None and resume:
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']
start_lr = lr
for step in lr_step:
if start_epoch >= step:
start_lr *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = start_lr
print('Resumed optimizer with start lr', start_lr)
else:
print('No optimizer parameters in checkpoint.')
if optimizer is not None:
return model, optimizer, start_epoch
else:
return model
def save_model(path, epoch, model, optimizer=None):
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
data = {'epoch': epoch,
'state_dict': state_dict}
if not (optimizer is None):
data['optimizer'] = optimizer.state_dict()
torch.save(data, path)
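# Usage sketch (hypothetical paths and arguments, following the signatures
# above):
#   save_model("/tmp/model_5.pth", 5, model, optimizer)
#   model, optimizer, start_epoch = load_model(
#       model, "/tmp/model_5.pth", optimizer, resume=True,
#       lr=1e-3, lr_step=[30, 45])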
|
banmo-main
|
third_party/vcnplus/models/det.py
|
# ------------------------------------------------------------------------------
# Portions of this code are from
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import torch
import torch.nn as nn
from .det_utils import _transpose_and_gather_feat
import torch.nn.functional as F
def _slow_neg_loss(pred, gt):
'''focal loss from CornerNet'''
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _neg_loss(pred, gt, heat_logits):
    ''' Modified focal loss, adapted from CornerNet. This variant scores a
    two-channel heatmap logit with log-softmax instead of log-sigmoid.
    Runs faster and costs a little bit more memory.
    Arguments:
        pred (batch x c x h x w)
        gt (batch x c x h x w)
        heat_logits (batch x 2 x h x w)
    '''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
logpred = torch.nn.functional.log_softmax(heat_logits,1)
pos_loss = logpred[:,0:1] * torch.pow(1 - pred, 2) * pos_inds
neg_loss = logpred[:,1:2] * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _not_faster_neg_loss(pred, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
num_pos = pos_inds.float().sum()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
trans_pred = pred * neg_inds + (1 - pred) * pos_inds
weight = neg_weights * neg_inds + pos_inds
all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight
all_loss = all_loss.sum()
if num_pos > 0:
all_loss /= num_pos
loss -= all_loss
return loss
def _slow_reg_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')  # was size_average=False
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
def _reg_loss(regr, gt_regr, mask):
''' L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')  # was size_average=False
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class FocalLoss(nn.Module):
    '''nn.Module wrapper for focal loss'''
def __init__(self):
super(FocalLoss, self).__init__()
self.neg_loss = _neg_loss
def forward(self, out, target, logits):
return self.neg_loss(out, target, logits)
class RegLoss(nn.Module):
'''Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
'''
def __init__(self):
super(RegLoss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = _reg_loss(pred, target, mask)
return loss
class RegL1Loss(nn.Module):
def __init__(self):
super(RegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')  # was size_average=False
loss = loss / (mask.sum() + 1e-4)
return loss
class NormRegL1Loss(nn.Module):
def __init__(self):
super(NormRegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
pred = pred / (target + 1e-4)
target = target * 0 + 1
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')  # was size_average=False
loss = loss / (mask.sum() + 1e-4)
return loss
class RegWeightedL1Loss(nn.Module):
def __init__(self):
super(RegWeightedL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')  # was size_average=False
loss = loss / (mask.sum() + 1e-4)
return loss
class L1Loss(nn.Module):
def __init__(self):
super(L1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
        loss = F.l1_loss(pred * mask, target * mask, reduction='mean')  # 'elementwise_mean' was renamed 'mean'
return loss
class BinRotLoss(nn.Module):
def __init__(self):
super(BinRotLoss, self).__init__()
def forward(self, output, mask, ind, rotbin, rotres):
pred = _transpose_and_gather_feat(output, ind)
loss = compute_rot_loss(pred, rotbin, rotres, mask)
return loss
def compute_res_loss(output, target):
    return F.smooth_l1_loss(output, target, reduction='mean')  # 'elementwise_mean' was renamed 'mean'
# TODO: weight
def compute_bin_loss(output, target, mask):
mask = mask.expand_as(output)
output = output * mask.float()
    return F.cross_entropy(output, target, reduction='mean')  # 'elementwise_mean' was renamed 'mean'
def compute_rot_loss(output, target_bin, target_res, mask):
# output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
# bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
# target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
# target_res: (B, 128, 2) [bin1_res, bin2_res]
# mask: (B, 128, 1)
# import pdb; pdb.set_trace()
output = output.view(-1, 8)
target_bin = target_bin.view(-1, 2)
target_res = target_res.view(-1, 2)
mask = mask.view(-1, 1)
loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
loss_res = torch.zeros_like(loss_bin1)
if target_bin[:, 0].nonzero().shape[0] > 0:
idx1 = target_bin[:, 0].nonzero()[:, 0]
valid_output1 = torch.index_select(output, 0, idx1.long())
valid_target_res1 = torch.index_select(target_res, 0, idx1.long())
loss_sin1 = compute_res_loss(
valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))
loss_cos1 = compute_res_loss(
valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))
loss_res += loss_sin1 + loss_cos1
if target_bin[:, 1].nonzero().shape[0] > 0:
idx2 = target_bin[:, 1].nonzero()[:, 0]
valid_output2 = torch.index_select(output, 0, idx2.long())
valid_target_res2 = torch.index_select(target_res, 0, idx2.long())
loss_sin2 = compute_res_loss(
valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))
loss_cos2 = compute_res_loss(
valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))
loss_res += loss_sin2 + loss_cos2
return loss_bin1 + loss_bin2 + loss_res
|
banmo-main
|
third_party/vcnplus/models/det_losses.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
def flip_tensor(x):
return torch.flip(x, [3])
# tmp = x.detach().cpu().numpy()[..., ::-1].copy()
# return torch.from_numpy(tmp).to(x.device)
def flip_lr(x, flip_idx):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
return torch.from_numpy(tmp.reshape(shape)).to(x.device)
def flip_lr_off(x, flip_idx):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
tmp = tmp.reshape(tmp.shape[0], 17, 2,
tmp.shape[2], tmp.shape[3])
tmp[:, :, 0, :, :] *= -1
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
return torch.from_numpy(tmp.reshape(shape)).to(x.device)
|
banmo-main
|
third_party/vcnplus/models/det_utils.py
|
banmo-main
|
third_party/vcnplus/models/__init__.py
|
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
This file incorporates work covered by the following copyright and permission notice:
Copyright (c) 2018 Ignacio Rocco
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Source: https://github.com/ignacio-rocco/weakalign/blob/master/model/cnn_geometric_model.py
"""
import torch
import torch.nn as nn
from torchvision import models
def featureL2Norm(feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) +
epsilon, 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class FeatureExtraction(torch.nn.Module):
def __init__(self, train_fe=False, feature_extraction_cnn='vgg19', normalization=True, last_layer='', use_cuda=True):
super(FeatureExtraction, self).__init__()
self.normalization = normalization
# multiple extracting layers
last_layer = last_layer.split(',')
if feature_extraction_cnn == 'vgg16':
self.model = models.vgg16(pretrained=True)
# keep feature extraction network up to indicated layer
vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
start_index = 0
self.model_list = []
for l in last_layer:
if l == '':
l = 'pool4'
layer_idx = vgg_feature_layers.index(l)
assert layer_idx >= start_index, 'layer order wrong!'
model = nn.Sequential(
*list(self.model.features.children())[start_index:layer_idx + 1])
self.model_list.append(model)
start_index = layer_idx + 1
if feature_extraction_cnn == 'vgg19':
self.model = models.vgg19(pretrained=True)
# keep feature extraction network up to indicated layer
vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5']
vgg_output_dim = [64, 64, 64, 64, 64,
128, 128, 128, 128, 128,
256, 256, 256, 256, 256, 256, 256, 256, 256,
512, 512, 512, 512, 512, 512, 512, 512, 512,
512, 512, 512, 512, 512, 512, 512, 512, 512]
start_index = 0
self.model_list = []
self.out_dim = 0
for l in last_layer:
if l == '':
l = 'relu5_4'
layer_idx = vgg_feature_layers.index(l)
assert layer_idx >= start_index, 'layer order wrong!'
self.out_dim += vgg_output_dim[layer_idx]
model = nn.Sequential(
*list(self.model.features.children())[start_index:layer_idx + 1])
self.model_list.append(model)
start_index = layer_idx + 1
if feature_extraction_cnn == 'resnet101':
self.model = models.resnet101(pretrained=True)
resnet_feature_layers = ['conv1',
'bn1',
'relu',
'maxpool',
'layer1',
'layer2',
'layer3',
'layer4']
            # last_layer was split into a list above; the resnet branch
            # supports a single extraction layer, so take the first entry
            last_layer = last_layer[0]
            if last_layer == '':
                last_layer = 'layer3'
            last_layer_idx = resnet_feature_layers.index(last_layer)
resnet_module_list = [self.model.conv1,
self.model.bn1,
self.model.relu,
self.model.maxpool,
self.model.layer1,
self.model.layer2,
self.model.layer3,
self.model.layer4]
self.model = nn.Sequential(
*resnet_module_list[:last_layer_idx + 1])
if feature_extraction_cnn == 'resnet101_v2':
self.model = models.resnet101(pretrained=True)
# keep feature extraction network up to pool4 (last layer - 7)
self.model = nn.Sequential(*list(self.model.children())[:-3])
if feature_extraction_cnn == 'densenet201':
self.model = models.densenet201(pretrained=True)
# keep feature extraction network up to transitionlayer2
self.model = nn.Sequential(
*list(self.model.features.children())[:-4])
if not train_fe:
# freeze parameters
for param in self.model.parameters():
param.requires_grad = False
# move to GPU
if use_cuda:
self.model_list = [model.cuda() for model in self.model_list]
def forward(self, image_batch):
features_list = []
features = image_batch
for model in self.model_list:
features = model(features)
if self.normalization:
features = featureL2Norm(features)
features_list.append(features)
return features_list
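# Usage sketch (an illustration, not from the original repo): extract
# multi-layer, L2-normalized VGG-19 features for a batch of images.
#   fe = FeatureExtraction(feature_extraction_cnn='vgg19',
#                          last_layer='relu3_4,relu5_4', use_cuda=False)
#   feats = fe(torch.randn(1, 3, 224, 224))  # list of 2 feature maps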
|
banmo-main
|
third_party/vcnplus/models/feature_extraction.py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
import numpy as np
import pdb
#import kornia
class residualBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, n_filters, stride=1, downsample=None,dilation=1,with_bn=True):
super(residualBlock, self).__init__()
if dilation > 1:
padding = dilation
else:
padding = 1
if with_bn:
self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, padding, dilation=dilation)
self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1)
else:
self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, padding, dilation=dilation,with_bn=False)
self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, with_bn=False)
self.downsample = downsample
self.relu = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
residual = x
out = self.convbnrelu1(x)
out = self.convbn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return self.relu(out)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1,inplace=True))
class conv2DBatchNorm(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, dilation=1, with_bn=True):
super(conv2DBatchNorm, self).__init__()
bias = not with_bn
if dilation > 1:
conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias, dilation=dilation)
else:
conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias, dilation=1)
if with_bn:
self.cb_unit = nn.Sequential(conv_mod,
nn.BatchNorm2d(int(n_filters)),)
else:
self.cb_unit = nn.Sequential(conv_mod,)
def forward(self, inputs):
outputs = self.cb_unit(inputs)
return outputs
class conv2DBatchNormRelu(nn.Module):
def __init__(self, in_channels, n_filters, k_size, stride, padding, dilation=1, with_bn=True):
super(conv2DBatchNormRelu, self).__init__()
bias = not with_bn
if dilation > 1:
conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias, dilation=dilation)
else:
conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
padding=padding, stride=stride, bias=bias, dilation=1)
if with_bn:
self.cbr_unit = nn.Sequential(conv_mod,
nn.BatchNorm2d(int(n_filters)),
nn.LeakyReLU(0.1, inplace=True),)
else:
self.cbr_unit = nn.Sequential(conv_mod,
nn.LeakyReLU(0.1, inplace=True),)
def forward(self, inputs):
outputs = self.cbr_unit(inputs)
return outputs
class pyramidPooling(nn.Module):
def __init__(self, in_channels, with_bn=True, levels=4):
super(pyramidPooling, self).__init__()
self.levels = levels
self.paths = []
for i in range(levels):
self.paths.append(conv2DBatchNormRelu(in_channels, in_channels, 1, 1, 0, with_bn=with_bn))
self.path_module_list = nn.ModuleList(self.paths)
self.relu = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
h, w = x.shape[2:]
k_sizes = []
strides = []
for pool_size in np.linspace(1,min(h,w)//2,self.levels,dtype=int):
k_sizes.append((int(h/pool_size), int(w/pool_size)))
strides.append((int(h/pool_size), int(w/pool_size)))
k_sizes = k_sizes[::-1]
strides = strides[::-1]
pp_sum = x
for i, module in enumerate(self.path_module_list):
out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0)
out = module(out)
            out = F.interpolate(out, size=(h, w), mode='bilinear')  # F.upsample is deprecated
pp_sum = pp_sum + 1./self.levels*out
pp_sum = self.relu(pp_sum/2.)
return pp_sum
class pspnet(nn.Module):
"""
Modified PSPNet. https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/models/pspnet.py
"""
def __init__(self, is_proj=True,groups=1):
super(pspnet, self).__init__()
self.inplanes = 32
self.is_proj = is_proj
# Encoder
self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
padding=1, stride=2)
self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
padding=1, stride=1)
self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
padding=1, stride=1)
# Vanilla Residual Blocks
self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block5 = self._make_layer(residualBlock,128,1,stride=2)
self.res_block6 = self._make_layer(residualBlock,128,1,stride=2)
self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
self.pyramid_pooling = pyramidPooling(128, levels=3)
# Iconvs
self.upconv6 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv5 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
padding=1, stride=1)
self.upconv5 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv4 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
padding=1, stride=1)
self.upconv4 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv3 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
self.upconv3 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1))
self.iconv2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64,
padding=1, stride=1)
if self.is_proj:
self.proj6 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj5 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj4 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj3 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
self.proj2 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# H, W -> H/2, W/2
conv1 = self.convbnrelu1_1(x)
conv1 = self.convbnrelu1_2(conv1)
conv1 = self.convbnrelu1_3(conv1)
## H/2, W/2 -> H/4, W/4
pool1 = F.max_pool2d(conv1, 3, 2, 1)
# H/4, W/4 -> H/16, W/16
rconv3 = self.res_block3(pool1)
conv4 = self.res_block5(rconv3)
conv5 = self.res_block6(conv4)
conv6 = self.res_block7(conv5)
conv6 = self.pyramid_pooling(conv6)
conv6x = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]],mode='bilinear')
concat5 = torch.cat((conv5,self.upconv6[1](conv6x)),dim=1)
conv5 = self.iconv5(concat5)
conv5x = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]],mode='bilinear')
concat4 = torch.cat((conv4,self.upconv5[1](conv5x)),dim=1)
conv4 = self.iconv4(concat4)
conv4x = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]],mode='bilinear')
concat3 = torch.cat((rconv3,self.upconv4[1](conv4x)),dim=1)
conv3 = self.iconv3(concat3)
conv3x = F.upsample(conv3, [pool1.size()[2],pool1.size()[3]],mode='bilinear')
concat2 = torch.cat((pool1,self.upconv3[1](conv3x)),dim=1)
conv2 = self.iconv2(concat2)
if self.is_proj:
proj6 = self.proj6(conv6)
proj5 = self.proj5(conv5)
proj4 = self.proj4(conv4)
proj3 = self.proj3(conv3)
proj2 = self.proj2(conv2)
return proj6,proj5,proj4,proj3,proj2
else:
return conv6, conv5, conv4, conv3, conv2
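# Minimal usage sketch (illustrative, not part of the original file): the encoder
# returns five feature maps at 1/64, 1/32, 1/16, 1/8 and 1/4 of the input size.
def _demo_pspnet():
    import torch
    net = pspnet(is_proj=False).eval()
    with torch.no_grad():
        feats = net(torch.randn(1, 3, 256, 256))
    print([tuple(f.shape[-2:]) for f in feats])  # [(4, 4), (8, 8), (16, 16), (32, 32), (64, 64)]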
class pspnet_s(nn.Module):
"""
Modified PSPNet. https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/models/pspnet.py
"""
def __init__(self, is_proj=True,groups=1):
super(pspnet_s, self).__init__()
self.inplanes = 32
self.is_proj = is_proj
# Encoder
self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
padding=1, stride=2)
self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
padding=1, stride=1)
self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
padding=1, stride=1)
# Vanilla Residual Blocks
self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block5 = self._make_layer(residualBlock,128,1,stride=2)
self.res_block6 = self._make_layer(residualBlock,128,1,stride=2)
self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
self.pyramid_pooling = pyramidPooling(128, levels=3)
# Iconvs
self.upconv6 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv5 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
padding=1, stride=1)
self.upconv5 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv4 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
padding=1, stride=1)
self.upconv4 = nn.Sequential(nn.Upsample(scale_factor=2),
conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1))
self.iconv3 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
#self.upconv3 = nn.Sequential(nn.Upsample(scale_factor=2),
# conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
# padding=1, stride=1))
#self.iconv2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64,
# padding=1, stride=1)
if self.is_proj:
self.proj6 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj5 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj4 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
self.proj3 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
#self.proj2 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# H, W -> H/2, W/2
conv1 = self.convbnrelu1_1(x)
conv1 = self.convbnrelu1_2(conv1)
conv1 = self.convbnrelu1_3(conv1)
## H/2, W/2 -> H/4, W/4
pool1 = F.max_pool2d(conv1, 3, 2, 1)
# H/4, W/4 -> H/16, W/16
rconv3 = self.res_block3(pool1)
conv4 = self.res_block5(rconv3)
conv5 = self.res_block6(conv4)
conv6 = self.res_block7(conv5)
conv6 = self.pyramid_pooling(conv6)
conv6x = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]],mode='bilinear')
concat5 = torch.cat((conv5,self.upconv6[1](conv6x)),dim=1)
conv5 = self.iconv5(concat5)
conv5x = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]],mode='bilinear')
concat4 = torch.cat((conv4,self.upconv5[1](conv5x)),dim=1)
conv4 = self.iconv4(concat4)
conv4x = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]],mode='bilinear')
concat3 = torch.cat((rconv3,self.upconv4[1](conv4x)),dim=1)
conv3 = self.iconv3(concat3)
#conv3x = F.upsample(conv3, [pool1.size()[2],pool1.size()[3]],mode='bilinear')
#concat2 = torch.cat((pool1,self.upconv3[1](conv3x)),dim=1)
#conv2 = self.iconv2(concat2)
if self.is_proj:
proj6 = self.proj6(conv6)
proj5 = self.proj5(conv5)
proj4 = self.proj4(conv4)
proj3 = self.proj3(conv3)
# proj2 = self.proj2(conv2)
# return proj6,proj5,proj4,proj3,proj2
return proj6,proj5,proj4,proj3
else:
# return conv6, conv5, conv4, conv3, conv2
return conv6, conv5, conv4, conv3
class bfmodule(nn.Module):
def __init__(self, inplanes, outplanes):
super(bfmodule, self).__init__()
self.proj = conv2DBatchNormRelu(in_channels=inplanes,k_size=1,n_filters=64,padding=0,stride=1)
self.inplanes = 64
# Vanilla Residual Blocks
self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block5 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block6 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
self.pyramid_pooling = pyramidPooling(128, levels=3)
# Iconvs
self.upconv6 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
self.upconv5 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.upconv4 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.upconv3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.iconv5 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv4 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv3 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv2 = nn.Sequential(conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1),
nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True))
self.proj6 = nn.Conv2d(128, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj5 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj4 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj3 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
proj = self.proj(x) # 4x
rconv3 = self.res_block3(proj) #8x
conv4 = self.res_block5(rconv3) #16x
conv5 = self.res_block6(conv4) #32x
conv6 = self.res_block7(conv5) #64x
conv6 = self.pyramid_pooling(conv6) #64x
pred6 = self.proj6(conv6)
conv6u = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]], mode='bilinear')
concat5 = torch.cat((conv5,self.upconv6(conv6u)),dim=1)
conv5 = self.iconv5(concat5) #32x
pred5 = self.proj5(conv5)
conv5u = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]], mode='bilinear')
concat4 = torch.cat((conv4,self.upconv5(conv5u)),dim=1)
conv4 = self.iconv4(concat4) #16x
pred4 = self.proj4(conv4)
conv4u = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]], mode='bilinear')
concat3 = torch.cat((rconv3,self.upconv4(conv4u)),dim=1)
conv3 = self.iconv3(concat3) # 8x
pred3 = self.proj3(conv3)
conv3u = F.upsample(conv3, [x.size()[2],x.size()[3]], mode='bilinear')
concat2 = torch.cat((proj,self.upconv3(conv3u)),dim=1)
pred2 = self.iconv2(concat2) # 4x
return pred2, pred3, pred4, pred5, pred6
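# Minimal usage sketch (illustrative, not part of the original file): bfmodule
# predicts at five scales, from the input resolution down to 1/16 of it.
def _demo_bfmodule():
    import torch
    net = bfmodule(16, 1).eval()
    with torch.no_grad():
        preds = net(torch.randn(1, 16, 64, 64))
    print([tuple(p.shape[-2:]) for p in preds])  # [(64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]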
class bfmodule_feat(nn.Module):
def __init__(self, inplanes, outplanes):
super(bfmodule_feat, self).__init__()
self.proj = conv2DBatchNormRelu(in_channels=inplanes,k_size=1,n_filters=64,padding=0,stride=1)
self.inplanes = 64
# Vanilla Residual Blocks
self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block5 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block6 = self._make_layer(residualBlock,64,1,stride=2)
self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
self.pyramid_pooling = pyramidPooling(128, levels=3)
# Iconvs
self.upconv6 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
self.upconv5 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.upconv4 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.upconv3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
padding=1, stride=1)
self.iconv5 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv4 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv3 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1)
self.iconv2 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
padding=1, stride=1)
self.proj6 = nn.Conv2d(128, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj5 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj4 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj3 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
self.proj2 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
proj = self.proj(x) # 4x
rconv3 = self.res_block3(proj) #8x
conv4 = self.res_block5(rconv3) #16x
conv5 = self.res_block6(conv4) #32x
conv6 = self.res_block7(conv5) #64x
conv6 = self.pyramid_pooling(conv6) #64x
pred6 = self.proj6(conv6)
conv6u = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]], mode='bilinear')
concat5 = torch.cat((conv5,self.upconv6(conv6u)),dim=1)
conv5 = self.iconv5(concat5) #32x
pred5 = self.proj5(conv5)
conv5u = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]], mode='bilinear')
concat4 = torch.cat((conv4,self.upconv5(conv5u)),dim=1)
conv4 = self.iconv4(concat4) #16x
pred4 = self.proj4(conv4)
conv4u = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]], mode='bilinear')
concat3 = torch.cat((rconv3,self.upconv4(conv4u)),dim=1)
conv3 = self.iconv3(concat3) # 8x
pred3 = self.proj3(conv3)
conv3u = F.upsample(conv3, [x.size()[2],x.size()[3]], mode='bilinear')
concat2 = torch.cat((proj,self.upconv3(conv3u)),dim=1)
conv2 = self.iconv2(concat2) # 4x
pred2 = self.proj2(conv2) # 4x
return pred2, conv2
def compute_geo_costs(rot, trans, Ex, Kinv, hp0, hp1, tau, Kinv_n=None):
if Kinv_n is None: Kinv_n = Kinv
R01 = kornia.angle_axis_to_rotation_matrix(rot)
    H01 = Kinv.inverse().matmul(R01).matmul(Kinv_n) # homography induced by pure rotation: K R01 Kn^-1
comp_hp1 = H01.matmul(hp1.permute(0,2,1))
foe = (comp_hp1-tau*hp0.permute(0,2,1))
parallax3d = Kinv.matmul(foe)
p3dmag = parallax3d.norm(2,1)[:,np.newaxis]
parallax2d = (comp_hp1/comp_hp1[:,-1:]-hp0.permute(0,2,1))[:,:2]
p2dmag = parallax2d.norm(2,1)[:,np.newaxis]
p2dnorm = parallax2d / (1e-9+p2dmag)
foe_cam = Kinv.inverse().matmul(trans[:,:,np.newaxis])
foe_cam = foe_cam[:,:2] / (1e-9+foe_cam[:,-1:])
direct = foe_cam -hp0.permute(0,2,1)[:,:2]
directn = direct / (1e-9+direct.norm(2,1)[:,np.newaxis])
    # metrics: 0) rotation-homography + symmetric transfer error; 1) Sampson error;
    # 2) 2D angular; 3) 3D sampson; 4) 3D angular
    ##TODO validate
comp_hp0 = H01.inverse().matmul(hp0.permute(0,2,1))
mcost00 = parallax2d.norm(2,1)
mcost01 = (comp_hp0/comp_hp0[:,-1:] - hp1.permute(0,2,1))[:,:2].norm(2,1)
mcost1 = sampson_err(Kinv.matmul(hp0.permute(0,2,1)),
Kinv_n.matmul(hp1.permute(0,2,1)),Ex.cuda().permute(0,2,1)) # variable K
    mcost2 = -(trans[:,-1:,np.newaxis]).sign()*(directn*p2dnorm).sum(1,keepdim=True)
    mcost4 = -(trans[:,:,np.newaxis]*parallax3d).sum(1,keepdim=True)/(p3dmag+1e-9)
mcost3 = torch.clamp(1-mcost4.pow(2),0,1).sqrt()*p3dmag*mcost4.sign()
mcost10 = torch.clamp(1-mcost2.pow(2),0,1).sqrt()*p2dmag*mcost2.sign()
return mcost00, mcost01, mcost1, mcost2, mcost3, mcost4, p3dmag, mcost10
def get_skew_mat(transx,rotx):
    # returns R^T [t]_x, whose transpose is the essential matrix [t]_x R up to sign;
    # the local `trans` computed below is unused
rot = kornia.angle_axis_to_rotation_matrix(rotx)
trans = -rot.permute(0,2,1).matmul(transx[:,:,np.newaxis])[:,:,0]
rot = rot.permute(0,2,1)
tx = torch.zeros(transx.shape[0],3,3)
tx[:,0,1] = -transx[:,2]
tx[:,0,2] = transx[:,1]
tx[:,1,0] = transx[:,2]
tx[:,1,2] = -transx[:,0]
tx[:,2,0] = -transx[:,1]
tx[:,2,1] = transx[:,0]
return rot.matmul(tx)
def sampson_err(x1h, x2h, F):
l2 = F.permute(0,2,1).matmul(x1h)
l1 = F.matmul(x2h)
algdis = (l1 * x1h).sum(1)
dis = algdis**2 / (1e-9+l1[:,0]**2+l1[:,1]**2+l2[:,0]**2+l2[:,1]**2)
return dis
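# Worked sanity check (illustrative, not part of the original file): for a pure
# x-translation, E = [t]_x with t=(1,0,0), and any normalized point pair with
# equal y satisfies the epipolar constraint, so the Sampson error vanishes.
def _demo_sampson_err():
    import torch
    E = torch.tensor([[0., 0., 0.],
                      [0., 0., -1.],
                      [0., 1., 0.]])[None]          # [t]_x for t = (1,0,0)
    x1 = torch.tensor([[0.0], [0.5], [1.0]])[None]  # (1,3,1) homogeneous points
    x2 = torch.tensor([[0.3], [0.5], [1.0]])[None]
    print(sampson_err(x1, x2, E))  # tensor([[0.]])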
def get_intrinsics(intr, noise=False):
f = intr[0].float()
cx = intr[1].float()
cy = intr[2].float()
bs = f.shape[0]
delta = 1e-4
if noise:
fo = f.clone()
cxo = cx.clone()
cyo = cy.clone()
f = torch.Tensor(np.random.normal(loc=0., scale=delta,size=(bs,))).cuda().exp() * fo
cx = torch.Tensor(np.random.normal(loc=0.,scale=delta,size=(bs,))).cuda().exp() * cxo
cy = torch.Tensor(np.random.normal(loc=0.,scale=delta,size=(bs,))).cuda().exp() * cyo
Kinv = torch.Tensor(np.eye(3)[np.newaxis]).cuda().repeat(bs,1,1)
Kinv[:,2,2] *= f
Kinv[:,0,2] -= cx
Kinv[:,1,2] -= cy
Kinv /= f[:,np.newaxis,np.newaxis] #4,3,3
Taug = torch.cat(intr[4:10],-1).view(-1,bs).T # 4,6
Taug = torch.cat((Taug.view(bs,3,2).permute(0,2,1),Kinv[:,2:3]),1)
Kinv = Kinv.matmul(Taug)
if len(intr)>12:
Kinv_n = torch.Tensor(np.eye(3)[np.newaxis]).cuda().repeat(bs,1,1)
fn = intr[12].float()
Kinv_n[:,2,2] *= fn
Kinv_n[:,0,2] -= cx
Kinv_n[:,1,2] -= cy
Kinv_n /= fn[:,np.newaxis,np.newaxis] #4,3,3
elif noise:
f = torch.Tensor(np.random.normal(loc=0., scale=delta,size=(bs,))).cuda().exp() * fo
cx = torch.Tensor(np.random.normal(loc=0.,scale=delta,size=(bs,))).cuda().exp() * cxo
cy = torch.Tensor(np.random.normal(loc=0.,scale=delta,size=(bs,))).cuda().exp() * cyo
Kinv_n = torch.Tensor(np.eye(3)[np.newaxis]).cuda().repeat(bs,1,1)
Kinv_n[:,2,2] *= f
Kinv_n[:,0,2] -= cx
Kinv_n[:,1,2] -= cy
Kinv_n /= f[:,np.newaxis,np.newaxis] #4,3,3
Taug = torch.cat(intr[4:10],-1).view(-1,bs).T # 4,6
Taug = torch.cat((Taug.view(bs,3,2).permute(0,2,1),Kinv_n[:,2:3]),1)
Kinv_n = Kinv_n.matmul(Taug)
else:
Kinv_n = Kinv
return Kinv, Kinv_n
def F_ngransac(hp0,hp1,Ks,rand, unc_occ, iters=1000,cv=False,Kn=None):
    cv=True # force the OpenCV path below; the ngransac branch is kept for reference
if Kn is None:
Kn = Ks
import cv2
b = hp1.shape[0]
hp0_cpu = np.asarray(hp0.cpu())
hp1_cpu = np.asarray(hp1.cpu())
if not rand:
## TODO
fmask = np.ones(hp0.shape[1]).astype(bool)
rand_seed = 0
else:
fmask = np.random.choice([True, False], size=hp0.shape[1], p=[0.1,0.9])
        rand_seed = np.random.randint(0,1000) # random seed to be used in C++
### TODO
hp0 = Ks.inverse().matmul(hp0.permute(0,2,1)).permute(0,2,1)
hp1 = Kn.inverse().matmul(hp1.permute(0,2,1)).permute(0,2,1)
ratios = torch.zeros(hp0[:1,:,:1].shape)
probs = torch.Tensor(np.ones(fmask.sum()))/fmask.sum()
probs = probs[np.newaxis,:,np.newaxis]
#probs = torch.Tensor(np.zeros(fmask.sum()))
##unc_occ = unc_occ<0; probs[unc_occ[0]] = 1./unc_occ.float().sum()
#probs = F.softmax(-0.1*unc_occ[0],-1).cpu()
#probs = probs[np.newaxis,:,np.newaxis]
Es = torch.zeros((b, 3,3)).float() # estimated model
rot = torch.zeros((b, 3)).float() # estimated model
trans = torch.zeros((b, 3)).float() # estimated model
out_model = torch.zeros((3, 3)).float() # estimated model
out_inliers = torch.zeros(probs.size()) # inlier mask of estimated model
out_gradients = torch.zeros(probs.size()) # gradient tensor (only used during training)
for i in range(b):
pts1 = hp0[i:i+1, fmask,:2].cpu()
pts2 = hp1[i:i+1, fmask,:2].cpu()
        # create data tensor of feature coordinates and matching ratios
        # note: `ratios` keeps the full point count, so this cat only lines up when fmask selects all points
        correspondences = torch.cat((pts1, pts2, ratios), axis=2)
correspondences = correspondences.permute(2,1,0)
#incount = ngransac.find_fundamental_mat(correspondences, probs, rand_seed, 1000, 0.1, True, out_model, out_inliers, out_gradients)
#E = K1.T.dot(out_model).dot(K0)
if cv==True:
E, ffmask = cv2.findEssentialMat(np.asarray(pts1[0]), np.asarray(pts2[0]), np.eye(3), cv2.FM_RANSAC,threshold=0.0001)
ffmask = ffmask[:,0]
Es[i]=torch.Tensor(E)
else:
import ngransac
incount = ngransac.find_essential_mat(correspondences, probs, rand_seed, iters, 0.0001, out_model, out_inliers, out_gradients)
Es[i]=out_model
E = np.asarray(out_model)
maskk = np.asarray(out_inliers[0,:,0])
ffmask = fmask.copy()
ffmask[fmask] = maskk
K1 = np.asarray(Kn[i].cpu())
K0 = np.asarray(Ks[i].cpu())
R1, R2, T = cv2.decomposeEssentialMat(E)
for rott in [(R1,T),(R2,T),(R1,-T),(R2,-T)]:
if testEss(K0,K1,rott[0],rott[1],hp0_cpu[0,ffmask].T, hp1_cpu[i,ffmask].T):
#if testEss(K0,K1,rott[0],rott[1],hp0_cpu[0,ffmask].T[:,ffmask.sum()//10::ffmask.sum()//10], hp1_cpu[i,ffmask].T[:,ffmask.sum()//10::ffmask.sum()//10]):
R01=rott[0].T
t10=-R01.dot(rott[1][:,0])
        if 't10' not in locals():
t10 = np.asarray([0,0,1])
R01 = np.eye(3)
rot[i] = torch.Tensor(cv2.Rodrigues(R01)[0][:,0]).cuda()
trans[i] = torch.Tensor(t10).cuda()
return rot, trans, Es
def testEss(K0,K1,R,T,p1,p2):
import cv2
testP = cv2.triangulatePoints(K0.dot(np.concatenate( (np.eye(3),np.zeros((3,1))), -1)),
K1.dot(np.concatenate( (R,T), -1)),
p1[:2],p2[:2])
Z1 = testP[2,:]/testP[-1,:]
Z2 = (R.dot(Z1*np.linalg.inv(K0).dot(p1))+T)[-1,:]
if ((Z1>0).sum() > (Z1<=0).sum()) and ((Z2>0).sum() > (Z2<=0).sum()):
#print(Z1)
#print(Z2)
return True
else:
return False
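# Minimal sanity check (illustrative, not part of the original file; assumes
# OpenCV is installed): points in front of both cameras pass the cheirality test.
def _demo_testEss():
    import numpy as np
    K = np.eye(3)
    X = np.array([[0., 1., 0.],
                  [0., 0., 1.],
                  [5., 6., 4.]])            # three 3D points, one per column
    T = np.array([[-1.], [0.], [0.]])       # second camera, unit baseline along x
    p1 = np.vstack((X[:2] / X[2], np.ones(3)))
    Xc2 = X + T
    p2 = np.vstack((Xc2[:2] / Xc2[2], np.ones(3)))
    print(testEss(K, K, np.eye(3), T, p1, p2))  # True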
|
banmo-main
|
third_party/vcnplus/models/submodule.py
|
import pdb
import torch.nn as nn
import math
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import Module
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _quadruple
from torch.autograd import Variable
from torch.nn import Conv2d
def conv4d(data,filters,bias=None,permute_filters=True,use_half=False):
"""
This is done by stacking results of multiple 3D convolutions, and is very slow.
Taken from https://github.com/ignacio-rocco/ncnet
"""
b,c,h,w,d,t=data.size()
data=data.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop
# Same permutation is done with filters, unless already provided with permutation
if permute_filters:
filters=filters.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop
c_out=filters.size(1)
if use_half:
output = Variable(torch.HalfTensor(h,b,c_out,w,d,t),requires_grad=data.requires_grad)
else:
output = Variable(torch.zeros(h,b,c_out,w,d,t),requires_grad=data.requires_grad)
padding=filters.size(0)//2
if use_half:
Z=Variable(torch.zeros(padding,b,c,w,d,t).half())
else:
Z=Variable(torch.zeros(padding,b,c,w,d,t))
if data.is_cuda:
Z=Z.cuda(data.get_device())
output=output.cuda(data.get_device())
data_padded = torch.cat((Z,data,Z),0)
for i in range(output.size(0)): # loop on first feature dimension
# convolve with center channel of filter (at position=padding)
output[i,:,:,:,:,:]=F.conv3d(data_padded[i+padding,:,:,:,:,:],
filters[padding,:,:,:,:,:], bias=bias, stride=1, padding=padding)
# convolve with upper/lower channels of filter (at postions [:padding] [padding+1:])
for p in range(1,padding+1):
output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding-p,:,:,:,:,:],
filters[padding-p,:,:,:,:,:], bias=None, stride=1, padding=padding)
output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding+p,:,:,:,:,:],
filters[padding+p,:,:,:,:,:], bias=None, stride=1, padding=padding)
output=output.permute(1,2,0,3,4,5).contiguous()
return output
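# Minimal usage sketch (illustrative, not part of the original file): filters
# have shape (c_out, c_in, k, k, k, k); an odd cubic kernel preserves the extent.
def _demo_conv4d():
    import torch
    data = torch.randn(1, 2, 5, 5, 6, 6)  # b,c,h,w,d,t
    filt = torch.randn(3, 2, 3, 3, 3, 3)
    print(conv4d(data, filt).shape)        # torch.Size([1, 3, 5, 5, 6, 6])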
class Conv4d(_ConvNd):
"""Applies a 4D convolution over an input signal composed of several input
planes.
"""
def __init__(self, in_channels, out_channels, kernel_size, bias=True, pre_permuted_filters=True):
# stride, dilation and groups !=1 functionality not tested
stride=1
dilation=1
groups=1
# zero padding is added automatically in conv4d function to preserve tensor size
padding = 0
kernel_size = _quadruple(kernel_size)
stride = _quadruple(stride)
padding = _quadruple(padding)
dilation = _quadruple(dilation)
        super(Conv4d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _quadruple(0), groups, bias)  # note: newer PyTorch _ConvNd also expects a padding_mode argument here
# weights will be sliced along one dimension during convolution loop
# make the looping dimension to be the first one in the tensor,
# so that we don't need to call contiguous() inside the loop
self.pre_permuted_filters=pre_permuted_filters
if self.pre_permuted_filters:
self.weight.data=self.weight.data.permute(2,0,1,3,4,5).contiguous()
self.use_half=False
# self.isbias = bias
# if not self.isbias:
# self.bn = torch.nn.BatchNorm1d(out_channels)
def forward(self, input):
out = conv4d(input, self.weight, bias=self.bias,permute_filters=not self.pre_permuted_filters,use_half=self.use_half) # filters pre-permuted in constructor
# if not self.isbias:
# b,c,u,v,h,w = out.shape
# out = self.bn(out.view(b,c,-1)).view(b,c,u,v,h,w)
return out
class fullConv4d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True, pre_permuted_filters=True):
super(fullConv4d, self).__init__()
self.conv = Conv4d(in_channels, out_channels, kernel_size, bias=bias, pre_permuted_filters=pre_permuted_filters)
self.isbias = bias
if not self.isbias:
self.bn = torch.nn.BatchNorm1d(out_channels)
def forward(self, input):
out = self.conv(input)
if not self.isbias:
b,c,u,v,h,w = out.shape
out = self.bn(out.view(b,c,-1)).view(b,c,u,v,h,w)
return out
class butterfly4D(torch.nn.Module):
    '''
    Butterfly 4D filtering: a small U-Net over the 4D cost volume with two
    strided separable-4D stages, trilinear upsampling and additive skips
    '''
def __init__(self, fdima, fdimb, withbn=True, full=True,groups=1):
super(butterfly4D, self).__init__()
self.proj = nn.Sequential(projfeat4d(fdima, fdimb, 1, with_bn=withbn,groups=groups),
nn.ReLU(inplace=True),)
self.conva1 = sepConv4dBlock(fdimb,fdimb,with_bn=withbn, stride=(2,1,1),full=full,groups=groups)
self.conva2 = sepConv4dBlock(fdimb,fdimb,with_bn=withbn, stride=(2,1,1),full=full,groups=groups)
self.convb3 = sepConv4dBlock(fdimb,fdimb,with_bn=withbn, stride=(1,1,1),full=full,groups=groups)
self.convb2 = sepConv4dBlock(fdimb,fdimb,with_bn=withbn, stride=(1,1,1),full=full,groups=groups)
self.convb1 = sepConv4dBlock(fdimb,fdimb,with_bn=withbn, stride=(1,1,1),full=full,groups=groups)
#@profile
def forward(self,x):
out = self.proj(x)
b,c,u,v,h,w = out.shape # 9x9
out1 = self.conva1(out) # 5x5, 3
_,c1,u1,v1,h1,w1 = out1.shape
out2 = self.conva2(out1) # 3x3, 9
_,c2,u2,v2,h2,w2 = out2.shape
out2 = self.convb3(out2) # 3x3, 9
tout1 = F.upsample(out2.view(b,c,u2,v2,-1),(u1,v1,h2*w2),mode='trilinear').view(b,c,u1,v1,h2,w2) # 5x5
tout1 = F.upsample(tout1.view(b,c,-1,h2,w2),(u1*v1,h1,w1),mode='trilinear').view(b,c,u1,v1,h1,w1) # 5x5
out1 = tout1 + out1
out1 = self.convb2(out1)
tout = F.upsample(out1.view(b,c,u1,v1,-1),(u,v,h1*w1),mode='trilinear').view(b,c,u,v,h1,w1)
tout = F.upsample(tout.view(b,c,-1,h1,w1),(u*v,h,w),mode='trilinear').view(b,c,u,v,h,w)
out = tout + out
out = self.convb1(out)
return out
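# Minimal usage sketch (illustrative, not part of the original file): butterfly4D
# preserves the (u,v,h,w) extent while changing the channel count.
def _demo_butterfly4D():
    import torch
    net = butterfly4D(4, 2, withbn=True, full=False).eval()
    with torch.no_grad():
        out = net(torch.randn(1, 4, 9, 9, 8, 8))  # b,c,u,v,h,w
    print(out.shape)  # torch.Size([1, 2, 9, 9, 8, 8])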
class projfeat4d(torch.nn.Module):
    '''
    Channel projection for a 4D cost volume: a 1x1 3D convolution applied to
    the (b,c,u,v,h*w) view, optionally striding over (u,v)
    '''
def __init__(self, in_planes, out_planes, stride, with_bn=True,groups=1):
super(projfeat4d, self).__init__()
self.with_bn = with_bn
self.stride = stride
self.conv1 = nn.Conv3d(in_planes, out_planes, 1, (stride,stride,1), padding=0,bias=not with_bn,groups=groups)
self.bn = nn.BatchNorm3d(out_planes)
def forward(self,x):
b,c,u,v,h,w = x.size()
x = self.conv1(x.view(b,c,u,v,h*w))
if self.with_bn:
x = self.bn(x)
_,c,u,v,_ = x.shape
x = x.view(b,c,u,v,h,w)
return x
class sepConv4d(torch.nn.Module):
'''
Separable 4d convolution block as 2 3D convolutions
'''
def __init__(self, in_planes, out_planes, stride=(1,1,1), with_bn=True, ksize=3, full=True,groups=1):
super(sepConv4d, self).__init__()
bias = not with_bn
self.isproj = False
self.stride = stride[0]
expand = 1
if with_bn:
if in_planes != out_planes:
self.isproj = True
self.proj = nn.Sequential(nn.Conv2d(in_planes, out_planes, 1, bias=bias, padding=0,groups=groups),
nn.BatchNorm2d(out_planes))
if full:
self.conv1 = nn.Sequential(nn.Conv3d(in_planes*expand, in_planes, (1,ksize,ksize), stride=(1,self.stride,self.stride), bias=bias, padding=(0,ksize//2,ksize//2),groups=groups),
nn.BatchNorm3d(in_planes))
else:
self.conv1 = nn.Sequential(nn.Conv3d(in_planes*expand, in_planes, (1,ksize,ksize), stride=1, bias=bias, padding=(0,ksize//2,ksize//2),groups=groups),
nn.BatchNorm3d(in_planes))
self.conv2 = nn.Sequential(nn.Conv3d(in_planes, in_planes*expand, (ksize,ksize,1), stride=(self.stride,self.stride,1), bias=bias, padding=(ksize//2,ksize//2,0),groups=groups),
nn.BatchNorm3d(in_planes*expand))
else:
if in_planes != out_planes:
self.isproj = True
self.proj = nn.Conv2d(in_planes, out_planes, 1, bias=bias, padding=0,groups=groups)
if full:
self.conv1 = nn.Conv3d(in_planes*expand, in_planes, (1,ksize,ksize), stride=(1,self.stride,self.stride), bias=bias, padding=(0,ksize//2,ksize//2),groups=groups)
else:
self.conv1 = nn.Conv3d(in_planes*expand, in_planes, (1,ksize,ksize), stride=1, bias=bias, padding=(0,ksize//2,ksize//2),groups=groups)
self.conv2 = nn.Conv3d(in_planes, in_planes*expand, (ksize,ksize,1), stride=(self.stride,self.stride,1), bias=bias, padding=(ksize//2,ksize//2,0),groups=groups)
self.relu = nn.ReLU(inplace=True)
#@profile
def forward(self,x):
b,c,u,v,h,w = x.shape
x = self.conv2(x.view(b,c,u,v,-1))
b,c,u,v,_ = x.shape
x = self.relu(x)
x = self.conv1(x.view(b,c,-1,h,w))
b,c,_,h,w = x.shape
if self.isproj:
x = self.proj(x.view(b,c,-1,w))
x = x.view(b,-1,u,v,h,w)
return x
class sepConv4dBlock(torch.nn.Module):
    '''
    Residual block of two separable 4D convolutions (each realized as two 3D
    convolutions), with an optional projection/downsampling shortcut
    '''
def __init__(self, in_planes, out_planes, stride=(1,1,1), with_bn=True, full=True,groups=1):
super(sepConv4dBlock, self).__init__()
if in_planes == out_planes and stride==(1,1,1):
self.downsample = None
else:
if full:
self.downsample = sepConv4d(in_planes, out_planes, stride, with_bn=with_bn,ksize=1, full=full,groups=groups)
else:
self.downsample = projfeat4d(in_planes, out_planes,stride[0], with_bn=with_bn,groups=groups)
self.conv1 = sepConv4d(in_planes, out_planes, stride, with_bn=with_bn, full=full ,groups=groups)
self.conv2 = sepConv4d(out_planes, out_planes,(1,1,1), with_bn=with_bn, full=full,groups=groups)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
#@profile
def forward(self,x):
out = self.relu1(self.conv1(x))
if self.downsample:
x = self.downsample(x)
out = self.relu2(x + self.conv2(out))
return out
# Commented-out micro-benchmark scratchpad below: timings of Conv2d/Conv3d/Conv4d
# variants against an unfold+matmul baseline; kept for reference.
##import torch.backends.cudnn as cudnn
##cudnn.benchmark = True
#import time
##im = torch.randn(9,64,9,160,224).cuda()
##net = torch.nn.Conv3d(64, 64, 3).cuda()
##net = Conv4d(1,1,3,bias=True,pre_permuted_filters=True).cuda()
##net = sepConv4dBlock(2,2,stride=(1,1,1)).cuda()
#
##im = torch.randn(1,16,9,9,96,320).cuda()
##net = sepConv4d(16,16,with_bn=False).cuda()
#
##im = torch.randn(1,16,81,96,320).cuda()
##net = torch.nn.Conv3d(16,16,(1,3,3),padding=(0,1,1)).cuda()
#
##im = torch.randn(1,16,9,9,96*320).cuda()
##net = torch.nn.Conv3d(16,16,(3,3,1),padding=(1,1,0)).cuda()
#
##im = torch.randn(10000,10,9,9).cuda()
##net = torch.nn.Conv2d(10,10,3,padding=1).cuda()
#
##im = torch.randn(81,16,96,320).cuda()
##net = torch.nn.Conv2d(16,16,3,padding=1).cuda()
#c= int(16 *1)
#cp = int(16 *1)
#h=int(96 *4)
#w=int(320 *4)
#k=3
#im = torch.randn(1,c,h,w).cuda()
#net = torch.nn.Conv2d(c,cp,k,padding=k//2).cuda()
#
#im2 = torch.randn(cp,k*k*c).cuda()
#im1 = F.unfold(im, (k,k), padding=k//2)[0]
#
#
#net(im)
#net(im)
#torch.mm(im2,im1)
#torch.mm(im2,im1)
#torch.cuda.synchronize()
#beg = time.time()
#for i in range(100):
# net(im)
# #im1 = F.unfold(im, (k,k), padding=k//2)[0]
# torch.mm(im2,im1)
#torch.cuda.synchronize()
#print('%f'%((time.time()-beg)*10.))
|
banmo-main
|
third_party/vcnplus/models/conv4d.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
import pdb
import time
import cv2
from .submodule import pspnet, bfmodule, bfmodule_feat, conv, compute_geo_costs, get_skew_mat, get_intrinsics, F_ngransac
from .conv4d import sepConv4d, butterfly4D
class flow_reg(nn.Module):
"""
Soft winner-take-all that selects the most likely diplacement.
Set ent=True to enable entropy output.
Set maxdisp to adjust maximum allowed displacement towards one side.
maxdisp=4 searches for a 9x9 region.
Set fac to squeeze search window.
maxdisp=4 and fac=2 gives search window of 9x5
"""
def __init__(self, size, ent=False, maxdisp = int(4), fac=1):
B,W,H = size
super(flow_reg, self).__init__()
self.ent = ent
self.md = maxdisp
self.fac = fac
self.truncated = True
self.wsize = 3 # by default using truncation 7x7
flowrangey = range(-maxdisp,maxdisp+1)
flowrangex = range(-int(maxdisp//self.fac),int(maxdisp//self.fac)+1)
meshgrid = np.meshgrid(flowrangex,flowrangey)
flowy = np.tile( np.reshape(meshgrid[0],[1,2*maxdisp+1,2*int(maxdisp//self.fac)+1,1,1]), (B,1,1,H,W) )
flowx = np.tile( np.reshape(meshgrid[1],[1,2*maxdisp+1,2*int(maxdisp//self.fac)+1,1,1]), (B,1,1,H,W) )
self.register_buffer('flowx',torch.Tensor(flowx))
self.register_buffer('flowy',torch.Tensor(flowy))
self.pool3d = nn.MaxPool3d((self.wsize*2+1,self.wsize*2+1,1),stride=1,padding=(self.wsize,self.wsize,0))
def forward(self, x):
b,u,v,h,w = x.shape
oldx = x
if self.truncated:
# truncated softmax
x = x.view(b,u*v,h,w)
idx = x.argmax(1)[:,np.newaxis]
if x.is_cuda:
mask = Variable(torch.cuda.HalfTensor(b,u*v,h,w)).fill_(0)
else:
mask = Variable(torch.FloatTensor(b,u*v,h,w)).fill_(0)
mask.scatter_(1,idx,1)
mask = mask.view(b,1,u,v,-1)
mask = self.pool3d(mask)[:,0].view(b,u,v,h,w)
ninf = x.clone().fill_(-np.inf).view(b,u,v,h,w)
            x = torch.where(mask.bool(),oldx,ninf) # bool mask; byte masks are deprecated in torch.where
else:
self.wsize = (np.sqrt(u*v)-1)/2
b,u,v,h,w = x.shape
x = F.softmax(x.view(b,-1,h,w),1).view(b,u,v,h,w)
if np.isnan(x.min().detach().cpu()):
#pdb.set_trace()
            x[torch.isnan(x)] = F.softmax(oldx[torch.isnan(x)],0) # 1-D after masking; explicit dim avoids the deprecated default
outx = torch.sum(torch.sum(x*self.flowx,1),1,keepdim=True)
outy = torch.sum(torch.sum(x*self.flowy,1),1,keepdim=True)
if self.ent:
# local
local_entropy = (-x*torch.clamp(x,1e-9,1-1e-9).log()).sum(1).sum(1)[:,np.newaxis]
if self.wsize == 0:
local_entropy[:] = 1.
else:
local_entropy /= np.log((self.wsize*2+1)**2)
# global
x = F.softmax(oldx.view(b,-1,h,w),1).view(b,u,v,h,w)
global_entropy = (-x*torch.clamp(x,1e-9,1-1e-9).log()).sum(1).sum(1)[:,np.newaxis]
global_entropy /= np.log(x.shape[1]*x.shape[2])
return torch.cat([outx,outy],1),torch.cat([local_entropy, global_entropy],1)
else:
return torch.cat([outx,outy],1),None
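# Minimal usage sketch (illustrative, not part of the original file): a cost
# volume peaked at (u,v)=(3,2) decodes to a displacement of about (+1, 0)
# under the soft argmax, for maxdisp=2.
def _demo_flow_reg():
    import torch
    reg = flow_reg([1, 8, 8], ent=False, maxdisp=2)
    cost = torch.zeros(1, 5, 5, 8, 8)
    cost[:, 3, 2] = 10.
    flow, _ = reg(cost)
    print(flow[0, :, 0, 0])  # ~tensor([1., 0.])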
class WarpModule(nn.Module):
"""
taken from https://github.com/NVlabs/PWC-Net/blob/master/PyTorch/models/PWCNet.py
"""
def __init__(self, size):
super(WarpModule, self).__init__()
B,W,H = size
# mesh grid
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
self.register_buffer('grid',torch.cat((xx,yy),1).float())
def forward(self, x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
vgrid = self.grid + flo
# scale grid to [-1,1]
vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:]/max(W-1,1)-1.0
vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:]/max(H-1,1)-1.0
vgrid = vgrid.permute(0,2,3,1)
#output = nn.functional.grid_sample(x, vgrid)
output = nn.functional.grid_sample(x, vgrid, align_corners=True)
mask = ((vgrid[:,:,:,0].abs()<1) * (vgrid[:,:,:,1].abs()<1)) >0
return output*mask.unsqueeze(1).float(), mask
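# Minimal usage sketch (illustrative, not part of the original file): zero flow
# reproduces the image in the interior; border pixels land on |grid|=1 and are
# zeroed by the validity mask.
def _demo_warp_identity():
    import torch
    warp = WarpModule([1, 8, 8])  # B, W, H
    x = torch.randn(1, 3, 8, 8)
    out, mask = warp(x, torch.zeros(1, 2, 8, 8))
    print(torch.allclose(out[:, :, 1:-1, 1:-1], x[:, :, 1:-1, 1:-1], atol=1e-5))  # True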
def get_grid(B,H,W):
    # pixel-coordinate grid of shape (1,1,H,W,2); note the hard-coded .cuda()
meshgrid_base = np.meshgrid(range(0,W), range(0,H))[::-1]
basey = np.reshape(meshgrid_base[0],[1,1,1,H,W])
basex = np.reshape(meshgrid_base[1],[1,1,1,H,W])
grid = torch.tensor(np.concatenate((basex.reshape((-1,H,W,1)),basey.reshape((-1,H,W,1))),-1)).cuda().float()
return grid.view(1,1,H,W,2)
class VCN(nn.Module):
"""
VCN.
md defines maximum displacement for each level, following a coarse-to-fine-warping scheme
fac defines squeeze parameter for the coarsest level
"""
def __init__(self, size, md=[4,4,4,4,4], fac=1., exp_unc=True):
super(VCN,self).__init__()
self.md = md
self.fac = fac
use_entropy = True
withbn = True
## pspnet
self.pspnet = pspnet(is_proj=False)
### Volumetric-UNet
fdima1 = 128 # 6/5/4
fdima2 = 64 # 3/2
fdimb1 = 16 # 6/5/4/3
fdimb2 = 12 # 2
full=False
self.f6 = butterfly4D(fdima1, fdimb1,withbn=withbn,full=full)
self.p6 = sepConv4d(fdimb1,fdimb1, with_bn=False, full=full)
self.f5 = butterfly4D(fdima1, fdimb1,withbn=withbn, full=full)
self.p5 = sepConv4d(fdimb1,fdimb1, with_bn=False,full=full)
self.f4 = butterfly4D(fdima1, fdimb1,withbn=withbn,full=full)
self.p4 = sepConv4d(fdimb1,fdimb1, with_bn=False,full=full)
self.f3 = butterfly4D(fdima2, fdimb1,withbn=withbn,full=full)
self.p3 = sepConv4d(fdimb1,fdimb1, with_bn=False,full=full)
full=True
self.f2 = butterfly4D(fdima2, fdimb2,withbn=withbn,full=full)
self.p2 = sepConv4d(fdimb2,fdimb2, with_bn=False,full=full)
self.flow_reg64 = flow_reg([fdimb1*size[0],size[1]//64,size[2]//64], ent=use_entropy, maxdisp=self.md[0], fac=self.fac)
self.flow_reg32 = flow_reg([fdimb1*size[0],size[1]//32,size[2]//32], ent=use_entropy, maxdisp=self.md[1])
self.flow_reg16 = flow_reg([fdimb1*size[0],size[1]//16,size[2]//16], ent=use_entropy, maxdisp=self.md[2])
self.flow_reg8 = flow_reg([fdimb1*size[0],size[1]//8,size[2]//8] , ent=use_entropy, maxdisp=self.md[3])
self.flow_reg4 = flow_reg([fdimb2*size[0],size[1]//4,size[2]//4] , ent=use_entropy, maxdisp=self.md[4])
self.warp5 = WarpModule([size[0],size[1]//32,size[2]//32])
self.warp4 = WarpModule([size[0],size[1]//16,size[2]//16])
self.warp3 = WarpModule([size[0],size[1]//8,size[2]//8])
self.warp2 = WarpModule([size[0],size[1]//4,size[2]//4])
if self.training:
self.warpx = WarpModule([size[0],size[1],size[2]])
## hypotheses fusion modules, adopted from the refinement module of PWCNet
# https://github.com/NVlabs/PWC-Net/blob/master/PyTorch/models/PWCNet.py
# c6
self.dc6_conv1 = conv(128+4*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc6_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc6_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc6_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc6_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc6_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc6_conv7 = nn.Conv2d(32,2*fdimb1,kernel_size=3,stride=1,padding=1,bias=True)
# c5
self.dc5_conv1 = conv(128+4*fdimb1*2, 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc5_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc5_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc5_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc5_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc5_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc5_conv7 = nn.Conv2d(32,2*fdimb1*2,kernel_size=3,stride=1,padding=1,bias=True)
# c4
self.dc4_conv1 = conv(128+4*fdimb1*3, 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc4_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc4_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc4_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc4_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc4_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc4_conv7 = nn.Conv2d(32,2*fdimb1*3,kernel_size=3,stride=1,padding=1,bias=True)
# c3
self.dc3_conv1 = conv(64+16*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc3_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc3_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc3_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc3_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc3_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc3_conv7 = nn.Conv2d(32,8*fdimb1,kernel_size=3,stride=1,padding=1,bias=True)
# c2
self.dc2_conv1 = conv(64+16*fdimb1+4*fdimb2, 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc2_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc2_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc2_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc2_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc2_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc2_conv7 = nn.Conv2d(32,4*2*fdimb1 + 2*fdimb2,kernel_size=3,stride=1,padding=1,bias=True)
self.dc6_conv = nn.Sequential( self.dc6_conv1,
self.dc6_conv2,
self.dc6_conv3,
self.dc6_conv4,
self.dc6_conv5,
self.dc6_conv6,
self.dc6_conv7)
self.dc5_conv = nn.Sequential( self.dc5_conv1,
self.dc5_conv2,
self.dc5_conv3,
self.dc5_conv4,
self.dc5_conv5,
self.dc5_conv6,
self.dc5_conv7)
self.dc4_conv = nn.Sequential( self.dc4_conv1,
self.dc4_conv2,
self.dc4_conv3,
self.dc4_conv4,
self.dc4_conv5,
self.dc4_conv6,
self.dc4_conv7)
self.dc3_conv = nn.Sequential( self.dc3_conv1,
self.dc3_conv2,
self.dc3_conv3,
self.dc3_conv4,
self.dc3_conv5,
self.dc3_conv6,
self.dc3_conv7)
self.dc2_conv = nn.Sequential( self.dc2_conv1,
self.dc2_conv2,
self.dc2_conv3,
self.dc2_conv4,
self.dc2_conv5,
self.dc2_conv6,
self.dc2_conv7)
## Out-of-range detection
self.dc6_convo = nn.Sequential(conv(128+4*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1),
conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2),
conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4),
conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8),
conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16),
conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1),
nn.Conv2d(32,1,kernel_size=3,stride=1,padding=1,bias=True))
self.dc5_convo = nn.Sequential(conv(128+2*4*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1),
conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2),
conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4),
conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8),
conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16),
conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1),
nn.Conv2d(32,1,kernel_size=3,stride=1,padding=1,bias=True))
self.dc4_convo = nn.Sequential(conv(128+3*4*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1),
conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2),
conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4),
conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8),
conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16),
conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1),
nn.Conv2d(32,1,kernel_size=3,stride=1,padding=1,bias=True))
self.dc3_convo = nn.Sequential(conv(64+16*fdimb1, 128, kernel_size=3, stride=1, padding=1, dilation=1),
conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2),
conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4),
conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8),
conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16),
conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1),
nn.Conv2d(32,1,kernel_size=3,stride=1,padding=1,bias=True))
self.dc2_convo = nn.Sequential(conv(64+16*fdimb1+4*fdimb2, 128, kernel_size=3, stride=1, padding=1, dilation=1),
conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2),
conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4),
conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8),
conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16),
conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1),
nn.Conv2d(32,1,kernel_size=3,stride=1,padding=1,bias=True))
# affine-exp
self.f3d2v1 = conv(64, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2v2 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2v3 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2v4 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2v5 = conv(64, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2v6 = conv(12*81, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.f3d2 = bfmodule(128-64,1)
# depth change net
self.dcnetv1 = conv(64, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.dcnetv2 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.dcnetv3 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.dcnetv4 = conv(1, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.dcnetv5 = conv(12*81, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
self.dcnetv6 = conv(4, 32, kernel_size=3, stride=1, padding=1,dilation=1) #
if exp_unc:
self.dcnet = bfmodule(128,2)
else:
self.dcnet = bfmodule(128,1)
for m in self.modules():
if isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias,'data'):
m.bias.data.zero_()
self.facs = [self.fac,1,1,1,1]
self.warp_modules = nn.ModuleList([None, self.warp5, self.warp4, self.warp3, self.warp2])
self.f_modules = nn.ModuleList([self.f6, self.f5, self.f4, self.f3, self.f2])
self.p_modules = nn.ModuleList([self.p6, self.p5, self.p4, self.p3, self.p2])
self.reg_modules = nn.ModuleList([self.flow_reg64, self.flow_reg32, self.flow_reg16, self.flow_reg8, self.flow_reg4])
self.oor_modules = nn.ModuleList([self.dc6_convo, self.dc5_convo, self.dc4_convo, self.dc3_convo, self.dc2_convo])
self.fuse_modules = nn.ModuleList([self.dc6_conv, self.dc5_conv, self.dc4_conv, self.dc3_conv, self.dc2_conv])
def corrf(self, refimg_fea, targetimg_fea,maxdisp, fac=1):
if self.training:
#fast correlation function
b,c,h,w = refimg_fea.shape
targetimg_fea = F.unfold(targetimg_fea, (2*int(maxdisp)//fac+1,2*maxdisp+1), padding=(int(maxdisp)//fac,maxdisp)).view(b,c, 2*int(maxdisp)//fac+1,2*maxdisp+1,h,w).permute(0,1,3,2,4,5).contiguous()
cost = refimg_fea.view(b,c,h,w)[:,:,np.newaxis, np.newaxis]*targetimg_fea
cost = F.leaky_relu(cost, 0.1,inplace=True)
else:
#slow correlation function
b,c,height,width = refimg_fea.shape
if refimg_fea.is_cuda:
cost = Variable(torch.cuda.FloatTensor(b,c,2*maxdisp+1,2*int(maxdisp//fac)+1,height,width)).fill_(0.) # b,c,u,v,h,w
else:
cost = Variable(torch.FloatTensor(b,c,2*maxdisp+1,2*int(maxdisp//fac)+1,height,width)).fill_(0.) # b,c,u,v,h,w
for i in range(2*maxdisp+1):
ind = i-maxdisp
for j in range(2*int(maxdisp//fac)+1):
indd = j-int(maxdisp//fac)
feata = refimg_fea[:,:,max(0,-indd):height-indd,max(0,-ind):width-ind]
featb = targetimg_fea[:,:,max(0,+indd):height+indd,max(0,ind):width+ind]
diff = (feata*featb)
cost[:, :, i,j,max(0,-indd):height-indd,max(0,-ind):width-ind] = diff # standard
cost = F.leaky_relu(cost, 0.1,inplace=True)
return cost
def cost_matching(self,up_flow, c1, c2, flowh, enth, level):
"""
up_flow: upsample coarse flow
c1: normalized feature of image 1
c2: normalized feature of image 2
flowh: flow hypotheses
enth: entropy
"""
# normalize
c1n = c1 / (c1.norm(dim=1, keepdim=True)+1e-9)
c2n = c2 / (c2.norm(dim=1, keepdim=True)+1e-9)
# cost volume
if level == 0:
warp = c2n
else:
warp,_ = self.warp_modules[level](c2n, up_flow)
feat = self.corrf(c1n,warp,self.md[level],fac=self.facs[level])
feat = self.f_modules[level](feat)
cost = self.p_modules[level](feat) # b, 16, u,v,h,w
# soft WTA
b,c,u,v,h,w = cost.shape
cost = cost.view(-1,u,v,h,w) # bx16, 9,9,h,w, also predict uncertainty from here
flowhh,enthh = self.reg_modules[level](cost) # bx16, 2, h, w
flowhh = flowhh.view(b,c,2,h,w)
if level > 0:
flowhh = flowhh + up_flow[:,np.newaxis]
flowhh = flowhh.view(b,-1,h,w) # b, 16*2, h, w
enthh = enthh.view(b,-1,h,w) # b, 16*1, h, w
# append coarse hypotheses
if level == 0:
flowh = flowhh
enth = enthh
else:
flowh = torch.cat((flowhh, F.upsample(flowh.detach()*2, [flowhh.shape[2],flowhh.shape[3]], mode='bilinear')),1) # b, k2--k2, h, w
enth = torch.cat((enthh, F.upsample(enth, [flowhh.shape[2],flowhh.shape[3]], mode='bilinear')),1)
if self.training or level==4:
x = torch.cat((enth.detach(), flowh.detach(), c1),1)
oor = self.oor_modules[level](x)[:,0]
else: oor = None
# hypotheses fusion
x = torch.cat((enth.detach(), flowh.detach(), c1),1)
va = self.fuse_modules[level](x)
va = va.view(b,-1,2,h,w)
flow = ( flowh.view(b,-1,2,h,w) * F.softmax(va,1) ).sum(1) # b, 2k, 2, h, w
return flow, flowh, enth, oor
def affine(self,pref,flow, pw=1):
b,_,lh,lw=flow.shape
ptar = pref + flow
        pw = 1 # the patch half-width is fixed to 1 here, overriding the argument
pref = F.unfold(pref, (pw*2+1,pw*2+1), padding=(pw)).view(b,2,(pw*2+1)**2,lh,lw)-pref[:,:,np.newaxis]
ptar = F.unfold(ptar, (pw*2+1,pw*2+1), padding=(pw)).view(b,2,(pw*2+1)**2,lh,lw)-ptar[:,:,np.newaxis] # b, 2,9,h,w
pref = pref.permute(0,3,4,1,2).reshape(b*lh*lw,2,(pw*2+1)**2)
ptar = ptar.permute(0,3,4,1,2).reshape(b*lh*lw,2,(pw*2+1)**2)
prefprefT = pref.matmul(pref.permute(0,2,1))
ppdet = prefprefT[:,0,0]*prefprefT[:,1,1]-prefprefT[:,1,0]*prefprefT[:,0,1]
ppinv = torch.cat((prefprefT[:,1,1:],-prefprefT[:,0,1:], -prefprefT[:,1:,0], prefprefT[:,0:1,0]),1).view(-1,2,2)/ppdet.clamp(1e-10,np.inf)[:,np.newaxis,np.newaxis]
Affine = ptar.matmul(pref.permute(0,2,1)).matmul(ppinv)
Error = (Affine.matmul(pref)-ptar).norm(2,1).mean(1).view(b,1,lh,lw)
Avol = (Affine[:,0,0]*Affine[:,1,1]-Affine[:,1,0]*Affine[:,0,1]).view(b,1,lh,lw).abs().clamp(1e-10,np.inf)
exp = Avol.sqrt()
mask = (exp>0.5) & (exp<2) & (Error<0.1)
mask = mask[:,0]
exp = exp.clamp(0.5,2)
exp[Error>0.1]=1
return exp, Error, mask
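    # The fit above solves, per pixel, the normal equations of a local affine
    # model A = P_tar P_ref^T (P_ref P_ref^T)^{-1}, where P_ref/P_tar stack the
    # centred patch coordinates before/after the flow; sqrt(|det A|) is the
    # local scale change and the residual norm gates its reliability.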
def affine_mask(self,pref,flow, pw=3):
"""
pref: reference coordinates
pw: patch width
"""
flmask = flow[:,2:]
flow = flow[:,:2]
b,_,lh,lw=flow.shape
ptar = pref + flow
pref = F.unfold(pref, (pw*2+1,pw*2+1), padding=(pw)).view(b,2,(pw*2+1)**2,lh,lw)-pref[:,:,np.newaxis]
ptar = F.unfold(ptar, (pw*2+1,pw*2+1), padding=(pw)).view(b,2,(pw*2+1)**2,lh,lw)-ptar[:,:,np.newaxis] # b, 2,9,h,w
conf_flow = flmask
conf_flow = F.unfold(conf_flow,(pw*2+1,pw*2+1), padding=(pw)).view(b,1,(pw*2+1)**2,lh,lw)
        count = conf_flow.sum(2,keepdim=True)
conf_flow = ((pw*2+1)**2)*conf_flow / count
pref = pref * conf_flow
ptar = ptar * conf_flow
pref = pref.permute(0,3,4,1,2).reshape(b*lh*lw,2,(pw*2+1)**2)
ptar = ptar.permute(0,3,4,1,2).reshape(b*lh*lw,2,(pw*2+1)**2)
prefprefT = pref.matmul(pref.permute(0,2,1))
ppdet = prefprefT[:,0,0]*prefprefT[:,1,1]-prefprefT[:,1,0]*prefprefT[:,0,1]
ppinv = torch.cat((prefprefT[:,1,1:],-prefprefT[:,0,1:], -prefprefT[:,1:,0], prefprefT[:,0:1,0]),1).view(-1,2,2)/ppdet.clamp(1e-10,np.inf)[:,np.newaxis,np.newaxis]
Affine = ptar.matmul(pref.permute(0,2,1)).matmul(ppinv)
Error = (Affine.matmul(pref)-ptar).norm(2,1).mean(1).view(b,1,lh,lw)
Avol = (Affine[:,0,0]*Affine[:,1,1]-Affine[:,1,0]*Affine[:,0,1]).view(b,1,lh,lw).abs().clamp(1e-10,np.inf)
exp = Avol.sqrt()
mask = (exp>0.5) & (exp<2) & (Error<0.2) & (flmask.bool()) & (count[:,0]>4)
mask = mask[:,0]
exp = exp.clamp(0.5,2)
exp[Error>0.2]=1
return exp, Error, mask
def get_oor_loss(self, flowl0, oor3, maxdisp, occ_mask,mask):
"""
return out-of-range loss
"""
oor3_gt = (flowl0.abs() > maxdisp).detach() # (8*self.md[3])
oor3_gt = (((oor3_gt.sum(1)>0) + occ_mask)>0).float() # oor, or occluded
#weights = oor3_gt.sum().float()/(oor3_gt.shape[0]*oor3_gt.shape[1]*oor3_gt.shape[2])
oor3_gt = oor3_gt[mask]
weights = oor3_gt.sum().float()/(oor3_gt.shape[0])
weights = oor3_gt * (1-weights) + (1-oor3_gt) * weights
        loss_oor3 = F.binary_cross_entropy_with_logits(oor3[mask],oor3_gt,weight=weights,reduction='mean')
return loss_oor3
def weight_parameters(self):
return [param for name, param in self.named_parameters() if 'weight' in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if 'bias' in name]
def forward(self,im,disc_aux=None,disp_input=None):
bs = im.shape[0]//2
if self.training and disc_aux[-1]: # if only fine-tuning expansion
reset=True
self.eval()
torch.set_grad_enabled(False)
else: reset=False
c06,c05,c04,c03,c02 = self.pspnet(im)
c16 = c06[:bs]; c26 = c06[bs:]
c15 = c05[:bs]; c25 = c05[bs:]
c14 = c04[:bs]; c24 = c04[bs:]
c13 = c03[:bs]; c23 = c03[bs:]
c12 = c02[:bs]; c22 = c02[bs:]
## matching 6
flow6, flow6h, ent6h, oor6 = self.cost_matching(None, c16, c26, None, None,level=0)
## matching 5
up_flow6 = F.upsample(flow6, [im.size()[2]//32,im.size()[3]//32], mode='bilinear')*2
flow5, flow5h, ent5h, oor5 = self.cost_matching(up_flow6, c15, c25, flow6h, ent6h,level=1)
## matching 4
up_flow5 = F.upsample(flow5, [im.size()[2]//16,im.size()[3]//16], mode='bilinear')*2
flow4, flow4h, ent4h, oor4 = self.cost_matching(up_flow5, c14, c24, flow5h, ent5h,level=2)
## matching 3
up_flow4 = F.upsample(flow4, [im.size()[2]//8,im.size()[3]//8], mode='bilinear')*2
flow3, flow3h, ent3h, oor3 = self.cost_matching(up_flow4, c13, c23, flow4h, ent4h,level=3)
## matching 2
up_flow3 = F.upsample(flow3, [im.size()[2]//4,im.size()[3]//4], mode='bilinear')*2
flow2, flow2h, ent2h, oor2 = self.cost_matching(up_flow3, c12, c22, flow3h, ent3h,level=4)
if reset and disc_aux[-1] == 1:
torch.set_grad_enabled(True)
self.train()
if not self.training or disc_aux[-1]:
# expansion
b,_,h,w = flow2.shape
exp2,err2,_ = self.affine(get_grid(b,h,w)[:,0].permute(0,3,1,2).repeat(b,1,1,1).clone(), flow2.detach(),pw=1)
x = torch.cat((
self.f3d2v2(-exp2.log()),
self.f3d2v3(err2),
),1)
dchange2 = -exp2.log()+1./200*self.f3d2(x)[0]
# depth change net
iexp2 = F.upsample(dchange2.clone(), [im.size()[2],im.size()[3]], mode='bilinear')
x = torch.cat((self.dcnetv1(c12.detach()),
self.dcnetv2(dchange2.detach()),
self.dcnetv3(-exp2.log()),
self.dcnetv4(err2),
),1)
dcneto = 1./200*self.dcnet(x)[0]
dchange2 = dchange2.detach() + dcneto[:,:1]
dchange2 = F.upsample(dchange2, [im.size()[2],im.size()[3]], mode='bilinear')
if dcneto.shape[1]>1:
dc_unc = dcneto[:,1:2]
else:
dc_unc = torch.zeros_like(dcneto)
dc_unc = F.upsample(dc_unc, [im.size()[2],im.size()[3]], mode='bilinear')[:,0]
flow2 = F.upsample(flow2.detach(), [im.size()[2],im.size()[3]], mode='bilinear')*4
return flow2, oor2[0], dchange2[0,0], iexp2[0,0]
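# Minimal standalone sketch (illustrative, not part of the original file) of the
# unfold-based correlation used in VCN.corrf, for fac=1:
def _demo_corr_unfold():
    import torch
    import torch.nn.functional as F
    b, c, h, w, md = 1, 4, 6, 6, 2
    ref, tgt = torch.randn(b, c, h, w), torch.randn(b, c, h, w)
    patches = F.unfold(tgt, (2*md+1, 2*md+1), padding=md).view(b, c, 2*md+1, 2*md+1, h, w)
    cost = ref[:, :, None, None] * patches  # b,c,u,v,h,w
    print(cost.shape)  # torch.Size([1, 4, 5, 5, 6, 6])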
|
banmo-main
|
third_party/vcnplus/models/VCNplus.py
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
from .DCNv2.DCN.dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
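# Minimal sanity check (illustrative, not part of the original file): the kernel
# written by fill_up_weights matches bilinear interpolation away from the border
# (the transposed convolution zero-pads, so the outermost ring differs).
def _demo_fill_up_weights():
    import torch
    up = torch.nn.ConvTranspose2d(1, 1, 4, stride=2, padding=1, bias=False)
    fill_up_weights(up)
    x = torch.arange(16.).view(1, 1, 4, 4)
    with torch.no_grad():
        y = up(x)
    ref = torch.nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
    print(torch.allclose(y[:, :, 1:-1, 1:-1], ref[:, :, 1:-1, 1:-1], atol=1e-5))  # True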
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv):
self.inplanes = 64
self.heads = heads
self.deconv_with_bias = False
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
3,
[256, 128, 64],
[4, 4, 4],
)
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
    def _get_deconv_cfg(self, deconv_kernel, index):
        # padding/output_padding are chosen so that a stride-2 deconv exactly
        # doubles the spatial size for each supported kernel.
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
fc = DCN(self.inplanes, planes,
kernel_size=(3,3), stride=1,
padding=1, dilation=1, deformable_groups=1)
# fc = nn.Conv2d(self.inplanes, planes,
# kernel_size=3, stride=1,
# padding=1, dilation=1, bias=False)
# fill_fc_weights(fc)
up = nn.ConvTranspose2d(
in_channels=planes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias)
fill_up_weights(up)
layers.append(fc)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
layers.append(up)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
    def init_weights(self, num_layers):
        url = model_urls['resnet{}'.format(num_layers)]
        pretrained_state_dict = model_zoo.load_url(url)
        print('=> loading pretrained model {}'.format(url))
        self.load_state_dict(pretrained_state_dict, strict=False)
        print('=> init deconv weights from normal distribution')
        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv=256):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers)
return model
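# --- Usage sketch (illustrative addition, not from the original file) ---
# A minimal smoke test, assuming the CenterNet-style head convention
# ({'hm': num_classes, 'wh': 2, 'reg': 2}). Building the model downloads
# ImageNet ResNet weights and requires the compiled DCNv2 extension.
if __name__ == '__main__':
    heads = {'hm': 80, 'wh': 2, 'reg': 2}
    net = get_pose_net(18, heads)
    out = net(torch.randn(1, 3, 512, 512))[0]
    # Heads predict at 1/4 of the input resolution, e.g. hm: (1, 80, 128, 128).
    print({k: v.shape for k, v in out.items()})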
|
banmo-main
|
third_party/vcnplus/models/networks/resnet_dcn.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .DCNv2.DCN.dcn_v2 import DCN
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
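# Tree implements the hierarchical deep aggregation of DLA: tree1/tree2 are
# residual blocks at the deepest level (levels == 1) or nested sub-trees
# otherwise, and Root fuses their outputs (plus any level_root skip inputs)
# with a 1x1 conv + BN + ReLU.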
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False, num_input=14):
super(DLA, self).__init__()
self.channels = channels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(num_input, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
return y
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
# fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
# self.fc = fc
def dla34(pretrained=True, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class DeformConv(nn.Module):
def __init__(self, chi, cho):
super(DeformConv, self).__init__()
self.actf = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)
def forward(self, x):
x = self.conv(x)
x = self.actf(x)
return x
class IDAUp(nn.Module):
def __init__(self, o, channels, up_f):
super(IDAUp, self).__init__()
for i in range(1, len(channels)):
c = channels[i]
f = int(up_f[i])
proj = DeformConv(c, o)
node = DeformConv(o, o)
up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
padding=f // 2, output_padding=0,
groups=o, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
def forward(self, layers, startp, endp):
for i in range(startp + 1, endp):
upsample = getattr(self, 'up_' + str(i - startp))
project = getattr(self, 'proj_' + str(i - startp))
layers[i] = upsample(project(layers[i]))
node = getattr(self, 'node_' + str(i - startp))
layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
def __init__(self, startp, channels, scales, in_channels=None):
super(DLAUp, self).__init__()
self.startp = startp
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(channels[j], in_channels[j:],
scales[j:] // scales[j]))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
out = [layers[-1]] # start with 32
for i in range(len(layers) - self.startp - 1):
ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
out.insert(0, layers[-1])
return out
class Interpolate(nn.Module):
def __init__(self, scale, mode):
super(Interpolate, self).__init__()
self.scale = scale
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False)
return x
class DLASeg(nn.Module):
def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, head_conv, out_channel=0, num_input=14):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.first_level = int(np.log2(down_ratio))
self.last_level = last_level
        self.base = globals()[base_name](pretrained=pretrained, num_input=num_input)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)
if out_channel == 0:
out_channel = channels[self.first_level]
self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
[2 ** i for i in range(self.last_level - self.first_level)])
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=final_kernel, stride=1,
padding=final_kernel // 2, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(channels[self.first_level], classes,
kernel_size=final_kernel, stride=1,
padding=final_kernel // 2, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def forward(self, x):
x = self.base(x)
x = self.dla_up(x)
y = []
for i in range(self.last_level - self.first_level):
y.append(x[i].clone())
self.ida_up(y, 0, len(y))
z = {}
for head in self.heads:
z[head] = self.__getattr__(head)(y[-1])
return [z]
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4, num_input=14):
    model = DLASeg('dla{}'.format(num_layers), heads,
                   pretrained=False,
                   #pretrained=True,
                   down_ratio=down_ratio,
                   final_kernel=1,
                   last_level=5,
                   head_conv=head_conv, num_input=num_input)
    return model
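# --- Usage sketch (illustrative addition, not from the original file) ---
# num_input defaults to 14 (vcnplus stacks its inputs along the channel
# dimension); the head names below follow the CenterNet convention and are
# assumptions. Requires the compiled DCNv2 extension; no weights are
# downloaded since pretrained=False above.
if __name__ == '__main__':
    net = get_pose_net(34, {'hm': 1, 'wh': 2}, num_input=14)
    out = net(torch.randn(1, 14, 256, 256))[0]
    # With down_ratio=4 the outputs are at 256/4 = 64 spatial resolution.
    print({k: v.shape for k, v in out.items()})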
|
banmo-main
|
third_party/vcnplus/models/networks/pose_dla_dcn.py
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv, **kwargs):
self.inplanes = 64
self.deconv_with_bias = False
self.heads = heads
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
3,
[256, 256, 256],
[4, 4, 4],
)
# self.final_layer = []
for head in sorted(self.heads):
num_output = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(256, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, num_output,
kernel_size=1, stride=1, padding=0))
else:
fc = nn.Conv2d(
in_channels=256,
out_channels=num_output,
kernel_size=1,
stride=1,
padding=0
)
self.__setattr__(head, fc)
# self.final_layer = nn.ModuleList(self.final_layer)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
    def _get_deconv_cfg(self, deconv_kernel, index):
        # padding/output_padding are chosen so that a stride-2 deconv exactly
        # doubles the spatial size for each supported kernel.
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
def init_weights(self, num_layers, pretrained=True):
if pretrained:
# print('=> init resnet deconv weights from normal distribution')
for _, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
# print('=> init {}.weight as 1'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# print('=> init final conv weights from normal distribution')
for head in self.heads:
final_layer = self.__getattr__(head)
for i, m in enumerate(final_layer.modules()):
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
if m.weight.shape[0] == self.heads[head]:
if 'hm' in head:
nn.init.constant_(m.bias, -2.19)
else:
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
#pretrained_state_dict = torch.load(pretrained)
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            print('=> imagenet pretrained model does not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers, pretrained=True)
return model
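# --- Usage sketch (illustrative addition, not from the original file) ---
# Plain ResNet backbone with three 256-channel deconv stages; no DCN is
# needed here, but init_weights downloads the ImageNet checkpoint. Head
# names follow the CenterNet convention and are assumptions.
if __name__ == '__main__':
    net = get_pose_net(18, {'hm': 80, 'wh': 2, 'reg': 2}, head_conv=64)
    out = net(torch.randn(1, 3, 512, 512))[0]
    print({k: v.shape for k, v in out.items()})  # e.g. hm: (1, 80, 128, 128)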
|
banmo-main
|
third_party/vcnplus/models/networks/msra_resnet.py
|
# ------------------------------------------------------------------------------
# This code is based on
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class fully_connected(nn.Module):
def __init__(self, inp_dim, out_dim, with_bn=True):
super(fully_connected, self).__init__()
self.with_bn = with_bn
self.linear = nn.Linear(inp_dim, out_dim)
if self.with_bn:
self.bn = nn.BatchNorm1d(out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
linear = self.linear(x)
bn = self.bn(linear) if self.with_bn else linear
relu = self.relu(bn)
return relu
class residual(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False)
self.bn1 = nn.BatchNorm2d(out_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = nn.Sequential(
nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(out_dim)
) if stride != 1 or inp_dim != out_dim else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(bn1)
conv2 = self.conv2(relu1)
bn2 = self.bn2(conv2)
skip = self.skip(x)
return self.relu(bn2 + skip)
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = [layer(k, inp_dim, out_dim, **kwargs)]
for _ in range(1, modules):
layers.append(layer(k, out_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = []
for _ in range(modules - 1):
layers.append(layer(k, inp_dim, inp_dim, **kwargs))
layers.append(layer(k, inp_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
class MergeUp(nn.Module):
def forward(self, up1, up2):
return up1 + up2
def make_merge_layer(dim):
return MergeUp()
# def make_pool_layer(dim):
# return nn.MaxPool2d(kernel_size=2, stride=2)
def make_pool_layer(dim):
return nn.Sequential()
def make_unpool_layer(dim):
return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
return nn.Sequential(
convolution(3, cnv_dim, curr_dim, with_bn=False),
nn.Conv2d(curr_dim, out_dim, (1, 1))
)
def make_inter_layer(dim):
return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
return convolution(3, inp_dim, out_dim)
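# kp_module builds one hourglass recursively: up1 processes the skip branch
# at the current resolution, low1 downsamples (the stride-2 first layer in
# make_hg_layer), low2 is either a smaller hourglass (n > 1) or the
# bottleneck layers, low3 restores the channel count, and up2 upsamples
# before merging with up1.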
class kp_module(nn.Module):
def __init__(
self, n, dims, modules, layer=residual,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, **kwargs
):
super(kp_module, self).__init__()
self.n = n
curr_mod = modules[0]
next_mod = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
self.up1 = make_up_layer(
3, curr_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.max1 = make_pool_layer(curr_dim)
self.low1 = make_hg_layer(
3, curr_dim, next_dim, curr_mod,
layer=layer, **kwargs
)
self.low2 = kp_module(
n - 1, dims[1:], modules[1:], layer=layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer,
**kwargs
) if self.n > 1 else \
make_low_layer(
3, next_dim, next_dim, next_mod,
layer=layer, **kwargs
)
self.low3 = make_hg_layer_revr(
3, next_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.up2 = make_unpool_layer(curr_dim)
self.merge = make_merge_layer(curr_dim)
def forward(self, x):
up1 = self.up1(x)
max1 = self.max1(x)
low1 = self.low1(max1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up2(low3)
return self.merge(up1, up2)
class exkp(nn.Module):
def __init__(
self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
make_tl_layer=None, make_br_layer=None,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(exkp, self).__init__()
self.nstack = nstack
self.heads = heads
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
## keypoint heatmaps
for head in heads.keys():
if 'hm' in head:
module = nn.ModuleList([
make_heat_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
for heat in self.__getattr__(head):
heat[-1].bias.data.fill_(-2.19)
else:
module = nn.ModuleList([
make_regr_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
self.relu = nn.ReLU(inplace=True)
def forward(self, image):
# print('image shape', image.shape)
inter = self.pre(image)
outs = []
for ind in range(self.nstack):
kp_, cnv_ = self.kps[ind], self.cnvs[ind]
kp = kp_(inter)
cnv = cnv_(kp)
out = {}
for head in self.heads:
layer = self.__getattr__(head)[ind]
y = layer(cnv)
out[head] = y
outs.append(out)
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
layers = [layer(kernel, dim0, dim1, stride=2)]
layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
return nn.Sequential(*layers)
class HourglassNet(exkp):
def __init__(self, heads, num_stacks=2):
n = 5
dims = [256, 256, 384, 384, 384, 512]
modules = [2, 2, 2, 2, 2, 4]
super(HourglassNet, self).__init__(
n, num_stacks, dims, modules, heads,
make_tl_layer=None,
make_br_layer=None,
make_pool_layer=make_pool_layer,
make_hg_layer=make_hg_layer,
kp_layer=residual, cnv_dim=256
)
def get_large_hourglass_net(num_layers, heads, head_conv):
model = HourglassNet(heads, 2)
return model
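# --- Usage sketch (illustrative addition, not from the original file) ---
# num_layers and head_conv are ignored by this factory; the network is a
# fixed two-stack hourglass. The input side must be divisible by 128
# (a /4 stem followed by five stride-2 stages). Head names are assumptions.
if __name__ == '__main__':
    outs = get_large_hourglass_net(0, {'hm': 80, 'wh': 2}, 0)(
        torch.randn(1, 3, 512, 512))
    # One prediction dict per stack; shapes are at 1/4 input resolution.
    print(len(outs), {k: v.shape for k, v in outs[-1].items()})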
|
banmo-main
|
third_party/vcnplus/models/networks/large_hourglass.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from os.path import join
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import numpy as np
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
if self.return_levels:
return y
else:
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
self.fc = fc
def dla34(pretrained, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=None, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46_c')
return model
def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46x_c')
return model
def dla60x_c(pretrained, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=None, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60')
return model
def dla60x(pretrained=None, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60x')
return model
def dla102(pretrained=None, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102')
return model
def dla102x(pretrained=None, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x')
return model
def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x2')
return model
def dla169(pretrained=None, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla169')
return model
def set_bn(bn):
    # Rebind the module-level BatchNorm so layers built afterwards use it.
    global BatchNorm
    BatchNorm = bn
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class IDAUp(nn.Module):
def __init__(self, node_kernel, out_dim, channels, up_factors):
super(IDAUp, self).__init__()
self.channels = channels
self.out_dim = out_dim
for i, c in enumerate(channels):
if c == out_dim:
proj = Identity()
else:
proj = nn.Sequential(
nn.Conv2d(c, out_dim,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
f = int(up_factors[i])
if f == 1:
up = Identity()
else:
up = nn.ConvTranspose2d(
out_dim, out_dim, f * 2, stride=f, padding=f // 2,
output_padding=0, groups=out_dim, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
for i in range(1, len(channels)):
node = nn.Sequential(
nn.Conv2d(out_dim * 2, out_dim,
kernel_size=node_kernel, stride=1,
padding=node_kernel // 2, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
setattr(self, 'node_' + str(i), node)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, layers):
assert len(self.channels) == len(layers), \
'{} vs {} layers'.format(len(self.channels), len(layers))
layers = list(layers)
for i, l in enumerate(layers):
upsample = getattr(self, 'up_' + str(i))
project = getattr(self, 'proj_' + str(i))
layers[i] = upsample(project(l))
x = layers[0]
y = []
for i in range(1, len(layers)):
node = getattr(self, 'node_' + str(i))
x = node(torch.cat([x, layers[i]], 1))
y.append(x)
return x, y
class DLAUp(nn.Module):
def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None):
super(DLAUp, self).__init__()
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(3, channels[j], in_channels[j:],
scales[j:] // scales[j]))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
layers = list(layers)
assert len(layers) > 1
for i in range(len(layers) - 1):
ida = getattr(self, 'ida_{}'.format(i))
x, y = ida(layers[-i - 2:])
layers[-i - 1:] = y
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class DLASeg(nn.Module):
def __init__(self, base_name, heads,
pretrained=True, down_ratio=4, head_conv=256):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.heads = heads
self.first_level = int(np.log2(down_ratio))
self.base = globals()[base_name](
pretrained=pretrained, return_levels=True)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
'''
self.fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], classes, kernel_size=1,
stride=1, padding=0, bias=True)
)
'''
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(channels[self.first_level], classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
'''
up_factor = 2 ** self.first_level
if up_factor > 1:
up = nn.ConvTranspose2d(classes, classes, up_factor * 2,
stride=up_factor, padding=up_factor // 2,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
else:
up = Identity()
self.up = up
self.softmax = nn.LogSoftmax(dim=1)
for m in self.fc.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
'''
def forward(self, x):
x = self.base(x)
x = self.dla_up(x[self.first_level:])
# x = self.fc(x)
# y = self.softmax(self.up(x))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
'''
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.dla_up.parameters():
yield param
for param in self.fc.parameters():
yield param
'''
'''
def dla34up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla60up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla102up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla102', classes,
pretrained_base=pretrained_base, **kwargs)
return model
def dla169up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla169', classes,
pretrained_base=pretrained_base, **kwargs)
return model
'''
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
model = DLASeg('dla{}'.format(num_layers), heads,
pretrained=True,
down_ratio=down_ratio,
head_conv=head_conv)
return model
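# --- Usage sketch (illustrative addition, not from the original file) ---
# pretrained=True inside get_pose_net fetches the ImageNet DLA-34 weights
# from dl.yf.io, so this needs network access. Head names follow the
# CenterNet convention and are assumptions.
if __name__ == '__main__':
    net = get_pose_net(34, {'hm': 80, 'wh': 2, 'reg': 2})
    out = net(torch.randn(1, 3, 512, 512))[0]
    print({k: v.shape for k, v in out.items()})  # e.g. hm: (1, 80, 128, 128)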
|
banmo-main
|
third_party/vcnplus/models/networks/dlav0.py
|
#!/usr/bin/env python
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "DCN", "src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
#os.environ["CC"] = "g++"
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {'cxx': ['-std=c++14']}
define_macros = []
#if torch.cuda.is_available() and CUDA_HOME is not None:
if torch.cuda.is_available():
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
#raise NotImplementedError('Cuda is not available')
pass
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"_ext",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="DCNv2",
version="0.1",
author="charlesshang",
url="https://github.com/charlesshang/DCNv2",
description="deformable convolutional networks",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
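# --- Build note (illustrative addition, not from the original file) ---
# The extension is typically built in place with:
#   python setup.py build develop
# CUDA kernels are compiled only when torch.cuda.is_available() returns True
# at build time; otherwise a CPU-only extension is produced.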
|
banmo-main
|
third_party/vcnplus/models/networks/DCNv2/setup.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from dcn_v2 import dcn_v2_conv, DCNv2, DCN
from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
deformable_groups = 1
N, inC, inH, inW = 2, 2, 4, 4
outC = 2
kH, kW = 3, 3
def conv_identify(weight, bias):
weight.data.zero_()
bias.data.zero_()
o, i, h, w = weight.shape
y = h//2
x = w//2
for p in range(i):
for q in range(o):
if p == q:
weight.data[q, p, y, x] = 1.0
def check_zero_offset():
conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
dcn_v2 = DCNv2(inC, outC, (kH, kW),
stride=1, padding=1, dilation=1,
deformable_groups=deformable_groups)
conv_offset.weight.data.zero_()
conv_offset.bias.data.zero_()
conv_mask.weight.data.zero_()
conv_mask.bias.data.zero_()
conv_identify(dcn_v2.weight, dcn_v2.bias)
input = torch.randn(N, inC, inH, inW)
offset = conv_offset(input)
mask = conv_mask(input)
mask = torch.sigmoid(mask)
output = dcn_v2(input, offset, mask)
output *= 2
d = (input - output).abs().max()
if d < 1e-10:
print('Zero offset passed')
else:
print('Zero offset failed')
print(input)
print(output)
def check_gradient_dconv():
input = torch.rand(N, inC, inH, inW) * 0.01
input.requires_grad = True
offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW) * 2
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True
mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW)
# mask.data.zero_()
mask.requires_grad = True
mask = torch.sigmoid(mask)
weight = torch.randn(outC, inC, kH, kW)
weight.requires_grad = True
bias = torch.rand(outC)
bias.requires_grad = True
stride = 1
padding = 1
dilation = 1
print('check_gradient_dconv: ',
gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups),
eps=1e-3, atol=1e-4, rtol=1e-2))
def check_pooling_zero_offset():
input = torch.randn(2, 16, 64, 64).zero_()
input[0, :, 16:26, 16:26] = 1.
input[1, :, 10:20, 20:30] = 2.
rois = torch.tensor([
[0, 65, 65, 103, 103],
[1, 81, 41, 119, 79],
]).float()
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=True,
group_size=1,
trans_std=0.0)
out = pooling(input, rois, input.new())
s = ', '.join(['%f' % out[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=False,
group_size=1,
trans_std=0.0)
offset = torch.randn(20, 2, 7, 7).zero_()
dout = dpooling(input, rois, offset)
s = ', '.join(['%f' % dout[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
def check_gradient_dpooling():
input = torch.randn(2, 3, 5, 5) * 0.01
N = 4
batch_inds = torch.randint(2, (N, 1)).float()
x = torch.rand((N, 1)).float() * 15
y = torch.rand((N, 1)).float() * 15
w = torch.rand((N, 1)).float() * 10
h = torch.rand((N, 1)).float() * 10
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(N, 2, 3, 3)
input.requires_grad = True
offset.requires_grad = True
spatial_scale = 1.0 / 4
pooled_size = 3
output_dim = 3
no_trans = 0
group_size = 1
trans_std = 0.0
sample_per_part = 4
part_size = pooled_size
print('check_gradient_dpooling:',
gradcheck(dcn_v2_pooling, (input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std),
eps=1e-4))
def example_dconv():
input = torch.randn(2, 64, 128, 128)
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
padding=1, deformable_groups=2)
# print(dcn.weight.shape, input.shape)
output = dcn(input)
    target = output.new(*output.size())
    target.data.uniform_(-0.01, 0.01)
    error = (target - output).mean()
error.backward()
print(output.shape)
def example_dpooling():
input = torch.randn(2, 32, 64, 64)
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(20, 2, 7, 7)
input.requires_grad = True
offset.requires_grad = True
# normal roi_align
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=True,
group_size=1,
trans_std=0.1)
# deformable pooling
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1)
out = pooling(input, rois, offset)
dout = dpooling(input, rois, offset)
print(out.shape)
print(dout.shape)
target_out = out.new(*out.size())
target_out.data.uniform_(-0.01, 0.01)
target_dout = dout.new(*dout.size())
target_dout.data.uniform_(-0.01, 0.01)
e = (target_out - out).mean()
e.backward()
e = (target_dout - dout).mean()
e.backward()
def example_mdpooling():
input = torch.randn(2, 32, 64, 64)
input.requires_grad = True
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
    # modulated deformable pooling (V2)
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1,
deform_fc_dim=1024)
dout = dpooling(input, rois)
target = dout.new(*dout.size())
target.data.uniform_(-0.1, 0.1)
error = (target - dout).mean()
error.backward()
print(dout.shape)
if __name__ == '__main__':
example_dconv()
example_dpooling()
example_mdpooling()
check_pooling_zero_offset()
# zero offset check
if inC == outC:
check_zero_offset()
check_gradient_dpooling()
check_gradient_dconv()
# """
# ****** Note: backward is not reentrant error may not be a serious problem,
# ****** since the max error is less than 1e-7,
# ****** Still looking for what trigger this problem
# """
|
banmo-main
|
third_party/vcnplus/models/networks/DCNv2/DCN/testcpu.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from dcn_v2 import dcn_v2_conv, DCNv2, DCN
from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
deformable_groups = 1
N, inC, inH, inW = 2, 2, 4, 4
outC = 2
kH, kW = 3, 3
def conv_identify(weight, bias):
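    """Initialize the conv as an identity map: zero bias, and a single 1.0 at
    the kernel center for each matching (output q == input p) channel pair,
    i.e. a Dirac kernel."""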
weight.data.zero_()
bias.data.zero_()
o, i, h, w = weight.shape
y = h//2
x = w//2
for p in range(i):
for q in range(o):
if p == q:
weight.data[q, p, y, x] = 1.0
def check_zero_offset():
conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True).cuda()
conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True).cuda()
dcn_v2 = DCNv2(inC, outC, (kH, kW),
stride=1, padding=1, dilation=1,
deformable_groups=deformable_groups).cuda()
conv_offset.weight.data.zero_()
conv_offset.bias.data.zero_()
conv_mask.weight.data.zero_()
conv_mask.bias.data.zero_()
conv_identify(dcn_v2.weight, dcn_v2.bias)
input = torch.randn(N, inC, inH, inW).cuda()
offset = conv_offset(input)
mask = conv_mask(input)
mask = torch.sigmoid(mask)
output = dcn_v2(input, offset, mask)
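    # conv_mask is zero-initialized, so mask == sigmoid(0) == 0.5 everywhere;
    # the identity kernel thus yields 0.5 * input, and doubling recovers the input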
output *= 2
d = (input - output).abs().max()
if d < 1e-10:
print('Zero offset passed')
else:
print('Zero offset failed')
print(input)
print(output)
def check_gradient_dconv():
input = torch.rand(N, inC, inH, inW).cuda() * 0.01
input.requires_grad = True
offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda() * 2
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True
mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda()
# mask.data.zero_()
mask.requires_grad = True
mask = torch.sigmoid(mask)
weight = torch.randn(outC, inC, kH, kW).cuda()
weight.requires_grad = True
bias = torch.rand(outC).cuda()
bias.requires_grad = True
stride = 1
padding = 1
dilation = 1
print('check_gradient_dconv: ',
gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups),
eps=1e-3, atol=1e-4, rtol=1e-2))
def check_pooling_zero_offset():
input = torch.randn(2, 16, 64, 64).cuda().zero_()
input[0, :, 16:26, 16:26] = 1.
input[1, :, 10:20, 20:30] = 2.
rois = torch.tensor([
[0, 65, 65, 103, 103],
[1, 81, 41, 119, 79],
]).cuda().float()
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=True,
group_size=1,
trans_std=0.0).cuda()
out = pooling(input, rois, input.new())
s = ', '.join(['%f' % out[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=False,
group_size=1,
trans_std=0.0).cuda()
offset = torch.randn(20, 2, 7, 7).cuda().zero_()
dout = dpooling(input, rois, offset)
s = ', '.join(['%f' % dout[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
def check_gradient_dpooling():
input = torch.randn(2, 3, 5, 5).cuda().float() * 0.01
N = 4
batch_inds = torch.randint(2, (N, 1)).cuda().float()
x = torch.rand((N, 1)).cuda().float() * 15
y = torch.rand((N, 1)).cuda().float() * 15
w = torch.rand((N, 1)).cuda().float() * 10
h = torch.rand((N, 1)).cuda().float() * 10
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(N, 2, 3, 3).cuda()
input.requires_grad = True
offset.requires_grad = True
spatial_scale = 1.0 / 4
pooled_size = 3
output_dim = 3
no_trans = 0
group_size = 1
trans_std = 0.0
sample_per_part = 4
part_size = pooled_size
print('check_gradient_dpooling:',
gradcheck(dcn_v2_pooling, (input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std),
eps=1e-4))
def example_dconv():
input = torch.randn(2, 64, 128, 128).cuda()
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
padding=1, deformable_groups=2).cuda()
# print(dcn.weight.shape, input.shape)
output = dcn(input)
    target = output.new(*output.size())
    target.data.uniform_(-0.01, 0.01)
    error = (target - output).mean()
error.backward()
print(output.shape)
def example_dpooling():
input = torch.randn(2, 32, 64, 64).cuda()
batch_inds = torch.randint(2, (20, 1)).cuda().float()
x = torch.randint(256, (20, 1)).cuda().float()
y = torch.randint(256, (20, 1)).cuda().float()
w = torch.randint(64, (20, 1)).cuda().float()
h = torch.randint(64, (20, 1)).cuda().float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(20, 2, 7, 7).cuda()
input.requires_grad = True
offset.requires_grad = True
# normal roi_align
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=True,
group_size=1,
trans_std=0.1).cuda()
# deformable pooling
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1).cuda()
out = pooling(input, rois, offset)
dout = dpooling(input, rois, offset)
print(out.shape)
print(dout.shape)
target_out = out.new(*out.size())
target_out.data.uniform_(-0.01, 0.01)
target_dout = dout.new(*dout.size())
target_dout.data.uniform_(-0.01, 0.01)
e = (target_out - out).mean()
e.backward()
e = (target_dout - dout).mean()
e.backward()
def example_mdpooling():
input = torch.randn(2, 32, 64, 64).cuda()
input.requires_grad = True
batch_inds = torch.randint(2, (20, 1)).cuda().float()
x = torch.randint(256, (20, 1)).cuda().float()
y = torch.randint(256, (20, 1)).cuda().float()
w = torch.randint(64, (20, 1)).cuda().float()
h = torch.randint(64, (20, 1)).cuda().float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
    # modulated deformable pooling (V2)
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1,
deform_fc_dim=1024).cuda()
dout = dpooling(input, rois)
target = dout.new(*dout.size())
target.data.uniform_(-0.1, 0.1)
error = (target - dout).mean()
error.backward()
print(dout.shape)
if __name__ == '__main__':
example_dconv()
example_dpooling()
example_mdpooling()
check_pooling_zero_offset()
# zero offset check
if inC == outC:
check_zero_offset()
check_gradient_dpooling()
check_gradient_dconv()
# """
# ****** Note: backward is not reentrant error may not be a serious problem,
# ****** since the max error is less than 1e-7,
# ****** Still looking for what trigger this problem
# """
|
banmo-main
|
third_party/vcnplus/models/networks/DCNv2/DCN/testcuda.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import torch
from torch import nn
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from torch.autograd.function import once_differentiable
import _ext as _backend
class _DCNv2(Function):
@staticmethod
def forward(ctx, input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups):
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.kernel_size = _pair(weight.shape[2:4])
ctx.deformable_groups = deformable_groups
output = _backend.dcn_v2_forward(input, weight, bias,
offset, mask,
ctx.kernel_size[0], ctx.kernel_size[1],
ctx.stride[0], ctx.stride[1],
ctx.padding[0], ctx.padding[1],
ctx.dilation[0], ctx.dilation[1],
ctx.deformable_groups)
ctx.save_for_backward(input, offset, mask, weight, bias)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \
_backend.dcn_v2_backward(input, weight,
bias,
offset, mask,
grad_output,
ctx.kernel_size[0], ctx.kernel_size[1],
ctx.stride[0], ctx.stride[1],
ctx.padding[0], ctx.padding[1],
ctx.dilation[0], ctx.dilation[1],
ctx.deformable_groups)
return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\
None, None, None, None,
dcn_v2_conv = _DCNv2.apply
class DCNv2(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding, dilation=1, deformable_groups=1):
super(DCNv2, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(torch.Tensor(
out_channels, in_channels, *self.kernel_size))
self.bias = nn.Parameter(torch.Tensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.zero_()
def forward(self, input, offset, mask):
assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
offset.shape[1]
assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
mask.shape[1]
return dcn_v2_conv(input, offset, mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.deformable_groups)
class DCN(DCNv2):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding,
dilation=1, deformable_groups=1):
super(DCN, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, deformable_groups)
channels_ = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
self.conv_offset_mask = nn.Conv2d(self.in_channels,
channels_,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def forward(self, input):
out = self.conv_offset_mask(input)
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
return dcn_v2_conv(input, offset, mask,
self.weight, self.bias,
self.stride,
self.padding,
self.dilation,
self.deformable_groups)
class _DCNv2Pooling(Function):
@staticmethod
def forward(ctx, input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
ctx.spatial_scale = spatial_scale
ctx.no_trans = int(no_trans)
ctx.output_dim = output_dim
ctx.group_size = group_size
ctx.pooled_size = pooled_size
ctx.part_size = pooled_size if part_size is None else part_size
ctx.sample_per_part = sample_per_part
ctx.trans_std = trans_std
output, output_count = \
_backend.dcn_v2_psroi_pooling_forward(input, rois, offset,
ctx.no_trans, ctx.spatial_scale,
ctx.output_dim, ctx.group_size,
ctx.pooled_size, ctx.part_size,
ctx.sample_per_part, ctx.trans_std)
ctx.save_for_backward(input, rois, offset, output_count)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, offset, output_count = ctx.saved_tensors
grad_input, grad_offset = \
_backend.dcn_v2_psroi_pooling_backward(grad_output,
input,
rois,
offset,
output_count,
ctx.no_trans,
ctx.spatial_scale,
ctx.output_dim,
ctx.group_size,
ctx.pooled_size,
ctx.part_size,
ctx.sample_per_part,
ctx.trans_std)
return grad_input, None, grad_offset, \
None, None, None, None, None, None, None, None
dcn_v2_pooling = _DCNv2Pooling.apply
class DCNv2Pooling(nn.Module):
def __init__(self,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
super(DCNv2Pooling, self).__init__()
self.spatial_scale = spatial_scale
self.pooled_size = pooled_size
self.output_dim = output_dim
self.no_trans = no_trans
self.group_size = group_size
self.part_size = pooled_size if part_size is None else part_size
self.sample_per_part = sample_per_part
self.trans_std = trans_std
def forward(self, input, rois, offset):
assert input.shape[1] == self.output_dim
if self.no_trans:
offset = input.new()
return dcn_v2_pooling(input, rois, offset,
self.spatial_scale,
self.pooled_size,
self.output_dim,
self.no_trans,
self.group_size,
self.part_size,
self.sample_per_part,
self.trans_std)
class DCNPooling(DCNv2Pooling):
def __init__(self,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0,
deform_fc_dim=1024):
super(DCNPooling, self).__init__(spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std)
self.deform_fc_dim = deform_fc_dim
if not no_trans:
self.offset_mask_fc = nn.Sequential(
nn.Linear(self.pooled_size * self.pooled_size *
self.output_dim, self.deform_fc_dim),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_dim, self.deform_fc_dim),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_dim, self.pooled_size *
self.pooled_size * 3)
)
self.offset_mask_fc[4].weight.data.zero_()
self.offset_mask_fc[4].bias.data.zero_()
def forward(self, input, rois):
offset = input.new()
if not self.no_trans:
# do roi_align first
n = rois.shape[0]
roi = dcn_v2_pooling(input, rois, offset,
self.spatial_scale,
self.pooled_size,
self.output_dim,
True, # no trans
self.group_size,
self.part_size,
self.sample_per_part,
self.trans_std)
# build mask and offset
offset_mask = self.offset_mask_fc(roi.view(n, -1))
offset_mask = offset_mask.view(
n, 3, self.pooled_size, self.pooled_size)
o1, o2, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
# do pooling with offset and mask
return dcn_v2_pooling(input, rois, offset,
self.spatial_scale,
self.pooled_size,
self.output_dim,
self.no_trans,
self.group_size,
self.part_size,
self.sample_per_part,
self.trans_std) * mask
# only roi_align
return dcn_v2_pooling(input, rois, offset,
self.spatial_scale,
self.pooled_size,
self.output_dim,
self.no_trans,
self.group_size,
self.part_size,
self.sample_per_part,
self.trans_std)
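if __name__ == '__main__':
    # Minimal usage sketch (an assumption for illustration: it presumes a working
    # CUDA build of the `_ext` backend). DCN predicts its own offsets/mask from the
    # input; DCNPooling additionally predicts per-ROI offsets with a small FC head.
    x = torch.randn(2, 64, 32, 32).cuda()
    dcn = DCN(64, 64, kernel_size=(3, 3), stride=1, padding=1).cuda()
    print(dcn(x).shape)  # expected: torch.Size([2, 64, 32, 32])
    # rois are rows of (batch_index, x0, y0, x1, y1) in input coordinates
    rois = torch.Tensor([[0, 0, 0, 64, 64],
                         [1, 16, 16, 96, 96]]).cuda()
    dpool = DCNPooling(spatial_scale=1.0 / 4, pooled_size=7, output_dim=64,
                       no_trans=False, trans_std=0.1).cuda()
    print(dpool(x, rois).shape)  # expected: torch.Size([2, 64, 7, 7])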
|
banmo-main
|
third_party/vcnplus/models/networks/DCNv2/DCN/dcn_v2.py
|
from .dcn_v2 import *
|
banmo-main
|
third_party/vcnplus/models/networks/DCNv2/DCN/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys
sys.path.insert(0,'third_party')
sys.path.insert(0,'./')
import numpy as np
import trimesh
import torch
import cv2
import pdb
from scipy.spatial.transform import Rotation as R
from utils.io import mkdir_p
import argparse
parser = argparse.ArgumentParser(description='render camera trajectories')
parser.add_argument('--outdir', default='tmp/traj',
help='output dir')
parser.add_argument('--nframes', default=90,type=int,
help='number of frames to render')
parser.add_argument('--alpha', default=0.5,type=float,
help='0-1, percentage of a full cycle')
parser.add_argument('--init_a', default=0.5,type=float,
help='0-1, percentage of a full cycle for initial pose')
parser.add_argument('--focal', default=2,type=float,
help='focal length')
parser.add_argument('--d_obj', default=3,type=float,
help='object depth')
parser.add_argument('--can_rand', dest='can_rand',action='store_true',
                    help='randomize canonical space')
parser.add_argument('--img_size', default=512,type=int,
help='image size')
args = parser.parse_args()
## io
img_size = args.img_size
d_obj = args.d_obj
mkdir_p(args.outdir)
rot_rand = torch.Tensor(R.random().as_matrix()).cuda()
# to be compatible with other seqs
base_rmat = torch.eye(3).cuda()
base_rmat[0,0] = -1
base_rmat[1,1] = -1
for i in range(0,args.nframes):
# set cameras
#rotx = np.random.rand()
rotx=0.
if i==0: rotx=0.
roty = args.init_a*6.28+args.alpha*6.28*i/args.nframes
rotz = 0.
Rmat = cv2.Rodrigues(np.asarray([rotx, roty, rotz]))[0]
Rmat = torch.Tensor(Rmat).cuda()
# random rot
if args.can_rand:
Rmat = Rmat.matmul(rot_rand.T)
Rmat = Rmat.matmul(base_rmat)
Tmat = torch.Tensor([0,0,d_obj] ).cuda()
K = torch.Tensor([args.focal,args.focal,0,0] ).cuda()
Kimg = torch.Tensor([args.focal*img_size/2.,args.focal*img_size/2.,img_size/2.,img_size/2.] ).cuda()
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
rtk = np.zeros((4,4))
rtk[:3,:3] = Rmat.cpu().numpy()
rtk[:3, 3] = Tmat.cpu().numpy()
    rtk[3, :] = Kimg.cpu().numpy()
np.savetxt('%s/%05d.txt' %(args.outdir,i),rtk)
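# Sanity-check sketch: read back the last RTK file and split it per the layout
# above (rows 0-2 are [R|T], row 3 is [fx, fy, px, py] in pixels).
rtk_check = np.loadtxt('%s/%05d.txt' % (args.outdir, args.nframes - 1))
Rmat_chk, Tmat_chk, K_chk = rtk_check[:3, :3], rtk_check[:3, 3], rtk_check[3, :]
print('frame %d: det(R)=%.3f K=%s' % (args.nframes - 1, np.linalg.det(Rmat_chk), K_chk))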
|
banmo-main
|
scripts/misc/generate_traj.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# python scripts/add_cam_noise.py cam-files/cse-ama/ 30
import cv2
import numpy as np
import pdb
import sys
import glob
import os
cam_dir=sys.argv[1]
std_rot=float(sys.argv[2]) # deg
seqname=cam_dir.split('/')[-2]
std=np.pi/180*std_rot
odir='%s-gauss-%d'%(cam_dir.rsplit('/',1)[-2],std_rot)
os.makedirs(odir, exist_ok=True)
camlist = glob.glob('%s/*.txt'%(cam_dir))
camlist = sorted(camlist)
for idx,path in enumerate(camlist):
rtk = np.loadtxt(path)
rtk_mod = rtk.copy()
# random rot
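    # draw an axis-angle noise vector (~N(0, std) per component, std in radians)
    # and convert it to a rotation matrix via Rodrigues; left-multiplying below
    # perturbs the camera rotation by roughly std_rot degrees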
rot_rand = np.random.normal(0,std,3)
rot_rand = cv2.Rodrigues(rot_rand)[0]
rtk_mod[:3,:3] = rot_rand.dot(rtk_mod[:3,:3])
rtk_mod[:2,3] = 0
rtk_mod[2,3] = 3
fid = path.rsplit('/',1)[1]
path_mod = '%s/%s'%(odir,fid)
np.savetxt(path_mod, rtk_mod)
print(rtk)
|
banmo-main
|
scripts/misc/add_cam_noise.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# from: https://gist.github.com/adewes/5884820
import random
def get_random_color(pastel_factor = 0.5):
return [(x+pastel_factor)/(1.0+pastel_factor) for x in [random.uniform(0,1.0) for i in [1,2,3]]]
def color_distance(c1,c2):
return sum([abs(x[0]-x[1]) for x in zip(c1,c2)])
def generate_new_color(existing_colors,pastel_factor = 0.5):
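    # greedy farthest-point sampling in RGB space: among 100 random candidates,
    # keep the one whose nearest existing color is farthest away (max-min L1)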
max_distance = None
best_color = None
for i in range(0,100):
color = get_random_color(pastel_factor = pastel_factor)
if not existing_colors:
return color
best_distance = min([color_distance(color,c) for c in existing_colors])
if not max_distance or best_distance > max_distance:
max_distance = best_distance
best_color = color
return best_color
if __name__ == '__main__':
    # To make color choices reproducible, keep this seed; comment it out for new colors each run:
random.seed(10)
colors = []
for i in range(0,65):
colors.append(generate_new_color(colors,pastel_factor = 0.1))
import numpy as np
print((np.asarray(colors)*255).astype(int))
|
banmo-main
|
scripts/misc/random_colors.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
sys.path.insert(0,'third_party')
import subprocess
import imageio
import glob
from utils.io import save_vid
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam, obj2cam_np
from dataloader import frameloader
import pyrender
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
import configparser
import matplotlib
cmap = matplotlib.cm.get_cmap('cool')
from utils.io import config_to_dataloader, draw_cams, str_to_frame, \
extract_data_info
import pytorch3d
import pytorch3d.ops
parser = argparse.ArgumentParser(description='render mesh')
parser.add_argument('--testdir', default='',
help='path to test dir')
parser.add_argument('--seqname', default='camel',
help='sequence to test')
parser.add_argument('--outpath', default='/data/gengshay/output.gif',
help='output path')
parser.add_argument('--overlay', default='no',
help='whether to overlay with the input')
parser.add_argument('--cam_type', default='perspective',
help='camera model, orthographic or perspective')
parser.add_argument('--vis_bones', dest='vis_bones',action='store_true',
                    help='whether to show a transparent surface and visualize bones')
parser.add_argument('--vis_cam', dest='vis_cam',action='store_true',
                    help='whether to show the camera trajectory')
parser.add_argument('--vis_traj', dest='vis_traj', action='store_true',
                    help='whether to show trajectories of vertices')
parser.add_argument('--append_img', default='no',
                    help='whether to append input images before the sequence')
parser.add_argument('--append_render', default='yes',
                    help='whether to append renderings')
parser.add_argument('--nosmooth', dest='smooth', action='store_false',
help='whether to smooth vertex colors and positions')
parser.add_argument('--corresp', dest='corresp', action='store_true',
help='whether to render correspondence')
parser.add_argument('--floor', dest='floor', action='store_true',
help='whether to add floor')
parser.add_argument('--show_dp', dest='show_dp',action='store_true',
                    help='whether to visualize densepose if available')
parser.add_argument('--freeze', dest='freeze',action='store_true',
                    help='freeze object at the first frame')
parser.add_argument('--rest', dest='rest',action='store_true',
help='render rest object shape')
parser.add_argument('--vp', default=0, type=int,
help='which viewpoint to render 0,1,2')
parser.add_argument('--gtdir', default='',
help='path to gt dir')
parser.add_argument('--test_frames', default='9',
                    help='a list of video indices or a number of frames, e.g. {0,1,2} or 30')
parser.add_argument('--root_frames', default='',
                    help='a list of video indices or a number of frames, e.g. {0,1,2} or 30')
parser.add_argument('--gt_pmat',
default='/private/home/gengshany/data/AMA/T_swing/calibration/Camera1.Pmat.cal',
help='path to ama projection matrix, evaluation only')
parser.add_argument('--vis_gtmesh', dest='vis_gtmesh', action='store_true',
help='whether to visualize ground-truth mesh in eval')
parser.add_argument('--clean', dest='clean', action='store_true',
help='whether to use cc to clean up input mesh')
parser.add_argument('--gray_color', dest='gray_color', action='store_true',
help='whether to overwrite color with gray')
args = parser.parse_args()
gt_meshes = [trimesh.load(i, process=False) for i in sorted(glob.glob('%s/*.obj'%(args.gtdir)))]
def main():
print(args.testdir)
if args.rest:
mesh_rest = trimesh.load('%s/mesh-rest.obj'%(args.testdir),process=False)
# read all the data
all_anno = []
all_mesh = []
all_bone = []
all_cam = []
all_fr = []
# eval dataloader
opts_dict = {}
opts_dict['seqname'] = args.seqname
opts_dict['img_size'] = 512 # dummy value
opts_dict['rtk_path'] = ''
evalloader = frameloader.eval_loader(opts_dict)
data_info = extract_data_info(evalloader)
idx_render = str_to_frame(args.test_frames, data_info)
if args.root_frames=='': idx_render_root = idx_render
else: idx_render_root = str_to_frame(args.root_frames, data_info)
# get eval frames
imglist = []
for dataset in evalloader.dataset.datasets:
imglist += dataset.imglist[:-1] # excluding the last frame
rootlist =[imglist[i] for i in idx_render_root]
imglist = [imglist[i] for i in idx_render]
seqname_list = []
    ## subsample frames ## this may cause a bug at nvs ##
#if len(imglist)>150:
# imglist = imglist[::(len(imglist)//150)]
rootlist = [rootlist[i] for i in \
np.linspace(0,len(rootlist)-1,len(imglist),dtype=int)]
for idx,name in enumerate(imglist):
rgb_img = cv2.imread(name)
if args.show_dp:
# replace with densepose
name1, name2 = name.rsplit('/',1)
dppath = '%s/vis-%s'%(name1.replace('JPEGImages', 'Densepose'), name2)
if os.path.exists(dppath):
rgb_img = cv2.resize(cv2.imread(dppath), rgb_img.shape[:2][::-1])
try: sil_img = cv2.imread(name.replace('JPEGImages', 'Annotations').replace('.jpg', '.png'),0)[:,:,None]
except: sil_img = np.zeros(rgb_img.shape)[:,:,0]
all_anno.append([rgb_img,sil_img,0,0,name])
seqname = name.split('/')[-2]
seqname_list.append(seqname)
fr = int(name.split('/')[-1].split('.')[-2])
all_fr.append(fr)
print('%s/%d'%(seqname, fr))
if args.append_render=="yes":
try:
mesh = trimesh.load('%s/%s-mesh-%05d.obj'%(args.testdir, seqname, fr),process=False)
if args.clean:
# keep the largest mesh
mesh = [i for i in mesh.split(only_watertight=False)]
mesh = sorted(mesh, key=lambda x:x.vertices.shape[0])
mesh = mesh[-1]
if args.gray_color:
mesh.visual.vertex_colors[:,:3]=128 # necessary for color override
all_mesh.append(mesh)
name_root = rootlist[idx]
seqname_root = name_root.split('/')[-2]
fr_root = int(name_root.split('/')[-1].split('.')[-2])
cam = np.loadtxt('%s/%s-cam-%05d.txt'%(args.testdir, seqname_root, fr_root))
all_cam.append(cam)
bone = trimesh.load('%s/%s-bone-%05d.obj'%(args.testdir, seqname,fr),process=False)
all_bone.append(bone)
except: print('no mesh found')
else:
# dummy variable
mesh = trimesh.creation.uv_sphere(radius=1,count=[2, 2])
all_mesh.append(mesh)
# process bones, trajectories and cameras
num_original_verts = []
num_original_faces = []
pts_trajs = []
col_trajs = []
    traj_len = len(all_mesh) # TODO: should depend on the seqname
pts_num = len(all_mesh[0].vertices)
traj_num = min(1000, pts_num)
traj_idx = np.random.choice(pts_num, traj_num)
scene_scale = np.abs(all_mesh[0].vertices).max()
for i in range(len(all_mesh)):
if args.vis_bones:
all_mesh[i].visual.vertex_colors[:,-1]=254 # necessary for color override
num_original_verts.append( all_mesh[i].vertices.shape[0])
num_original_faces.append( all_mesh[i].faces.shape[0] )
try: bone=all_bone[i]
except: bone=trimesh.Trimesh()
all_mesh[i] = trimesh.util.concatenate([all_mesh[i], bone])
# change color according to time
if args.vis_traj:
pts_traj = np.zeros((traj_len, traj_num,2,3))
col_traj = np.zeros((traj_len, traj_num,2,4))
for j in range(traj_len):
if i-j-1<0 or seqname_list[j] != seqname_list[i]: continue
pts_traj[j,:,0] = all_mesh[i-j-1].vertices[traj_idx]
pts_traj[j,:,1] = all_mesh[i-j].vertices [traj_idx]
col_traj[j,:,0] = cmap(float(i-j-1)/traj_len)
col_traj[j,:,1] = cmap(float(i-j)/traj_len)
pts_trajs.append(pts_traj)
col_trajs.append(col_traj)
# change color according to time
if args.vis_cam:
mesh_cam = draw_cams(all_cam, axis=False)
mesh_cam.export('%s/mesh_cam-%s.obj'%(args.testdir,seqname))
# read images
input_size = all_anno[0][0].shape[:2]
#output_size = input_size
output_size = (int(input_size[0] * 480/input_size[1]), 480)# 270x480
frames=[]
ctrajs=[]
rndsils=[]
cd_ave=[] # average chamfer distance
f001=[] # f@1%
f002=[]
f005=[]
if args.append_img=="yes":
if args.append_render=='yes':
if args.freeze: napp_fr = 30
else: napp_fr = int(len(all_anno)//5)
for i in range(napp_fr):
frames.append(cv2.resize(all_anno[0][0],output_size[::-1])[:,:,::-1])
else:
for i in range(len(all_anno)):
#silframe=cv2.resize((all_anno[i][1]>0).astype(float),output_size[::-1])*255
imgframe=cv2.resize(all_anno[i][0],output_size[::-1])[:,:,::-1]
#redframe=(np.asarray([1,0,0])[None,None] * silframe[:,:,None]).astype(np.uint8)
#imgframe = cv2.addWeighted(imgframe, 1, redframe, 0.5, 0)
frames.append(imgframe)
#frames.append(cv2.resize(all_anno[i][1],output_size[::-1])*255) # silhouette
#frames.append(cv2.resize(all_anno[i][0],output_size[::-1])[:,:,::-1]) # frame
#strx = sorted(glob.glob('%s/*'%datapath))[i]# kp
#strx = strx.replace('JPEGImages', 'KP')
#kpimg = cv2.imread('%s/%s'%(strx.rsplit('/',1)[0],strx.rsplit('/',1)[1].replace('.jpg', '_rendered.png')))
#frames.append(cv2.resize(kpimg,output_size[::-1])[:,:,::-1])
#strx = sorted(glob.glob('%s/*'%datapath))[init_frame:end_frame][::dframe][i]# flow
#strx = strx.replace('JPEGImages', 'FlowBW')
#flowimg = cv2.imread('%s/vis-%s'%(strx.rsplit('/',1)[0],strx.rsplit('/',1)[1]))
#frames.append(cv2.resize(flowimg,output_size[::-1])[:,:,::-1])
# process cameras
theta = 9*np.pi/9
#theta = 7*np.pi/9
init_light_pose = np.asarray([[1,0,0,0],[0,np.cos(theta),-np.sin(theta),0],[0,np.sin(theta),np.cos(theta),0],[0,0,0,1]])
init_light_pose0 =np.asarray([[1,0,0,0],[0,0,-1,0],[0,1,0,0],[0,0,0,1]])
if args.freeze or args.rest:
size = len(all_mesh)
#size = 150
else:
size = len(all_mesh)
for i in range(size):
if args.append_render=='no':break
# render flow between mesh 1 and 2
if args.freeze or args.rest:
print(i)
refimg, refsil, refkp, refvis, refname = all_anno[0]
img_size = max(refimg.shape)
if args.freeze: refmesh = all_mesh[0]
elif args.rest: refmesh = mesh_rest
#refmesh.vertices -= refmesh.vertices.mean(0)[None]
#refmesh.vertices /= 1.2*np.abs(refmesh.vertices).max()
refcam = all_cam[0].copy()
rot_turntb = cv2.Rodrigues(np.asarray([0.,i*2*np.pi/size,0.]))[0]
refcam[:3,:3] = rot_turntb.dot( refcam[:3,:3] )
refcam[:2,3] = 0 # trans xy
if args.vis_cam:
refcam[2,3] = 10 # depth
refcam[3,:2] = 8*img_size/2 # fl
refcam[3,2] = refimg.shape[1]/2 # px py
refcam[3,3] = refimg.shape[0]/2 # px py
else:
refimg, refsil, refkp, refvis, refname = all_anno[i]
print('%s'%(refname))
img_size = max(refimg.shape)
refmesh = all_mesh[i]
refcam = all_cam[i]
# load vertices
refface = torch.Tensor(refmesh.faces[None]).cuda()
verts = torch.Tensor(refmesh.vertices[None]).cuda()
# change viewpoint
vp_tmat = refcam[:3,3]
vp_kmat = refcam[3]
if args.vp==-1:
# static camera
#vp_rmat = (refcam[:3,:3].T).dot(all_cam[0][:3,:3])
vp_rmat = all_cam[0][:3,:3].dot(refcam[:3,:3].T)
# vp_rmat = cv2.Rodrigues(np.asarray([np.pi/2,0,0]))[0].dot(vp_rmat) # bev
vp_tmat = all_cam[0][:3,3]
vp_kmat = all_cam[0][3].copy()
vp_kmat[2] = vp_kmat[2]/all_anno[0][0].shape[1]*all_anno[i][0].shape[1]
vp_kmat[3] = vp_kmat[3]/all_anno[0][0].shape[0]*all_anno[i][0].shape[0]
elif args.vp==-2:
# canonical camera
can_vis_rot = cv2.Rodrigues(np.asarray([0,np.pi/3,0]))[0].dot(\
cv2.Rodrigues(np.asarray([np.pi, 0,0 ]))[0])
vp_rmat = can_vis_rot.dot(refcam[:3,:3].T)
vp_tmat = np.zeros(3)
vp_tmat[2] = all_cam[0][2,3]
vp_kmat = all_cam[0][3].copy()
vp_kmat[2] = vp_kmat[2]/all_anno[0][0].shape[1]*all_anno[i][0].shape[1]
vp_kmat[3] = vp_kmat[3]/all_anno[0][0].shape[0]*all_anno[i][0].shape[0]
elif args.vp==1:
vp_rmat = cv2.Rodrigues(np.asarray([0,np.pi/2,0]))[0]
elif args.vp==2:
vp_rmat = cv2.Rodrigues(np.asarray([np.pi/2,0,0]))[0]
else:
vp_rmat = cv2.Rodrigues(np.asarray([0.,0,0]))[0]
refcam_vp = refcam.copy()
#refcam_vp[:3,:3] = refcam_vp[:3,:3].dot(vp_rmat)
refcam_vp[:3,:3] = vp_rmat.dot(refcam_vp[:3,:3])
if args.vp==1 or args.vp==2:
vmean = verts[0].mean(0).cpu()
vp_tmat[:2] = (-refcam_vp[:3,:3].dot(vmean))[:2]
refcam_vp[:3,3] = vp_tmat
refcam_vp[3] = vp_kmat
# render
Rmat = torch.Tensor(refcam_vp[None,:3,:3]).cuda()
Tmat = torch.Tensor(refcam_vp[None,:3,3]).cuda()
ppoint =refcam_vp[3,2:]
focal = refcam_vp[3,:2]
verts = obj_to_cam(verts, Rmat, Tmat)
r = OffscreenRenderer(img_size, img_size)
colors = refmesh.visual.vertex_colors
scene = Scene(ambient_light=0.4*np.asarray([1.,1.,1.,1.]))
direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=6.0)
colors= np.concatenate([0.6*colors[:,:3].astype(np.uint8), colors[:,3:]],-1) # avoid overexposure
# project trajectories to image
if args.vis_traj:
pts_trajs[i] = obj2cam_np(pts_trajs[i], Rmat, Tmat)
if args.vis_cam:
mesh_cam_transformed = mesh_cam.copy()
mesh_cam_transformed.vertices = obj2cam_np(mesh_cam_transformed.vertices, Rmat, Tmat)
# compute error if ground-truth is given
if len(args.gtdir)>0:
if len(gt_meshes)>0:
verts_gt = torch.Tensor(gt_meshes[i].vertices[None]).cuda()
refface_gt=torch.Tensor(gt_meshes[i].faces[None]).cuda()
else:
verts_gt = verts
refface_gt = refface
# ama camera coord -> scale -> our camera coord
if args.gt_pmat!='canonical':
pmat = np.loadtxt(args.gt_pmat)
K,R,T,_,_,_,_=cv2.decomposeProjectionMatrix(pmat)
Rmat_gt = R
Tmat_gt = T[:3,0]/T[-1,0]
Tmat_gt = Rmat_gt.dot(-Tmat_gt[...,None])[...,0]
K = K/K[-1,-1]
ppoint[0] = K[0,2]
ppoint[1] = K[1,2]
focal[0] = K[0,0]
focal[1] = K[1,1]
else:
Rmat_gt = np.eye(3)
Tmat_gt = np.asarray([0,0,0]) # assuming synthetic obj has depth 3
# render ground-truth to different viewpoint according to cam prediction
#Rmat_gt = refcam[:3,:3].T
#Tmat_gt = -refcam[:3,:3].T.dot(refcam[:3,3:4])[...,0]
#Rmat_gt = refcam_vp[:3,:3].dot(Rmat_gt)
#Tmat_gt = refcam_vp[:3,:3].dot(Tmat_gt[...,None])[...,0] + refcam_vp[:3,3]
# transform gt to camera
Rmat_gt = torch.Tensor(Rmat_gt).cuda()[None]
Tmat_gt = torch.Tensor(Tmat_gt).cuda()[None]
# max length of axis aligned bbox
bbox_max = float((verts_gt.max(1)[0]-verts_gt.min(1)[0]).max().cpu())
verts_gt = obj_to_cam(verts_gt, Rmat_gt, Tmat_gt)
import chamfer3D.dist_chamfer_3D
import fscore
chamLoss = chamfer3D.dist_chamfer_3D.chamfer_3DDist()
            ## use ICP to improve our results
fitted_scale = verts_gt[...,-1].median() / verts[...,-1].median()
verts = verts*fitted_scale
frts = pytorch3d.ops.iterative_closest_point(verts,verts_gt, \
estimate_scale=False,max_iterations=100)
verts = ((frts.RTs.s*verts).matmul(frts.RTs.R)+frts.RTs.T[:,None])
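            # apply the fitted similarity transform (scale s, rotation R, translation T)
            # so the predicted vertices are registered to the GT frame before metrics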
## show registered meshes
#t=trimesh.Trimesh(verts[0].cpu()).export('tmp/0.obj')
#t=trimesh.Trimesh(verts_gt[0].cpu()).export('tmp/1.obj')
#pdb.set_trace()
raw_cd,raw_cd_back,_,_ = chamLoss(verts_gt,verts) # this returns distance squared
f1,_,_ = fscore.fscore(raw_cd, raw_cd_back,
threshold = (bbox_max*0.01)**2)
f2,_,_ = fscore.fscore(raw_cd, raw_cd_back,
threshold = (bbox_max*0.02)**2)
f5,_,_ = fscore.fscore(raw_cd, raw_cd_back,
threshold = (bbox_max*0.05)**2)
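            # f-scores at 1%/2%/5% of the GT bbox's longest side; thresholds are
            # squared because chamLoss returns squared distances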
# sum
raw_cd = np.asarray(raw_cd.cpu()[0])
raw_cd_back = np.asarray(raw_cd_back.cpu()[0])
raw_cd = np.sqrt(raw_cd)
raw_cd_back = np.sqrt(raw_cd_back)
cd_mean = raw_cd.mean() + raw_cd_back.mean()
cd_ave.append(cd_mean)
f001.append( f1.cpu().numpy())
f002.append( f2.cpu().numpy())
f005.append( f5.cpu().numpy())
print('cd:%.2f cm'%(100*cd_mean))
cm = plt.get_cmap('plasma')
if args.vis_gtmesh:
verts = verts_gt
refface = refface_gt
colors = cm(raw_cd*5)
else:
colors = cm(raw_cd_back*5)
smooth=args.smooth
if args.freeze:
tbone = 0
else:
tbone = i
if args.vis_bones:
mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:num_original_verts[tbone],:3].cpu()), faces=np.asarray(refface[0,:num_original_faces[tbone]].cpu()),vertex_colors=colors)
meshr = Mesh.from_trimesh(mesh,smooth=smooth)
meshr._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=meshr ))
mesh2 = trimesh.Trimesh(vertices=np.asarray(verts[0,num_original_verts[tbone]:,:3].cpu()), faces=np.asarray(refface[0,num_original_faces[tbone]:].cpu()-num_original_verts[tbone]),vertex_colors=colors[num_original_verts[tbone]:])
if len(mesh2.vertices)>0:
mesh2=Mesh.from_trimesh(mesh2,smooth=smooth)
mesh2._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=mesh2))
else:
mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:,:3].cpu()), faces=np.asarray(refface[0].cpu()),vertex_colors=colors)
meshr = Mesh.from_trimesh(mesh,smooth=smooth)
meshr._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=meshr ))
if args.vis_traj:
pts = pts_trajs[i].reshape(-1,3)# np.asarray([[-1,-1,1],[1,1,1]]) # 2TxNx3
colors = col_trajs[i].reshape(-1,4)#np.random.uniform(size=pts.shape)
m = Mesh([pyrender.Primitive(pts,mode=1,color_0=colors)])
scene.add_node( Node(mesh=m))
if args.vis_cam:
mesh_cam_transformed=Mesh.from_trimesh(mesh_cam_transformed)
mesh_cam_transformed._primitives[0].material.RoughnessFactor=1.
scene.add_node( Node(mesh=mesh_cam_transformed))
floor_mesh = trimesh.load('./mesh_material/wood.obj',process=False)
floor_mesh.vertices = np.concatenate([floor_mesh.vertices[:,:1], floor_mesh.vertices[:,2:3], floor_mesh.vertices[:,1:2]],-1 )
xfloor = 10*mesh.vertices[:,0].min() + (10*mesh.vertices[:,0].max()-10*mesh.vertices[:,0].min())*(floor_mesh.vertices[:,0:1] - floor_mesh.vertices[:,0].min())/(floor_mesh.vertices[:,0].max()-floor_mesh.vertices[:,0].min())
yfloor = floor_mesh.vertices[:,1:2]; yfloor[:] = (mesh.vertices[:,1].max())
zfloor = 0.5*mesh.vertices[:,2].min() + (10*mesh.vertices[:,2].max()-0.5*mesh.vertices[:,2].min())*(floor_mesh.vertices[:,2:3] - floor_mesh.vertices[:,2].min())/(floor_mesh.vertices[:,2].max()-floor_mesh.vertices[:,2].min())
floor_mesh.vertices = np.concatenate([xfloor,yfloor,zfloor],-1)
floor_mesh = trimesh.Trimesh(floor_mesh.vertices, floor_mesh.faces, vertex_colors=255*np.ones((4,4), dtype=np.uint8))
if args.floor:
scene.add_node( Node(mesh=Mesh.from_trimesh(floor_mesh))) # overrides the prev. one
if args.cam_type=='perspective':
cam = IntrinsicsCamera(
focal[0],
focal[0],
ppoint[0],
ppoint[1],
znear=1e-3,zfar=1000)
else:
cam = pyrender.OrthographicCamera(xmag=1., ymag=1.)
cam_pose = -np.eye(4); cam_pose[0,0]=1; cam_pose[-1,-1]=1
cam_node = scene.add(cam, pose=cam_pose)
light_pose = init_light_pose
direc_l_node = scene.add(direc_l, pose=light_pose)
#if args.vis_bones:
# color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL)
#else:
# color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)
color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)
r.delete()
color = color[:refimg.shape[0],:refimg.shape[1],:3]
rndsil = (depth[:refimg.shape[0],:refimg.shape[1]]>0).astype(int)*100
if args.overlay=='yes':
color = cv2.addWeighted(color, 0.5, refimg[:,:,::-1], 0.5, 0)
prefix = (args.outpath).split('/')[-1].split('.')[0]
color = color.copy(); color[0,0,:] = 0
imoutpath = '%s/%s-mrender%03d.jpg'%(args.testdir, prefix,i)
cv2.imwrite(imoutpath,color[:,:,::-1] )
color = cv2.resize(color, output_size[::-1])
frames.append(color)
# TODO save cams
cam_scale = output_size[1] / rndsil.shape[1]
ctraj = torch.cat([Rmat, Tmat[...,None]],-1).cpu().numpy() # 1,3,4
kmat = np.asarray([focal[0]*cam_scale,
focal[0]*cam_scale,
ppoint[0]*cam_scale,
ppoint[1]*cam_scale])
ctraj = np.concatenate([ctraj,kmat[None,None,:]],1) # 1,4,4
ctrajs.append(ctraj[0])
rndsil = cv2.resize(rndsil.astype(np.int16), output_size[::-1])
rndsils.append(rndsil)
if args.gtdir != '':
cd_ave = np.asarray(cd_ave)
print('ave chamfer dis: %.1f cm'%(100*cd_ave.mean()))
print('max chamfer dis: %.1f cm'%(100*np.max(cd_ave)))
f001 = np.asarray(f001)
print('ave f-score at d=1%%: %.1f%%'%(100*np.mean(f001)))
print('min f-score at d=1%%: %.1f%%'%(100*np.min( f001)))
f002 = np.asarray(f002)
print('ave f-score at d=2%%: %.1f%%'%(100*np.mean(f002)))
print('min f-score at d=2%%: %.1f%%'%(100*np.min( f002)))
f005 = np.asarray(f005)
print('ave f-score at d=5%%: %.1f%%'%(100*np.mean(f005)))
print('min f-score at d=5%%: %.1f%%'%(100*np.min( f005)))
save_vid(args.outpath, frames, suffix='.gif')
save_vid(args.outpath, frames, suffix='.mp4',upsample_frame=0)
# save camera trajectory and reference sil
for idx in range(len(ctrajs)):
save_path = '%s-ctrajs-%05d.txt'%(args.outpath, idx)
np.savetxt(save_path, ctrajs[idx])
save_path = '%s-refsil-%05d.png'%(args.outpath, idx)
cv2.imwrite(save_path, rndsils[idx])
if __name__ == '__main__':
main()
|
banmo-main
|
scripts/visualize/render_vis.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
import pdb
import glob
import numpy as np
import configparser
from utils.io import config_to_dataloader, draw_cams, render_root_txt
cam_dir=sys.argv[1]
cap_frame=int(sys.argv[2])
def main():
render_root_txt(cam_dir, cap_frame)
# python ... path to camera folder
# will draw a trajectory of camera locations
if __name__ == '__main__':
main()
|
banmo-main
|
scripts/visualize/render_root_txt.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys, os
import pdb
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
import subprocess
import imageio
import glob
from utils.io import save_vid
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam, obj2cam_np
import pyrender
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
import configparser
import matplotlib
cmap = matplotlib.cm.get_cmap('cool')
from utils.io import config_to_dataloader, draw_cams
parser = argparse.ArgumentParser(description='script to render cameras over epochs')
parser.add_argument('--testdir', default='',
help='path to test dir')
parser.add_argument('--cap_frame', default=-1,type=int,
help='number of frames to cap')
parser.add_argument('--first_idx', default=0,type=int,
help='first frame index to vis')
parser.add_argument('--last_idx', default=-1,type=int,
help='last frame index to vis')
parser.add_argument('--mesh_only', dest='mesh_only',action='store_true',
help='whether to only render rest mesh')
args = parser.parse_args()
img_size = 1024
def main():
# read all the data
logname = args.testdir.split('/')[-2]
varlist = [i for i in glob.glob('%s/vars_*.npy'%args.testdir) \
if 'latest.npy' not in i]
varlist = sorted(varlist,
key=lambda x:int(x.split('/')[-1].split('vars_')[-1].split('.npy')[0]))
# get first index that is used for optimization
var = np.load(varlist[-1],allow_pickle=True)[()]
var['rtk'] = var['rtk'][args.first_idx:args.last_idx]
first_valid_idx = np.linalg.norm(var['rtk'][:,:3,3], 2,-1)>0
first_valid_idx = np.argmax(first_valid_idx)
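    # cameras that were never optimized have an all-zero translation;
    # argmax over the boolean mask finds the first optimized frame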
#varlist = varlist[1:]
if args.cap_frame>-1:
varlist = varlist[:args.cap_frame]
size = len(varlist)
mesh_cams = []
mesh_objs = []
for var_path in varlist:
# construct camera mesh
var = np.load(var_path,allow_pickle=True)[()]
var['rtk'] = var['rtk'][args.first_idx:args.last_idx]
mesh_cams.append(draw_cams(var['rtk'][first_valid_idx:]))
mesh_objs.append(var['mesh_rest'])
frames = []
# process cameras
for i in range(size):
print(i)
refcam = var['rtk'][first_valid_idx].copy()
## median camera trans
#mtrans = np.median(np.linalg.norm(var['rtk'][first_valid_idx:,:3,3],2,-1))
# max camera trans
mtrans = np.max(np.linalg.norm(var['rtk'][first_valid_idx:,:3,3],2,-1))
refcam[:2,3] = 0 # trans xy
refcam[2,3] = 4*mtrans # depth
refcam[3,:2] = 4*img_size/2 # fl
refcam[3,2] = img_size/2
refcam[3,3] = img_size/2
vp_rmat = refcam[:3,:3]
if args.mesh_only: refcam[3,:2] *= 2 # make it appear larger
else:
vp_rmat = cv2.Rodrigues(np.asarray([np.pi/2,0,0]))[0].dot(vp_rmat) # bev
refcam[:3,:3] = vp_rmat
# load vertices
refmesh = mesh_cams[i]
refface = torch.Tensor(refmesh.faces[None]).cuda()
verts = torch.Tensor(refmesh.vertices[None]).cuda()
# render
Rmat = torch.Tensor(refcam[None,:3,:3]).cuda()
Tmat = torch.Tensor(refcam[None,:3,3]).cuda()
ppoint =refcam[3,2:]
focal = refcam[3,:2]
verts = obj_to_cam(verts, Rmat, Tmat)
r = OffscreenRenderer(img_size, img_size)
colors = refmesh.visual.vertex_colors
scene = Scene(ambient_light=0.4*np.asarray([1.,1.,1.,1.]))
direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=6.0)
colors= np.concatenate([0.6*colors[:,:3].astype(np.uint8), colors[:,3:]],-1) # avoid overexposure
smooth=True
mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:,:3].cpu()), faces=np.asarray(refface[0].cpu()),vertex_colors=colors)
meshr = Mesh.from_trimesh(mesh,smooth=smooth)
meshr._primitives[0].material.RoughnessFactor=.5
if not args.mesh_only:
scene.add_node( Node(mesh=meshr ))
mesh_obj = mesh_objs[i]
if args.mesh_only:
# assign gray color
mesh_obj.visual.vertex_colors[...,:3] = 64
if len(mesh_obj.vertices)>0:
mesh_obj.vertices = obj2cam_np(mesh_obj.vertices, Rmat, Tmat)
mesh_obj=Mesh.from_trimesh(mesh_obj,smooth=smooth)
mesh_obj._primitives[0].material.RoughnessFactor=1.
scene.add_node( Node(mesh=mesh_obj))
cam = IntrinsicsCamera(
focal[0],
focal[0],
ppoint[0],
ppoint[1],
znear=1e-3,zfar=1000)
cam_pose = -np.eye(4); cam_pose[0,0]=1; cam_pose[-1,-1]=1
cam_node = scene.add(cam, pose=cam_pose)
light_pose =np.asarray([[1,0,0,0],[0,0,-1,0],[0,1,0,0],[0,0,0,1]],dtype=float)
light_pose[:3,:3] = cv2.Rodrigues(np.asarray([np.pi,0,0]))[0]
direc_l_node = scene.add(direc_l, pose=light_pose)
color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)
r.delete()
# save image
color = color.astype(np.uint8)
color = cv2.putText(color, 'epoch: %02d'%(i), (30,50),
cv2.FONT_HERSHEY_SIMPLEX,2, (256,0,0), 2)
imoutpath = '%s/mesh-cam-%02d.png'%(args.testdir,i)
cv2.imwrite(imoutpath,color[:,:,::-1] )
frames.append(color)
save_vid('%s/mesh-cam'%args.testdir, frames, suffix='.gif')
save_vid('%s/mesh-cam'%args.testdir, frames, suffix='.mp4',upsample_frame=-1)
if __name__ == '__main__':
main()
|
banmo-main
|
scripts/visualize/render_root.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
bash scripts/render_nvs.sh
"""
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from collections import defaultdict
from utils.io import save_vid, str_to_frame, save_bones, load_root, load_sils
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, \
raycast, sample_xy, K2inv, get_near_far, \
chunk_rays
from nnutils.rendering import render_rays
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
# script specific ones
flags.DEFINE_integer('maxframe', 0, 'maximum number of frames to render')
flags.DEFINE_integer('vidid', 0, 'video id that determines the env code')
flags.DEFINE_integer('bullet_time', -1, 'frame id in a video to show bullet time')
flags.DEFINE_float('scale', 0.1,
'scale applied to the rendered image (wrt focal length)')
flags.DEFINE_string('rootdir', 'tmp/traj/','root body directory')
flags.DEFINE_string('nvs_outpath', 'tmp/nvs-','output prefix')
def construct_rays_nvs(img_size, rtks, near_far, rndmask, device):
"""
rndmask: controls which pixel to render
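    rtks: (bs, 4, 4) camera matrices; rows 0-2 are [R|T], row 3 is [fx, fy, px, py]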
"""
bs = rtks.shape[0]
rtks = torch.Tensor(rtks).to(device)
rndmask = torch.Tensor(rndmask).to(device).view(-1)>0
_, xys = sample_xy(img_size, bs, 0, device, return_all=True)
xys=xys[:,rndmask]
Rmat = rtks[:,:3,:3]
Tmat = rtks[:,:3,3]
Kinv = K2inv(rtks[:,3])
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
return rays
def main(_):
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
model = trainer.model
model.eval()
nerf_models = model.nerf_models
embeddings = model.embeddings
# bs, 4,4 (R|T)
# (f|p)
rtks = load_root(opts.rootdir, 0) # cap frame=0=>load all
rndsils = load_sils(opts.rootdir.replace('ctrajs', 'refsil'),0)
if opts.maxframe>0:
sample_idx = np.linspace(0,len(rtks)-1,opts.maxframe).astype(int)
rtks = rtks[sample_idx]
rndsils = rndsils[sample_idx]
else:
sample_idx = np.linspace(0,len(rtks)-1, len(rtks)).astype(int)
img_size = rndsils[0].shape
if img_size[0] > img_size[1]:
img_type='vert'
else:
img_type='hori'
# determine render image scale
rtks[:,3] = rtks[:,3]*opts.scale
bs = len(rtks)
img_size = int(max(img_size)*opts.scale)
print("render size: %d"%img_size)
model.img_size = img_size
opts.render_size = img_size
vars_np = {}
vars_np['rtk'] = rtks
vars_np['idk'] = np.ones(bs)
near_far = torch.zeros(bs,2).to(model.device)
near_far = get_near_far(near_far,
vars_np,
pts=model.latest_vars['mesh_rest'].vertices)
vidid = torch.Tensor([opts.vidid]).to(model.device).long()
source_l = model.data_offset[opts.vidid+1] - model.data_offset[opts.vidid] -1
embedid = torch.Tensor(sample_idx).to(model.device).long() + \
model.data_offset[opts.vidid]
if opts.bullet_time>-1: embedid[:] = opts.bullet_time+model.data_offset[opts.vidid]
print(embedid)
rgbs = []
sils = []
viss = []
for i in range(bs):
rndsil = rndsils[i]
rndmask = np.zeros((img_size, img_size))
if img_type=='vert':
size_short_edge = int(rndsil.shape[1] * img_size/rndsil.shape[0])
rndsil = cv2.resize(rndsil, (size_short_edge, img_size))
rndmask[:,:size_short_edge] = rndsil
else:
size_short_edge = int(rndsil.shape[0] * img_size/rndsil.shape[1])
rndsil = cv2.resize(rndsil, (img_size, size_short_edge))
rndmask[:size_short_edge] = rndsil
rays = construct_rays_nvs(model.img_size, rtks[i:i+1],
near_far[i:i+1], rndmask, model.device)
# add env code
rays['env_code'] = model.env_code(embedid[i:i+1])[:,None]
rays['env_code'] = rays['env_code'].repeat(1,rays['nsample'],1)
# add bones
time_embedded = model.pose_code(embedid[i:i+1])[:,None]
rays['time_embedded'] = time_embedded.repeat(1,rays['nsample'],1)
if opts.lbs and model.num_bone_used>0:
bone_rts = model.nerf_body_rts(embedid[i:i+1])
rays['bone_rts'] = bone_rts.repeat(1,rays['nsample'],1)
model.update_delta_rts(rays)
with torch.no_grad():
# render images only
results=defaultdict(list)
bs_rays = rays['bs'] * rays['nsample'] #
for j in range(0, bs_rays, opts.chunk):
rays_chunk = chunk_rays(rays,j,opts.chunk)
rendered_chunks = render_rays(nerf_models,
embeddings,
rays_chunk,
N_samples = opts.ndepth,
perturb=0,
noise_std=0,
chunk=opts.chunk, # chunk size is effective in val mode
use_fine=True,
img_size=model.img_size,
obj_bound = model.latest_vars['obj_bound'],
render_vis=True,
opts=opts,
)
for k, v in rendered_chunks.items():
results[k] += [v]
for k, v in results.items():
v = torch.cat(v, 0)
v = v.view(rays['nsample'], -1)
results[k] = v
rgb = results['img_coarse'].cpu().numpy()
dph = results['depth_rnd'] [...,0].cpu().numpy()
sil = results['sil_coarse'][...,0].cpu().numpy()
vis = results['vis_pred'] [...,0].cpu().numpy()
sil[sil<0.5] = 0
rgb[sil<0.5] = 1
rgbtmp = np.ones((img_size, img_size, 3))
dphtmp = np.ones((img_size, img_size))
siltmp = np.ones((img_size, img_size))
vistmp = np.ones((img_size, img_size))
rgbtmp[rndmask>0] = rgb
dphtmp[rndmask>0] = dph
siltmp[rndmask>0] = sil
vistmp[rndmask>0] = vis
if img_type=='vert':
rgb = rgbtmp[:,:size_short_edge]
sil = siltmp[:,:size_short_edge]
vis = vistmp[:,:size_short_edge]
dph = dphtmp[:,:size_short_edge]
else:
rgb = rgbtmp[:size_short_edge]
sil = siltmp[:size_short_edge]
vis = vistmp[:size_short_edge]
dph = dphtmp[:size_short_edge]
rgbs.append(rgb)
sils.append(sil*255)
viss.append(vis*255)
cv2.imwrite('%s-rgb_%05d.png'%(opts.nvs_outpath,i), rgb[...,::-1]*255)
cv2.imwrite('%s-sil_%05d.png'%(opts.nvs_outpath,i), sil*255)
cv2.imwrite('%s-vis_%05d.png'%(opts.nvs_outpath,i), vis*255)
save_vid('%s-rgb'%(opts.nvs_outpath), rgbs, suffix='.mp4',upsample_frame=0)
save_vid('%s-sil'%(opts.nvs_outpath), sils, suffix='.mp4',upsample_frame=0)
save_vid('%s-vis'%(opts.nvs_outpath), viss, suffix='.mp4',upsample_frame=0)
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
scripts/visualize/nvs.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
bash scripts/render_nvs.sh
"""
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from collections import defaultdict
import matplotlib.cm
cmap = matplotlib.cm.get_cmap('plasma')
from utils.io import save_vid, str_to_frame, save_bones, load_root, load_sils
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, \
raycast, sample_xy, K2inv, get_near_far, \
chunk_rays
from nnutils.rendering import render_rays
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
# script specific ones
flags.DEFINE_integer('maxframe', 1, 'maximum number of frames to render')
flags.DEFINE_integer('vidid', 0, 'video id that determines the env code')
flags.DEFINE_integer('bullet_time', -1, 'frame id in a video to show bullet time')
flags.DEFINE_float('scale', 0.1,
'scale applied to the rendered image (wrt focal length)')
flags.DEFINE_string('rootdir', 'tmp/traj/','root body directory')
flags.DEFINE_string('nvs_outpath', 'tmp/nvs-','output prefix')
def construct_rays_nvs(img_size, rtks, near_far, rndmask, device):
"""
rndmask: controls which pixel to render
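    rtks: (bs, 4, 4) camera matrices; rows 0-2 are [R|T], row 3 is [fx, fy, px, py]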
"""
bs = rtks.shape[0]
rtks = torch.Tensor(rtks).to(device)
rndmask = torch.Tensor(rndmask).to(device).view(-1)>0
_, xys = sample_xy(img_size, bs, 0, device, return_all=True)
xys=xys[:,rndmask]
Rmat = rtks[:,:3,:3]
Tmat = rtks[:,:3,3]
Kinv = K2inv(rtks[:,3])
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
return rays
def main(_):
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
model = trainer.model
model.eval()
nerf_models = model.nerf_models
embeddings = model.embeddings
# bs, 4,4 (R|T)
# (f|p)
nframe=120
img_size = int(512 * opts.scale)
fl = img_size
pp = img_size/2
rtks = np.zeros((nframe,4,4))
rot1 = cv2.Rodrigues(np.asarray([0,np.pi/2,0]))[0]
rot2 = cv2.Rodrigues(np.asarray([np.pi,0,0]))[0]
rtks[:,:3,:3] = np.dot(rot1, rot2)[None]
rtks[:,2,3] = 0.2
rtks[:,3] = np.asarray([fl,fl,pp,pp])[None]
sample_idx = np.asarray(range(nframe)).astype(int)
# determine render image scale
bs = len(rtks)
print("render size: %d"%img_size)
model.img_size = img_size
opts.render_size = img_size
vars_np = {}
vars_np['rtk'] = rtks
vars_np['idk'] = np.ones(bs)
near_far = torch.zeros(bs,2).to(model.device)
near_far = get_near_far(near_far,
vars_np,
pts=model.latest_vars['mesh_rest'].vertices)
depth_near = near_far[0,0].cpu().numpy()
depth_far = near_far[0,1].cpu().numpy()
vidid = torch.Tensor([opts.vidid]).to(model.device).long()
source_l = model.data_offset[opts.vidid+1] - model.data_offset[opts.vidid] -1
embedid = torch.Tensor(sample_idx).to(model.device).long() + \
model.data_offset[opts.vidid]
print(embedid)
rgbs = []
sils = []
dphs = []
viss = []
for i in range(bs):
model_path = '%s/%s'% (opts.model_path.rsplit('/',1)[0], 'params_%d.pth'%(i))
trainer.load_network(model_path, is_eval=True)# load latest
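        # frame i is rendered with checkpoint params_i.pth, so the resulting
        # video visualizes how the reconstruction evolves over training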
rndmask = np.ones((img_size, img_size))>0
rays = construct_rays_nvs(model.img_size, rtks[i:i+1],
near_far[i:i+1], rndmask, model.device)
# add env code
rays['env_code'] = model.env_code(embedid[i:i+1])[:,None]
rays['env_code'] = rays['env_code'].repeat(1,rays['nsample'],1)
## add bones
#time_embedded = model.pose_code(embedid[i:i+1])[:,None]
#rays['time_embedded'] = time_embedded.repeat(1,rays['nsample'],1)
#if opts.lbs and model.num_bone_used>0:
# bone_rts = model.nerf_body_rts(embedid[i:i+1])
# rays['bone_rts'] = bone_rts.repeat(1,rays['nsample'],1)
# model.update_delta_rts(rays)
with torch.no_grad():
# render images only
results=defaultdict(list)
bs_rays = rays['bs'] * rays['nsample'] #
for j in range(0, bs_rays, opts.chunk):
rays_chunk = chunk_rays(rays,j,opts.chunk)
rendered_chunks = render_rays(nerf_models,
embeddings,
rays_chunk,
N_samples = opts.ndepth,
perturb=0,
noise_std=0,
chunk=opts.chunk, # chunk size is effective in val mode
use_fine=True,
img_size=model.img_size,
obj_bound = model.latest_vars['obj_bound'],
render_vis=True,
opts=opts,
)
for k, v in rendered_chunks.items():
results[k] += [v]
for k, v in results.items():
v = torch.cat(v, 0)
v = v.view(rays['nsample'], -1)
results[k] = v
rgb = results['img_coarse'].cpu().numpy()
dph = results['depth_rnd'] [...,0].cpu().numpy()
sil = results['sil_coarse'][...,0].cpu().numpy()
vis = results['vis_pred'] [...,0].cpu().numpy()
#sil[sil<0.5] = 0
#rgb[sil<0.5] = 1
rgbtmp = np.ones((img_size, img_size, 3))
dphtmp = np.ones((img_size, img_size))
siltmp = np.ones((img_size, img_size))
vistmp = np.ones((img_size, img_size))
rgbtmp[rndmask>0] = rgb
dphtmp[rndmask>0] = dph
siltmp[rndmask>0] = sil
vistmp[rndmask>0] = vis
rgb = rgbtmp
sil = siltmp
vis = vistmp
dph = dphtmp
dph = (dph - depth_near) / (depth_far - depth_near)*2
dph = np.clip(dph,0,1)
dph = cmap(dph)
rgb = rgb * sil[...,None]
dph = dph * sil[...,None]
rgbs.append(rgb)
sils.append(sil*255)
viss.append(vis*255)
dphs.append(dph*255)
cv2.imwrite('%s-rgb_%05d.png'%(opts.nvs_outpath,i), rgb[...,::-1]*255)
cv2.imwrite('%s-sil_%05d.png'%(opts.nvs_outpath,i), sil*255)
cv2.imwrite('%s-vis_%05d.png'%(opts.nvs_outpath,i), vis*255)
cv2.imwrite('%s-dph_%05d.png'%(opts.nvs_outpath,i), dph[...,::-1]*255)
save_vid('%s-rgb'%(opts.nvs_outpath), rgbs, suffix='.mp4')
save_vid('%s-sil'%(opts.nvs_outpath), sils, suffix='.mp4')
save_vid('%s-vis'%(opts.nvs_outpath), viss, suffix='.mp4')
save_vid('%s-dph'%(opts.nvs_outpath), dphs, suffix='.mp4')
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
scripts/visualize/nvs_iter.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# TODO: pass ft_cse to use fine-tuned feature
# TODO: pass fine_steps -1 to use fine samples
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam,\
Kmatinv, K2mat, K2inv, sample_xy, resample_dp,\
raycast
from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
def construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far, flip=True):
device = dp_feats_rsmp.device
bs,nsample,_ =xys.shape
opts = model.opts
embedid=model.embedid
embedid = embedid.long().to(device)[:,None]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
rtk_vec = rays['rtk_vec']
del rays
feats_at_samp = [dp_feats_rsmp[i].view(model.num_feat,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
# TODO implement for se3
if opts.lbs and model.num_bone_used>0:
bone_rts = model.nerf_body_rts(embedid)
bone_rts = bone_rts.repeat(1,nsample,1)
# TODO rearrange inputs
feats_at_samp = feats_at_samp.view(-1, model.num_feat)
xys = xys.view(-1,1,2)
    if flip:
        # swap each consecutive (src, tgt) pair so that each frame's rays
        # and bone transforms are matched against the other frame in its pair
        rtk_vec = rtk_vec.view(bs//2,2,-1).flip(1).view(rtk_vec.shape)
        bone_rts = bone_rts.view(bs//2,2,-1).flip(1).view(bone_rts.shape)
rays = {'rtk_vec': rtk_vec,
'bone_rts': bone_rts}
return rays, feats_at_samp, xys
def match_frames(trainer, idxs, nsample=200):
idxs = [int(i) for i in idxs.split(' ')]
bs = len(idxs)
opts = trainer.opts
device = trainer.device
model = trainer.model
model.eval()
# load frames and aux data
for dataset in trainer.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idxs:
batch.append( trainer.evalloader.dataset[i] )
batch = trainer.evalloader.collate_fn(batch)
model.set_input(batch)
rtk = model.rtk
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
kaug = model.kaug # according to cropping, p = Kaug Kmat P
Kaug = K2inv(kaug)
Kinv = Kmatinv(Kaug.matmul(Kmat))
near_far = model.near_far[model.frameid.long()]
dp_feats_rsmp = model.dp_feats
# construct rays for sampled pixels
rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=False)
rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far)
model.update_delta_rts(rays)
# re-project
with torch.no_grad():
pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
model.latest_vars['obj_bound'],grid_size=20,is_training=False)
pts_pred = pts_pred.view(bs,nsample,3)
xy_reproj = kp_reproj(pts_pred, model.nerf_models, model.embedding_xyz, rays)
# draw
imgs_trg = model.imgs.view(bs//2,2,-1).flip(1).view(model.imgs.shape)
xy_reproj = xy_reproj.view(bs,nsample,2)
xys = xys.view(bs,nsample, 2)
sil_at_samp = torch.stack([model.masks[i].view(-1,1)[rand_inds[i]] \
for i in range(bs)],0) # bs,ns,1
for i in range(bs):
img1 = model.imgs[i]
img2 = imgs_trg[i]
img = torch.cat([img1, img2],2)
valid_idx = sil_at_samp[i].bool()[...,0]
p1s = xys[i][valid_idx]
p2s = xy_reproj[i][valid_idx]
p2s[...,0] = p2s[...,0] + img1.shape[2]
img = draw_lines(img, p1s,p2s)
cv2.imwrite('tmp/match_%04d.png'%i, img)
# visualize matching error
if opts.render_size<=128:
with torch.no_grad():
rendered, rand_inds = model.nerf_render(rtk, kaug, model.embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
xyz_camera = rendered['xyz_camera_vis'][0].reshape(opts.render_size**2,-1)
xyz_canonical = rendered['xyz_canonical_vis'][0].reshape(opts.render_size**2,-1)
skip_idx = len(xyz_camera)//50 # vis 50 rays
trimesh.Trimesh(xyz_camera[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_camera_pts.obj')
trimesh.Trimesh(xyz_canonical[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_canonical_pts.obj')
vis_match(rendered, model.masks, model.imgs,
bs,opts.img_size, opts.ndepth)
## construct rays for all pixels
#rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=True)
#rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
# Rmat, Tmat, Kinv, near_far, flip=False)
#with torch.no_grad():
# pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
# model.latest_vars['obj_bound'],grid_size=20,is_training=False)
# pts_pred = pts_pred.view(bs,opts.render_size**2,3)
# proj_err = kp_reproj_loss(pts_pred, xys, model.nerf_models,
# model.embedding_xyz, rays)
# proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
# proj_err = proj_err/opts.img_size * 2
# results = {}
# results['proj_err'] = proj_err
## visualize current error stats
#feat_err=model.latest_vars['fp_err'][:,0]
#proj_err=model.latest_vars['fp_err'][:,1]
#feat_err = feat_err[feat_err>0]
#proj_err = proj_err[proj_err>0]
#print('feat-med: %f'%(np.median(feat_err)))
#print('proj-med: %f'%(np.median(proj_err)))
#plt.hist(feat_err,bins=100)
#plt.savefig('tmp/viser_feat_err.jpg')
#plt.clf()
#plt.hist(proj_err,bins=100)
#plt.savefig('tmp/viser_proj_err.jpg')
# visualize codes
with torch.no_grad():
fid = torch.Tensor(range(0,len(model.impath))).cuda().long()
D=model.pose_code(fid)
D = D.view(len(fid),-1)
##TODO
#px = torch.Tensor(range(len(D))).cuda()
#py = px*2
#pz = px*5+1
#D = torch.stack([px,py,pz],-1)
        D = D-D.mean(0)[None]              # center the pose codes
        A = D.T.matmul(D)/D.shape[0]       # feature covariance, f x f
        U,S,V = torch.svd(A)
        code_proj_3d = D.matmul(V[:,:3])   # project onto top-3 principal directions
cmap = matplotlib.cm.get_cmap('cool')
time = np.asarray(range(len(model.impath)))
time = time/time.max()
code_proj_3d=code_proj_3d.detach().cpu().numpy()
trimesh.Trimesh(code_proj_3d, vertex_colors=cmap(time)).export('tmp/0.obj')
#plt.figure(figsize=(16,16))
plot_stack = []
weight_dir = opts.model_path.rsplit('/',1)[0]
bne_path = sorted(glob.glob('%s/%s-*bne-mrender*.jpg'%\
(weight_dir, opts.seqname)))
img_path = model.impath.copy()
## remove the last img for each video to make shape consistent with bone renders
#for i in model.data_offset[1:][::-1]:
# img_path.remove(img_path[i-1])
# code_proj_3d = np.delete(code_proj_3d, i-1,0)
# plot the first video
img_path = img_path [:model.data_offset[1]-2]
code_proj_3d = code_proj_3d[:model.data_offset[1]-2]
try:
bne_path = bne_path [:model.data_offset[1]-2]
except:
pass
for i in range(len(code_proj_3d)):
plt.plot(code_proj_3d[i,0], code_proj_3d[i,1], color=cmap(time[i]), marker='o')
plt.annotate(str(i), (code_proj_3d[i,0], code_proj_3d[i,1]))
plt.xlim(code_proj_3d[:,0].min(), code_proj_3d[:,0].max())
plt.ylim(code_proj_3d[:,1].min(), code_proj_3d[:,1].max())
fig = plt.gcf()
fig.canvas.draw()
plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
plot = plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
print('plot pose code of frame id:%03d'%i)
if len(bne_path) == len(code_proj_3d):
bneimg = cv2.imread(bne_path[i])
bneimg = cv2.resize(bneimg,\
(bneimg.shape[1]*plot.shape[0]//bneimg.shape[0], plot.shape[0]))
img=cv2.imread(img_path[i])[:,:,::-1]
img = cv2.resize(img,\
(img.shape[1]*plot.shape[0]//img.shape[0], plot.shape[0]))
plot = np.hstack([img, bneimg, plot])
plot_stack.append(plot)
save_vid('tmp/code', plot_stack, suffix='.mp4',
upsample_frame=150.,fps=30)
save_vid('tmp/code', plot_stack, suffix='.gif',
upsample_frame=150.,fps=30)
# vis dps
cv2.imwrite('tmp/match_dpc.png', model.dp_vis[model.dps[0].long()].cpu().numpy()*255)
def main(_):
opts.img_size=opts.render_size
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
#write matching function
img_match = match_frames(trainer, opts.match_frames)
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
scripts/visualize/match.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
python scripts/ama-process/ama2davis.py --path ./database/T_swing/
"""
import pdb
import cv2
import numpy as np
import os
import glob
import argparse
import sys
from shutil import copyfile
sys.path.insert(0,'')
from utils.io import mkdir_p
parser = argparse.ArgumentParser(description='script to render cameras over epochs')
parser.add_argument('--path', default='',
help='path to ama seq dir')
args = parser.parse_args()
path = '%s/images/*'%args.path
seqname = args.path.strip('/').split('/')[-1]
outdir = './database/DAVIS/'
vid_idx = 0
idx = 0
for rgb_path in sorted(glob.glob(path)):
vid_idx_tmp = int(rgb_path.split('/')[-1].split('_')[0][5:])
if vid_idx_tmp != vid_idx:
idx=0
vid_idx = vid_idx_tmp
outsil_dir = '%s/Annotations/Full-Resolution/%s%d'%(outdir, seqname,vid_idx)
outrgb_dir = '%s/JPEGImages/Full-Resolution/%s%d'%(outdir, seqname,vid_idx)
#TODO delete if exists
mkdir_p(outrgb_dir)
mkdir_p(outsil_dir)
sil_path = rgb_path.replace('images', 'silhouettes').replace('Image','Silhouette')
outsil_path = '%s/%05d.png'%(outsil_dir, idx)
sil = cv2.imread(sil_path,0)
sil = (sil>0).astype(np.uint8)
# remove extra sils
nb_components, output, stats, centroids = \
cv2.connectedComponentsWithStats(sil, connectivity=8)
    if nb_components>1:
        # keep only the largest connected component
        max_label, max_size = max([(i, stats[i, cv2.CC_STAT_AREA])
                                   for i in range(1, nb_components)],
                                  key=lambda x: x[1])
        sil = output == max_label
    sil = (sil>0).astype(np.uint8)*128
cv2.imwrite(outsil_path, sil)
outrgb_path = '%s/%05d.jpg'%(outrgb_dir, idx)
img = cv2.imread(rgb_path)
cv2.imwrite(outrgb_path, img)
print(outrgb_path)
print(outsil_path)
idx = idx+1
|
banmo-main
|
scripts/ama-process/ama2davis.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import cv2
import pdb
pmat = np.loadtxt('/private/home/gengshany/data/AMA/T_swing/calibration/Camera1.Pmat.cal')
K,R,T,_,_,_,_=cv2.decomposeProjectionMatrix(pmat)
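# decomposeProjectionMatrix returns K only up to scale and the camera center
# as a homogeneous 4-vector, hence the normalizations below.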
print(K/K[-1,-1])
print(R)
print(T/T[-1])
pdb.set_trace()
|
banmo-main
|
scripts/ama-process/read_cam.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys
sys.path.insert(0,'third_party')
sys.path.insert(0,'./')
import numpy as np
import trimesh
import torch
import cv2
import pdb
from scipy.spatial.transform import Rotation as R
from nnutils.geom_utils import obj_to_cam, pinhole_cam, render_color, render_flow
from ext_utils.flowlib import flow_to_image
from ext_utils.util_flow import write_pfm
from utils.io import mkdir_p
import soft_renderer as sr
import argparse
parser = argparse.ArgumentParser(description='render data')
parser.add_argument('--outdir', default='eagle',
help='output dir')
parser.add_argument('--model', default='eagle',
help='model to render, {eagle, hands}')
parser.add_argument('--rot_axis', default='y',
help='axis to rotate around')
parser.add_argument('--nframes', default=3,type=int,
help='number of frames to render')
parser.add_argument('--alpha', default=1.,type=float,
help='0-1, percentage of a full cycle')
parser.add_argument('--init_a', default=0.25,type=float,
help='0-1, percentage of a full cycle for initial pose')
parser.add_argument('--xspeed', default=0,type=float,
help='times speed up')
parser.add_argument('--focal', default=2,type=float,
help='focal length')
parser.add_argument('--d_obj', default=3,type=float,
help='object depth')
parser.add_argument('--can_rand', dest='can_rand',action='store_true',
                    help='randomize canonical space')
parser.add_argument('--img_size', default=512,type=int,
help='image size')
parser.add_argument('--render_flow', dest='render_flow',action='store_true',
help='render flow')
args = parser.parse_args()
## io
img_size = args.img_size
bgcolor = None
#bgcolor = np.asarray([0,0,0])
d_obj = args.d_obj
filedir='database'
rot_rand = torch.Tensor(R.random().as_matrix()).cuda()
overts_list = []
for i in range(args.nframes):
if args.model=='eagle':
mesh = sr.Mesh.from_obj('database/eagle/Eagle-original_%06d.obj'%int(i*args.xspeed), load_texture=True, texture_res=5, texture_type='surface')
elif args.model=='hands':
mesh = sr.Mesh.from_obj('database/hands/hands_%06d.obj'%int(1+i*args.xspeed), load_texture=True, texture_res=100, texture_type='surface')
overts = mesh.vertices
if i==0:
center = overts.mean(1)[:,None]
scale = max((overts - center)[0].abs().max(0)[0])
overts -= center
overts *= 1.0 / float(scale)
    overts[:,:,1]*= -1 # align with camera coordinates
# random rot
if args.can_rand:
overts[0] = overts[0].matmul(rot_rand.T)
overts_list.append(overts)
colors=mesh.textures
faces = mesh.faces
mkdir_p( '%s/DAVIS/JPEGImages/Full-Resolution/%s/' %(filedir,args.outdir))
mkdir_p( '%s/DAVIS/Annotations/Full-Resolution/%s/' %(filedir,args.outdir))
mkdir_p( '%s/DAVIS/Cameras/Full-Resolution/%s/' %(filedir,args.outdir))
mkdir_p( '%s/DAVIS/Meshes/Full-Resolution/%s/' %(filedir,args.outdir))
# soft renderer
renderer = sr.SoftRenderer(image_size=img_size, sigma_val=1e-12,
camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
#light_intensity_ambient=0.,light_intensity_directionals=1., light_directions=[-1.,-0.5,1.])
verts_ndc_list = []
for i in range(0,args.nframes):
verts = overts_list[i]
# set cameras
#rotx = np.random.rand()
if args.rot_axis=='x':
rotx = args.init_a*6.28+args.alpha*6.28*i/args.nframes
else:
rotx=0.
# if i==0: rotx=0.
if args.rot_axis=='y':
roty = args.init_a*6.28+args.alpha*6.28*i/args.nframes
else:
roty = 0
rotz = 0.
Rmat = cv2.Rodrigues(np.asarray([rotx, roty, rotz]))[0]
Rmat = torch.Tensor(Rmat).cuda()
# random rot
if args.can_rand:
Rmat = Rmat.matmul(rot_rand.T)
Tmat = torch.Tensor([0,0,d_obj] ).cuda()
K = torch.Tensor([args.focal,args.focal,0,0] ).cuda()
Kimg = torch.Tensor([args.focal*img_size/2.,args.focal*img_size/2.,img_size/2.,img_size/2.] ).cuda()
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
rtk = np.zeros((4,4))
rtk[:3,:3] = Rmat.cpu().numpy()
rtk[:3, 3] = Tmat.cpu().numpy()
rtk[3, :] = Kimg .cpu().numpy()
# obj-cam transform
verts = obj_to_cam(verts, Rmat, Tmat)
mesh_cam = trimesh.Trimesh(vertices=verts[0].cpu().numpy(),
faces=faces[0].cpu().numpy())
trimesh.repair.fix_inversion(mesh_cam)
    # perspective projection
verts = pinhole_cam(verts, K)
verts_ndc_list.append(verts.clone())
# render sil+rgb
rendered = render_color(renderer, verts, faces, colors, texture_type='surface')
rendered_img = rendered[0,:3].permute(1,2,0).cpu().numpy()*255
rendered_sil = rendered[0,-1].cpu().numpy()*128
if bgcolor is None:
bgcolor = 255-rendered_img[rendered_sil.astype(bool)].mean(0)
rendered_img[~rendered_sil.astype(bool)]=bgcolor[None]
cv2.imwrite('%s/DAVIS/JPEGImages/Full-Resolution/%s/%05d.jpg' %(filedir,args.outdir,i),rendered_img[:,:,::-1])
cv2.imwrite('%s/DAVIS/Annotations/Full-Resolution/%s/%05d.png' %(filedir,args.outdir,i),rendered_sil)
np.savetxt('%s/DAVIS/Cameras/Full-Resolution/%s/%05d.txt' %(filedir,args.outdir,i),rtk)
mesh_cam.export('%s/DAVIS/Meshes/Full-Resolution/%s/%05d.obj' %(filedir,args.outdir,i))
print(i)
if args.render_flow:
for dframe in [1,2,4,8,16,32]:
print('dframe: %d'%(dframe))
flobw_outdir = '%s/DAVIS/FlowBW_%d/Full-Resolution/%s/'%(filedir,dframe,args.outdir)
flofw_outdir = '%s/DAVIS/FlowFW_%d/Full-Resolution/%s/'%(filedir,dframe,args.outdir)
mkdir_p(flofw_outdir)
mkdir_p(flobw_outdir)
# render flow
occ = -np.ones((img_size, img_size)).astype(np.float32)
for i in range(dframe,args.nframes):
verts_ndc = verts_ndc_list[i-dframe]
verts_ndc_n = verts_ndc_list[i]
flow_fw = render_flow(renderer, verts_ndc, faces, verts_ndc_n)
flow_bw = render_flow(renderer, verts_ndc_n, faces, verts_ndc)
# to pixels
flow_fw = flow_fw*(img_size-1)/2
flow_bw = flow_bw*(img_size-1)/2
flow_fw = flow_fw.cpu().numpy()[0]
flow_bw = flow_bw.cpu().numpy()[0]
write_pfm( '%s/flo-%05d.pfm'%(flofw_outdir,i-dframe),flow_fw)
write_pfm( '%s/flo-%05d.pfm'%(flobw_outdir,i), flow_bw)
write_pfm( '%s/occ-%05d.pfm'%(flofw_outdir,i-dframe),occ)
write_pfm( '%s/occ-%05d.pfm'%(flobw_outdir,i), occ)
cv2.imwrite('%s/col-%05d.jpg'%(flofw_outdir,i-dframe),flow_to_image(flow_fw)[:,:,::-1])
cv2.imwrite('%s/col-%05d.jpg'%(flobw_outdir,i), flow_to_image(flow_bw)[:,:,::-1])
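# For reference, the dumped flow can be read back the same way the dataloader
# does it (see vidbase.py), e.g. flow = readPFM('%s/flo-00000.pfm'%flofw_outdir)[0]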
|
banmo-main
|
scripts/synthetic/render_synthetic.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# python scripts/eval_root.py cam-files/adult7-b25/ cam-files/adult-masked-cam/ 1000
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
import pdb
import glob
import numpy as np
import torch
import cv2
import soft_renderer as sr
import argparse
import trimesh
import configparser
from utils.io import config_to_dataloader, draw_cams, load_root
from nnutils.geom_utils import rot_angle, align_sim3
root_a_dir=sys.argv[1]
root_b_dir=sys.argv[2]
cap_frame=int(sys.argv[3])
def umeyama_alignment(x, y, with_scale=False):
"""
https://github.com/Huangying-Zhan/kitti-odom-eval/blob/master/kitti_odometry.py
    Computes the least-squares Sim(m) transformation parameters
    that minimize the distance between two sets of registered points.
Umeyama, Shinji: Least-squares estimation of transformation parameters
between two point patterns. IEEE PAMI, 1991
:param x: mxn matrix of points, m = dimension, n = nr. of data points
:param y: mxn matrix of points, m = dimension, n = nr. of data points
:param with_scale: set to True to align also the scale (default: 1.0 scale)
:return: r, t, c - rotation matrix, translation vector and scale factor
"""
if x.shape != y.shape:
assert False, "x.shape not equal to y.shape"
# m = dimension, n = nr. of data points
m, n = x.shape
# means, eq. 34 and 35
mean_x = x.mean(axis=1)
mean_y = y.mean(axis=1)
# variance, eq. 36
# "transpose" for column subtraction
sigma_x = 1.0 / n * (np.linalg.norm(x - mean_x[:, np.newaxis])**2)
# covariance matrix, eq. 38
outer_sum = np.zeros((m, m))
for i in range(n):
outer_sum += np.outer((y[:, i] - mean_y), (x[:, i] - mean_x))
cov_xy = np.multiply(1.0 / n, outer_sum)
# SVD (text betw. eq. 38 and 39)
u, d, v = np.linalg.svd(cov_xy)
# S matrix, eq. 43
s = np.eye(m)
if np.linalg.det(u) * np.linalg.det(v) < 0.0:
# Ensure a RHS coordinate system (Kabsch algorithm).
s[m - 1, m - 1] = -1
# rotation, eq. 40
r = u.dot(s).dot(v)
# scale & translation, eq. 42 and 41
c = 1 / sigma_x * np.trace(np.diag(d).dot(s)) if with_scale else 1.0
t = mean_y - np.multiply(c, r.dot(mean_x))
return r, t, c
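# Minimal sanity-check sketch for umeyama_alignment (hypothetical values):
#   x = np.random.rand(3, 100)                       # m=3 dims, n=100 points
#   r_gt = np.eye(3); t_gt = np.ones(3); c_gt = 2.0  # known transform
#   y = c_gt * r_gt.dot(x) + t_gt[:, None]
#   r, t, c = umeyama_alignment(x, y, with_scale=True)
#   # r, t, c recover r_gt, t_gt, c_gt up to numerical noise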
def main():
rootlist_a = load_root(root_a_dir, cap_frame)
rootlist_b = load_root(root_b_dir, cap_frame)
# align
rootlist_b = align_sim3(rootlist_a, rootlist_b)
# construct camera mesh
mesh_a = draw_cams(rootlist_a, color='gray')
mesh_b = draw_cams(rootlist_b)
mesh = trimesh.util.concatenate([mesh_a, mesh_b])
mesh.export('0.obj')
# python ... path to camera folder
# will draw a trajectory of camera locations
if __name__ == '__main__':
main()
|
banmo-main
|
scripts/eval/eval_root.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import os.path as osp
import sys
sys.path.insert(0,'third_party')
import numpy as np
from absl import flags, app
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch.nn.functional as F
import cv2
import time
from scipy.ndimage import binary_erosion
from ext_utils.util_flow import readPFM
from ext_utils.flowlib import warp_flow
from nnutils.geom_utils import resample_dp
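# read_json picks, from an OpenPose-style json, the detected person whose 2D
# keypoints overlap the given foreground mask the most.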
def read_json(filepath, mask):
import json
with open(filepath) as f:
maxscore=-1
for pid in json.load(f)['people']:
ppose = np.asarray(pid['pose_keypoints_2d']).reshape((-1,3))
pocc = cv2.remap(mask.astype(int), ppose[:,0].astype(np.float32),ppose[:,1].astype(np.float32),interpolation=cv2.INTER_NEAREST)
pscore = pocc.sum()
if pscore>maxscore: maxscore = pscore; maxpose = ppose
return maxpose
# -------------- Dataset ------------- #
# ------------------------------------ #
class BaseDataset(Dataset):
'''
img, mask, kp, pose data loader
'''
def __init__(self, opts, filter_key=None):
# Child class should define/load:
# self.kp_perm
# self.img_dir
# self.anno
# self.anno_sfm
self.opts = opts
self.img_size = opts['img_size']
self.filter_key = filter_key
self.flip=0
self.crop_factor = 1.2
self.load_pair = True
self.spec_dt = 0 # whether to specify the dframe, only in preload
def mirror_image(self, img, mask):
if np.random.rand(1) > 0.5:
            # Need copy because torch collate doesn't like negative strides
img_flip = img[:, ::-1, :].copy()
mask_flip = mask[:, ::-1].copy()
return img_flip, mask_flip
else:
return img, mask
def __len__(self):
return self.num_imgs
def read_raw(self, im0idx, flowfw,dframe):
#ss = time.time()
img_path = self.imglist[im0idx]
img = cv2.imread(img_path)[:,:,::-1] / 255.0
shape = img.shape
if len(shape) == 2:
img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
mask = cv2.imread(self.masklist[im0idx],0)
#print('mask+img:%f'%(time.time()-ss))
mask = mask/np.sort(np.unique(mask))[1]
occluder = mask==255
mask[occluder] = 0
if mask.shape[0]!=img.shape[0] or mask.shape[1]!=img.shape[1]:
mask = cv2.resize(mask, img.shape[:2][::-1],interpolation=cv2.INTER_NEAREST)
mask = binary_erosion(mask,iterations=2)
mask = np.expand_dims(mask, 2)
#print('mask sort:%f'%(time.time()-ss))
# flow
if flowfw:
flowpath = self.flowfwlist[im0idx]
else:
flowpath = self.flowbwlist[im0idx]
flowpath = flowpath.replace('FlowBW', 'FlowBW_%d'%(dframe)).\
replace('FlowFW', 'FlowFW_%d'%(dframe))
try:
flow = readPFM(flowpath)[0]
occ = readPFM(flowpath.replace('flo-', 'occ-'))[0]
h,w,_ = mask.shape
oh,ow=flow.shape[:2]
factor_h = h/oh
factor_w = w/ow
flow = cv2.resize(flow, (w,h))
occ = cv2.resize(occ, (w,h))
flow[...,0] *= factor_w
flow[...,1] *= factor_h
except:
print('warning: loading empty flow from %s'%(flowpath))
flow = np.zeros_like(img)
occ = np.zeros_like(mask)
flow = flow[...,:2]
occ[occluder] = 0
#print('flo:%f'%(time.time()-ss))
try:
dp = readPFM(self.dplist[im0idx])[0]
except:
print('error loading densepose surface')
dp = np.zeros_like(occ)
try:
dp_feat = readPFM(self.featlist[im0idx])[0]
dp_bbox = np.loadtxt(self.bboxlist[im0idx])
except:
print('error loading densepose feature')
dp_feat = np.zeros((16*112,112))
dp_bbox = np.zeros((4))
dp= (dp *50).astype(np.int32)
dp_feat = dp_feat.reshape((16,112,112)).copy()
#print('dp:%f'%(time.time()-ss))
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
try:
rtk_path = self.rtklist[im0idx]
rtk = np.loadtxt(rtk_path)
except:
#print('warning: loading empty camera')
#print(rtk_path)
rtk = np.zeros((4,4))
rtk[:3,:3] = np.eye(3)
rtk[:3, 3] = np.asarray([0,0,10])
rtk[3, :] = np.asarray([512,512,256,256])
        # create mask for visible vs unknown
vis2d = np.ones_like(mask)
#print('rtk:%f'%(time.time()-ss))
# crop the image according to mask
kaug, hp0, A, B= self.compute_crop_params(mask)
#print('crop params:%f'%(time.time()-ss))
x0 = hp0[:,:,0].astype(np.float32)
y0 = hp0[:,:,1].astype(np.float32)
img = cv2.remap(img,x0,y0,interpolation=cv2.INTER_LINEAR)
mask = cv2.remap(mask.astype(int),x0,y0,interpolation=cv2.INTER_NEAREST)
flow = cv2.remap(flow,x0,y0,interpolation=cv2.INTER_LINEAR)
occ = cv2.remap(occ,x0,y0,interpolation=cv2.INTER_LINEAR)
dp =cv2.remap(dp, x0,y0,interpolation=cv2.INTER_NEAREST)
vis2d=cv2.remap(vis2d.astype(int),x0,y0,interpolation=cv2.INTER_NEAREST)
#print('crop:%f'%(time.time()-ss))
# Finally transpose the image to 3xHxW
img = np.transpose(img, (2, 0, 1))
mask = (mask>0).astype(float)
#TODO transform dp feat to same size as img
dp_feat_rsmp = resample_dp(F.normalize(torch.Tensor(dp_feat)[None],2,1),
torch.Tensor(dp_bbox)[None],
torch.Tensor(kaug )[None],
self.img_size)
rt_dict = {}
rt_dict['img'] = img
rt_dict['mask'] = mask
rt_dict['flow'] = flow
rt_dict['occ'] = occ
rt_dict['dp'] = dp
rt_dict['vis2d'] = vis2d
rt_dict['dp_feat'] = dp_feat
rt_dict['dp_feat_rsmp'] = dp_feat_rsmp
rt_dict['dp_bbox'] = dp_bbox
rt_dict['rtk'] = rtk
return rt_dict, kaug, hp0, A,B
def compute_crop_params(self, mask):
#ss=time.time()
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( (xid.max()-xid.min())//2, (yid.max()-yid.min())//2)
length = (int(self.crop_factor*length[0]), int(self.crop_factor*length[1]))
#print('center:%f'%(time.time()-ss))
maxw=self.img_size;maxh=self.img_size
orisize = (2*length[0], 2*length[1])
alp = [orisize[0]/maxw ,orisize[1]/maxw]
        # intrinsics induced by augmentation: map augmented coords back to the original image
        # correct cx,cy in clip space (not tx, ty)
if self.flip==0:
pps = np.asarray([float( center[0] - length[0] ), float( center[1] - length[1] )])
else:
pps = np.asarray([-float( center[0] - length[0] ), float( center[1] - length[1] )])
kaug = np.asarray([alp[0], alp[1], pps[0], pps[1]])
x0,y0 =np.meshgrid(range(maxw),range(maxh))
A = np.eye(3)
B = np.asarray([[alp[0],0,(center[0]-length[0])],
[0,alp[1],(center[1]-length[1])],
[0,0,1]]).T
hp0 = np.stack([x0,y0,np.ones_like(x0)],-1) # screen coord
hp0 = np.dot(hp0,A.dot(B)) # image coord
return kaug, hp0, A,B
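    # A sketch of the math above: with bbox center c, padded half-length l and
    # scale alp, an output pixel (x, y) maps to the original-image pixel
    # (alp_x*x + c_x - l_x, alp_y*y + c_y - l_y) via hp0 = [x, y, 1].(A @ B);
    # kaug stores (alp_x, alp_y, px, py) to undo this in the intrinsics.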
def flow_process(self,flow, flown, occ, occn, hp0, hp1, A,B,Ap,Bp):
maxw=self.img_size;maxh=self.img_size
        # augment flow: remap it through both frames' crop transforms
hp1c = np.concatenate([flow[:,:,:2] + hp0[:,:,:2], np.ones_like(hp0[:,:,:1])],-1) # image coord
hp1c = hp1c.dot(np.linalg.inv(Ap.dot(Bp))) # screen coord
flow[:,:,:2] = hp1c[:,:,:2] - np.stack(np.meshgrid(range(maxw),range(maxh)),-1)
hp0c = np.concatenate([flown[:,:,:2] +hp1[:,:,:2], np.ones_like(hp0[:,:,:1])],-1) # image coord
hp0c = hp0c.dot(np.linalg.inv(A.dot(B))) # screen coord
flown[:,:,:2] =hp0c[:,:,:2] - np.stack(np.meshgrid(range(maxw),range(maxh)),-1)
#fb check
x0,y0 =np.meshgrid(range(maxw),range(maxh))
hp0 = np.stack([x0,y0],-1) # screen coord
#hp0 = np.stack([x0,y0,np.ones_like(x0)],-1) # screen coord
dis = warp_flow(hp0 + flown, flow[:,:,:2]) - hp0
dis = np.linalg.norm(dis[:,:,:2],2,-1)
occ = dis / self.img_size * 2
#occ = np.exp(-5*occ) # 1/5 img size
occ = np.exp(-25*occ)
        occ[occ<0.25] = 0. # this corresponds to ~1/40 img size
#dis = np.linalg.norm(dis[:,:,:2],2,-1) * 0.1
#occ[occ!=0] = dis[occ!=0]
disn = warp_flow(hp0 + flow, flown[:,:,:2]) - hp0
disn = np.linalg.norm(disn[:,:,:2],2,-1)
occn = disn / self.img_size * 2
occn = np.exp(-25*occn)
occn[occn<0.25] = 0.
#disn = np.linalg.norm(disn[:,:,:2],2,-1) * 0.1
#occn[occn!=0] = disn[occn!=0]
# ndc
flow[:,:,0] = 2 * (flow[:,:,0]/maxw)
flow[:,:,1] = 2 * (flow[:,:,1]/maxh)
#flow[:,:,2] = np.logical_and(flow[:,:,2]!=0, occ<10) # as the valid pixels
flown[:,:,0] = 2 * (flown[:,:,0]/maxw)
flown[:,:,1] = 2 * (flown[:,:,1]/maxh)
#flown[:,:,2] = np.logical_and(flown[:,:,2]!=0, occn<10) # as the valid pixels
flow = np.transpose(flow, (2, 0, 1))
flown = np.transpose(flown, (2, 0, 1))
return flow, flown, occ, occn
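    # Note on the weighting above: with image size S and forward-backward
    # error `dis` in pixels, the confidence is exp(-50*dis/S); weights below
    # 0.25 (i.e. dis beyond roughly S/36) are zeroed out as occluded.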
def load_data(self, index):
#pdb.set_trace()
#ss = time.time()
try:dataid = self.dataid
except: dataid=0
im0idx = self.baselist[index]
dir_fac = self.directlist[index]*2-1
        # candidate frame gaps: keep those that divide im0idx and whose
        # paired frame stays within range
        dframe_list = [2,4,8,16,32]
        max_id = max(self.baselist)
        dframe_list = [1] + [i for i in dframe_list if (im0idx%i==0) and \
                        int(im0idx+i*dir_fac) <= max_id]
        dframe = np.random.choice(dframe_list)
if self.spec_dt>0:dframe=self.dframe
if self.directlist[index]==1:
# forward flow
im1idx = im0idx + dframe
flowfw = True
else:
im1idx = im0idx - dframe
flowfw = False
rt_dict, kaug, hp0, A,B = self.read_raw(im0idx, flowfw=flowfw,
dframe=dframe)
img = rt_dict['img']
mask = rt_dict['mask']
flow = rt_dict['flow']
occ = rt_dict['occ']
dp = rt_dict['dp']
vis2d = rt_dict['vis2d']
dp_feat = rt_dict['dp_feat']
dp_bbox = rt_dict['dp_bbox']
rtk = rt_dict['rtk']
dp_feat_rsmp = rt_dict['dp_feat_rsmp']
frameid = im0idx
is_canonical = self.can_frame == im0idx
#print('before 2nd read-raw:%f'%(time.time()-ss))
if self.load_pair:
rt_dictn,kaugn,hp1,Ap,Bp = self.read_raw(im1idx, flowfw=(not flowfw),
dframe=dframe)
imgn = rt_dictn['img']
maskn = rt_dictn['mask']
flown = rt_dictn['flow']
occn = rt_dictn['occ']
dpn = rt_dictn['dp']
vis2dn= rt_dictn['vis2d']
dp_featn = rt_dictn['dp_feat']
dp_bboxn = rt_dictn['dp_bbox']
rtkn = rt_dictn['rtk']
dp_featn_rsmp = rt_dictn['dp_feat_rsmp']
is_canonicaln = self.can_frame == im1idx
#print('before process:%f'%(time.time()-ss))
flow, flown, occ, occn = self.flow_process(flow, flown, occ, occn,
hp0, hp1, A,B,Ap,Bp)
#print('after process:%f'%(time.time()-ss))
# stack data
img = np.stack([img, imgn])
mask= np.stack([mask,maskn])
flow= np.stack([flow, flown])
occ = np.stack([occ, occn])
dp = np.stack([dp, dpn])
vis2d= np.stack([vis2d, vis2dn])
dp_feat= np.stack([dp_feat, dp_featn])
dp_feat_rsmp= np.stack([dp_feat_rsmp, dp_featn_rsmp])
dp_bbox = np.stack([dp_bbox, dp_bboxn])
rtk= np.stack([rtk, rtkn])
kaug= np.stack([kaug,kaugn])
dataid= np.stack([dataid, dataid])
frameid= np.stack([im0idx, im1idx])
is_canonical= np.stack([is_canonical, is_canonicaln])
elem = {}
elem['img'] = img # s
elem['mask'] = mask # s
elem['flow'] = flow # s
elem['occ'] = occ # s
elem['dp'] = dp # x
elem['dp_feat'] = dp_feat # y
elem['dp_feat_rsmp'] = dp_feat_rsmp # y
elem['dp_bbox'] = dp_bbox
elem['vis2d'] = vis2d # y
elem['rtk'] = rtk
elem['kaug'] = kaug
elem['dataid'] = dataid
elem['frameid'] = frameid
elem['is_canonical'] = is_canonical
return elem
def preload_data(self, index):
#TODO combine to a single function with load_data
try:dataid = self.dataid
except: dataid=0
im0idx = self.baselist[index]
dir_fac = self.directlist[index]*2-1
dframe_list = [2,4,8,16,32]
max_id = max(self.baselist)
dframe_list = [1] + [i for i in dframe_list if (im0idx%i==0) and \
int(im0idx+i*dir_fac) <= max_id]
dframe = np.random.choice(dframe_list)
if self.spec_dt>0:dframe=self.dframe
save_dir = self.imglist[0].replace('JPEGImages', 'Preload').rsplit('/',1)[0]
data_path = '%s/%d_%05d.npy'%(save_dir, dframe, im0idx)
elem = np.load(data_path,allow_pickle=True).item()
# modify dataid according to training time ones
elem['dataid'] = np.stack([dataid, dataid])[None]
# reload rtk based on rtk predictions
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
# always forward flow
im1idx = im0idx + dframe
try:
rtk_path = self.rtklist[im0idx]
rtk = np.loadtxt(rtk_path)
rtkn_path = self.rtklist[im1idx]
rtkn = np.loadtxt(rtkn_path)
rtk = np.stack([rtk, rtkn])
except:
#print('warning: loading empty camera')
#print(rtk_path)
rtk = np.zeros((4,4))
rtk[:3,:3] = np.eye(3)
rtk[:3, 3] = np.asarray([0,0,10])
rtk[3, :] = np.asarray([512,512,256,256])
rtkn = rtk.copy()
rtk = np.stack([rtk, rtkn])
elem['rtk']= rtk[None]
for k in elem.keys():
elem[k] = elem[k][0]
if not self.load_pair:
elem[k] = elem[k][:1]
# deal with img_size (only for eval visualization purpose)
current_size = elem['img'].shape[-1]
        # pad so that resized content plus 2*pad fills self.img_size;
        # target_size is forced even so the padding splits equally
        target_size = int(self.img_size / self.crop_factor * 1.2 /2) * 2
pad = (self.img_size - target_size)//2
for k in ['img', 'mask', 'flow', 'occ', 'dp', 'vis2d']:
tensor = torch.Tensor(elem[k]).view(1,-1,current_size, current_size)
tensor = F.interpolate(tensor, (target_size, target_size),
mode='nearest')
tensor = F.pad(tensor, (pad, pad, pad, pad))
elem[k] = tensor.numpy()
# deal with intrinsics change due to crop factor
length = elem['kaug'][:,:2] * 512 / 2 / 1.2
elem['kaug'][:,2:] += length*(1.2-self.crop_factor)
elem['kaug'][:,:2] *= current_size/float(target_size)
return elem
def __getitem__(self, index):
if self.preload:
# find the corresponding fw index in the dataset
if self.directlist[index] != 1:
refidx = self.baselist[index]-1
same_idx = np.where(np.asarray(self.baselist)==refidx)[0]
index = sorted(same_idx)[0]
try:
                # loading the last index of the dataset may fail; fall back below
elem = self.preload_data(index)
except:
print('loading %d failed'%index)
elem = self.preload_data(0)
else:
elem = self.load_data(index)
return elem
|
banmo-main
|
dataloader/vidbase.py
|
banmo-main
|
dataloader/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import scipy.io as sio
from absl import flags, app
import random
import torch
from torch.utils.data import Dataset
import pdb
import glob
from torch.utils.data import DataLoader
import configparser
from utils.io import config_to_dataloader
opts = flags.FLAGS
def _init_fn(worker_id):
np.random.seed(1003)
random.seed(1003)
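    # note: every worker gets the same fixed seeds, so random augmentations
    # are repeated across workers (deterministic data loading)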
#----------- Data Loader ----------#
#----------------------------------#
def data_loader(opts_dict, shuffle=True):
num_workers = opts_dict['n_data_workers'] * opts_dict['batch_size']
num_workers = min(num_workers, 8)
#num_workers = 0
print('# workers: %d'%num_workers)
print('# pairs: %d'%opts_dict['batch_size'])
data_inuse = config_to_dataloader(opts_dict)
sampler = torch.utils.data.distributed.DistributedSampler(
data_inuse,
num_replicas=opts_dict['ngpu'],
rank=opts_dict['local_rank'],
shuffle=True
)
data_inuse = DataLoader(data_inuse,
batch_size= opts_dict['batch_size'], num_workers=num_workers,
drop_last=True, worker_init_fn=_init_fn, pin_memory=True,
sampler=sampler)
return data_inuse
#----------- Eval Data Loader ----------#
#----------------------------------#
def eval_loader(opts_dict):
num_workers = 0
dataset = config_to_dataloader(opts_dict,is_eval=True)
dataset = DataLoader(dataset,
batch_size= 1, num_workers=num_workers, drop_last=False, pin_memory=True, shuffle=False)
return dataset
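# Minimal usage sketch (assuming opts_dict also carries whatever keys
# config_to_dataloader expects for the sequence configuration):
#   opts_dict = {'n_data_workers': 2, 'batch_size': 4,
#                'ngpu': 1, 'local_rank': 0, ...}
#   loader = data_loader(opts_dict)
#   for batch in loader:
#       ...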
|
banmo-main
|
dataloader/frameloader.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
from setuptools import setup, find_packages
setup(
name='clutrr',
version='1.0.0',
description='Compositional Language Understanding with Text-based Relational Reasoning',
packages=find_packages(exclude=(
'data', 'mturk')),
include_package_data=True,
)
|
clutrr-main
|
setup.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# Clean the templates from mturk annotated data
# Input = mturk annotated file (amt_mturk.csv)
# Output = placeholder json
import pandas as pd
import argparse
from nltk.tokenize import word_tokenize
import difflib
import json
from sacremoses import MosesDetokenizer
detokenizer = MosesDetokenizer()
def extract_placeholder(df):
"""
Given the AMT annotated datasets, extract the placeholders.
Important to maintain the order of the entities after being matched
For example, to replace a proof state (2,3),(3,4), the order is
important.
For the paper, we provide the set of cleaned train and test splits for the placeholders
See `Clutrr.setup()` for download locations
:param df:
:return:
"""
#skipped = [109] # skipping the Jose - Richard row, shouldn't have approved it
skipped = []
for i, row in df.iterrows():
story = row['paraphrase']
ents_gender = {dd.split(':')[0]: dd.split(':')[1] for dd in row['genders'].split(',')}
words = word_tokenize(story)
ent_id_g = {}
if i in skipped:
continue
# skipping a problematic row where two names are very similar.
# TODO: remove this from the AMT study as well
if 'Micheal' in ents_gender and 'Michael' in ents_gender:
skipped.append(i)
continue
# build entity -> key list
# here order of entity is important, so first we fetch the ordering from
# the proof state
proof = eval(row['proof_state'])
m_built = []
if len(proof) > 0:
built = []
for prd in proof:
pr_lhs = list(prd.keys())[0]
pr_rhs = prd[pr_lhs]
if pr_lhs not in built:
built.extend(pr_rhs)
else:
pr_i = built.index(pr_lhs)
built[pr_i] = pr_rhs
for b in built:
if type(b) != list:
m_built.append(b)
else:
m_built.extend(b)
else:
# when there is no proof state, consider the order from query
query = eval(row['query'])
m_built.append((query[0], '', query[-1]))
# with the proof state, create an ordered ENT_id_gender dict
ent_gender_keys = {}
ordered_ents = []
# add entities in the dictionary
def add_ent(entity):
if entity not in ent_gender_keys:
ent_gender_keys[entity] = 'ENT_{}_{}'.format(len(ent_gender_keys), ents_gender[entity])
ordered_ents.append(entity)
for edge in m_built:
add_ent(edge[0])
add_ent(edge[-1])
if len(ordered_ents) != len(ents_gender):
print(i)
return
for ent_id, (ent, gender) in enumerate(ents_gender.items()):
matches = difflib.get_close_matches(ent, words, cutoff=0.9)
if len(matches) == 0:
print(row['paraphrase'])
print(ent)
return
match_idxs = [i for i, x in enumerate(words) if x in matches]
for wi in match_idxs:
words[wi] = ent_gender_keys[ent]
ent_id_g[ent_id] = gender
gender_key = '-'.join([ents_gender[ent] for ent in ordered_ents])
replaced = detokenizer.detokenize(words, return_str=True)
df.at[i, 'template'] = replaced
df.at[i, 'template_gender'] = gender_key
print('Skipped', skipped)
return df, skipped
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mfile', type=str, default='amt_mturk.csv', help='MTurk generated file')
parser.add_argument('--outfile', type=str, default='amt_placeholders', help='placeholders json file')
parser.add_argument('--split', type=float, default=0.8, help='Train/Test split.')
args = parser.parse_args()
df = pd.read_csv(args.mfile)
# do not use the rejected samples
df = df[df.review != 'rejected']
print("Number of accepted rows : {}".format(len(df)))
df, skipped = extract_placeholder(df)
# create a json file for easy lookup
placeholders = {}
for i, row in df.iterrows():
if i in skipped:
continue
if row['f_comb'] not in placeholders:
placeholders[row['f_comb']] = {}
if row['template_gender'] not in placeholders[row['f_comb']]:
placeholders[row['f_comb']][row['template_gender']] = []
placeholders[row['f_comb']][row['template_gender']].append(row['template'])
# training and testing split of the placeholders
train_p = {}
test_p = {}
for key, gv in placeholders.items():
if key not in train_p:
train_p[key] = {}
test_p[key] = {}
for gk, ps in gv.items():
split = int(len(placeholders[key][gk]) * args.split)
train_p[key][gk] = placeholders[key][gk][:split]
test_p[key][gk] = placeholders[key][gk][split:]
# save
json.dump(train_p, open(args.outfile + '.train.json','w'))
json.dump(test_p, open(args.outfile + '.test.json', 'w'))
json.dump(placeholders, open(args.outfile + '.json','w'))
print("Done.")
if __name__ == '__main__':
main()
|
clutrr-main
|
clutrr/template_mturk.py
|
clutrr-main
|
clutrr/__init__.py
|
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# Generate story-summary pairs
from clutrr.actors.ancestry import Ancestry
from clutrr.relations.builder import RelationBuilder
from tqdm import tqdm
import random
import numpy as np
import json
import copy
from clutrr.args import get_args
from clutrr.store.store import Store
from clutrr.utils.utils import comb_indexes
import pandas as pd
from clutrr.relations.templator import *
#store = Store()
def generate_rows(args, store, task_name, split=0.8, prev_patterns=None):
# pre-flight checks
combination_length = min(args.combination_length, args.relation_length)
if not args.use_mturk_template:
if combination_length > 1:
raise NotImplementedError("combination of two or more relations not implemented in Synthetic templating")
else:
if combination_length > 3:
raise NotImplementedError("combinations of > 3 not implemented in AMT Templating")
# generate
print(args.relation_length)
print("Loading templates...")
all_puzzles = {}
if args.template_split:
train_templates = json.load(open(args.template_file + '.train.json'))
test_templates = json.load(open(args.template_file + '.test.json'))
else:
train_templates = json.load(open(args.template_file + '.json'))
test_templates = json.load(open(args.template_file + '.json'))
if args.use_mturk_template:
templatorClass = TemplatorAMT
else:
synthetic_templates_per_rel = {}
for key, val in store.relations_store.items():
for gender, gv in val.items():
synthetic_templates_per_rel[gv['rel']] = gv['p']
templatorClass = TemplatorSynthetic
train_templates = synthetic_templates_per_rel
test_templates = synthetic_templates_per_rel
# Build a mapping from ANY relation to the SAME list of sentences for asking queries
query_templates = {}
for key, val in store.relations_store.items():
for gender, gv in val.items():
query_templates[gv['rel']] = store.question_store['relational']
query_templator_class = TemplatorSynthetic
pb = tqdm(total=args.num_rows)
num_stories = args.num_rows
stories_left = num_stories
columns = ['id', 'story', 'query', 'text_query', 'target', 'text_target', 'clean_story', 'proof_state', 'f_comb',
'task_name','story_edges','edge_types','query_edge','genders', 'syn_story', 'node_mapping', 'task_split']
f_comb_count = {}
rows = []
anc_num = 0
anc_num += 1
anc = Ancestry(args, store)
rb = RelationBuilder(args, store, anc)
while stories_left > 0:
status = rb.build()
if not status:
rb.reset_puzzle()
rb.anc.next_flip()
continue
rb.add_facts()
# keeping a count of generated patterns to make sure we have homogenous distribution
if len(f_comb_count) > 0 and args.equal:
min_c = min([v for k,v in f_comb_count.items()])
weight = {k:(min_c/v) for k,v in f_comb_count.items()}
rb.generate_puzzles(weight)
else:
rb.generate_puzzles()
# if unique_test_pattern flag is set, and split is 0 (which indicates the task is test),
# only take the same test patterns as before
# also assert that the relation - test is present
if args.unique_test_pattern and split == 0 and len(prev_patterns) > 0 and len(prev_patterns[args.relation_length]['test']) > 0:
# if all these conditions met, prune the puzzles
todel = []
for pid,puzzle in rb.puzzles.items():
if puzzle.relation_comb not in prev_patterns[args.relation_length]['test']:
todel.append(pid)
for pid in todel:
del rb.puzzles[pid]
# now we have got the puzzles, assign the templators
for pid, puzzle in rb.puzzles.items():
if puzzle.relation_comb not in f_comb_count:
f_comb_count[puzzle.relation_comb] = 0
f_comb_count[puzzle.relation_comb] += 1
pb.update(1)
stories_left -= 1
# store the puzzles
all_puzzles.update(rb.puzzles)
rb.reset_puzzle()
rb.anc.next_flip()
pb.close()
print("Puzzles created. Now splitting train and test on pattern level")
print("Number of unique puzzles : {}".format(len(all_puzzles)))
pattern_puzzles = {}
for pid, pz in all_puzzles.items():
if pz.relation_comb not in pattern_puzzles:
pattern_puzzles[pz.relation_comb] = []
pattern_puzzles[pz.relation_comb].append(pid)
print("Number of unique patterns : {}".format(len(pattern_puzzles)))
train_puzzles = []
test_puzzles = []
sp = int(len(pattern_puzzles) * split)
all_patterns = list(pattern_puzzles.keys())
no_pattern_overlap = not args.holdout
# if k=2, then set no_pattern_overlap=True
if args.relation_length == 2:
no_pattern_overlap = True
if not no_pattern_overlap:
# for case > 3, strict no pattern overlap
train_patterns = all_patterns[:sp]
pzs = [pattern_puzzles[p] for p in train_patterns]
pzs = [s for p in pzs for s in p]
train_puzzles.extend(pzs)
test_patterns = all_patterns[sp:]
pzs = [pattern_puzzles[p] for p in test_patterns]
pzs = [s for p in pzs for s in p]
test_puzzles.extend(pzs)
else:
# for case of 2, pattern overlap but templators are different
# In this case, we have overlapping patterns, first choose the overlapping patterns
# we directly split on puzzle level
train_patterns = all_patterns
test_patterns = all_patterns[sp:]
pzs_train = []
pzs_test = []
for pattern in all_patterns:
pz = pattern_puzzles[pattern]
if pattern in test_patterns:
                # split this pattern's puzzles between train and test
                sz = int(len(pz) * (split - 0.2))
pzs_train.extend(pz[:sz])
pzs_test.extend(pz[sz:])
else:
pzs_train.extend(pz)
train_puzzles.extend(pzs_train)
test_puzzles.extend(pzs_test)
print("# Train puzzles : {}".format(len(train_puzzles)))
print("# Test puzzles : {}".format(len(test_puzzles)))
pb = tqdm(total=len(all_puzzles))
# saving in csv
for pid, puzzle in all_puzzles.items():
task_split = ''
if pid in train_puzzles:
task_split = 'train'
templator = templatorClass(templates=train_templates, family=puzzle.anc.family_data)
elif pid in test_puzzles:
task_split = 'test'
templator = templatorClass(templates=test_templates, family=puzzle.anc.family_data)
else:
            raise AssertionError("pid must be either in train or test")
story_text = puzzle.generate_text(stype='story', combination_length=combination_length, templator=templator)
fact_text = puzzle.generate_text(stype='fact', combination_length=combination_length, templator=templator)
story = story_text + fact_text
story = random.sample(story, len(story))
story = ' '.join(story)
clean_story = ' '.join(story_text)
target_text = puzzle.generate_text(stype='target', combination_length=1, templator=templator)
story_key_edges = puzzle.get_story_relations(stype='story') + puzzle.get_story_relations(stype='fact')
# Build query text
query_templator = query_templator_class(templates=query_templates, family=puzzle.anc.family_data)
query_text = puzzle.generate_text(stype='query', combination_length=1, templator=query_templator)
query_text = ' '.join(query_text)
query_text = query_text.replace('?.', '?') # remove trailing '.'
puzzle.convert_node_ids(stype='story')
puzzle.convert_node_ids(stype='fact')
story_keys_changed_ids = puzzle.get_sorted_story_edges(stype='story') + puzzle.get_sorted_story_edges(stype='fact')
query_edge = puzzle.get_sorted_query_edge()
genders = puzzle.get_name_gender_string()
rows.append([pid, story, puzzle.query_text, query_text, puzzle.target_edge_rel, target_text,
clean_story, puzzle.proof_trace, puzzle.relation_comb, task_name, story_keys_changed_ids,
story_key_edges, query_edge, genders, '', puzzle.story_sort_dict, task_split])
pb.update(1)
pb.close()
print("{} ancestries created".format(anc_num))
print("Number of unique patterns : {}".format(len(f_comb_count)))
return columns, rows, all_puzzles, train_patterns, test_patterns
def test_run(args):
store = Store(args)
anc = Ancestry(args, store)
rb = RelationBuilder(args, store, anc)
rb.num_rel = 3
all_patterns = set()
while True:
for j in range(len(anc.family_data.keys())):
rb.build()
up = rb.unique_patterns()
all_patterns.update(up)
print(len(all_patterns))
rb.reset_puzzle()
if not rb.anc.next_flip():
break
print("Number of unique puzzles : {}".format(len(all_patterns)))
rb.add_facts()
rb.generate_puzzles()
print("Generated {} puzzles".format(len(rb.puzzles)))
pid = random.choice(list(rb.puzzles.keys()))
print(rb.puzzles[pid])
def main(args):
    store = Store(args)
    # generate_rows returns (columns, rows, puzzles, train/test patterns) and
    # expects a task name plus args.relation_length to be set by the caller
    header, rows, _, _, _ = generate_rows(args, store, 'task_1')
df = pd.DataFrame(columns=header, data=rows)
# split test train
msk = np.random.rand(len(df)) > args.test
train_df = df[msk]
test_df = df[~msk]
train_df.to_csv(args.output + '_train.csv')
test_df.to_csv(args.output + '_test.csv')
if __name__ == '__main__':
args = get_args()
test_run(args)
#main(args)
|
clutrr-main
|
clutrr/generator.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
## Note: with the current args (max level 3, min_child = max_child = 4), it is
## only possible to generate up to 8 relations on my CPU. The code is not optimized yet.
import argparse
def get_args(command=None):
parser = argparse.ArgumentParser()
# graph parameters
parser.add_argument("--max_levels", default=3, type=int, help="max number of levels")
parser.add_argument("--min_child", default=4, type=int, help="max number of children per node")
parser.add_argument("--max_child", default=4, type=int, help="max number of children per node")
parser.add_argument("--p_marry", default=1.0, type=float, help="Probability of marriage among nodes")
# story parameters
parser.add_argument("--boundary",default=True, action='store_true', help='Boundary in entities')
parser.add_argument("--output", default="gen_m3", type=str, help='Prefix of the output file')
# Arguments not used now, use `--train_tasks` to set the task type and relation length
# parser.add_argument("--relation_length", default=3, type=int, help="Max relation path length")
# noise choices
# parser.add_argument("--noise_support", default=False, action='store_true',
# help="Noise type: Supporting facts")
# parser.add_argument("--noise_irrelevant", default=False, action='store_true',
# help="Noise type: Irrelevant facts")
# parser.add_argument("--noise_disconnected", default=False, action='store_true',
# help="Noise type: Disconnected facts")
# parser.add_argument("--noise_attributes", default=False, action='store_true',
# help="Noise type: Random attributes")
# store locations
parser.add_argument("--rules_store", default="rules_store.yaml", type=str, help='Rules store')
parser.add_argument("--relations_store", default="relations_store.yaml", type=str, help='Relations store')
parser.add_argument("--attribute_store", default="attribute_store.json", type=str, help='Attributes store')
parser.add_argument("--question_store", default="question_store.yaml", type=str, help='Question store')
# task
parser.add_argument("--train_tasks", default="1.3", type=str, help='Define which task to create dataset for, including the relationship length, comma separated')
parser.add_argument("--test_tasks", default="1.3", type=str, help='Define which tasks including the relation lengths to test for, comma separaated')
parser.add_argument("--train_rows", default=100, type=int, help='number of train rows')
parser.add_argument("--test_rows", default=100, type=int, help='number of test rows')
parser.add_argument("--memory", default=1, type=float, help='Percentage of tasks which are just memory retrieval')
parser.add_argument("--data_type", default="train", type=str, help='train/test')
# question type
parser.add_argument("--question", default=0, type=int, help='Question type. 0 -> relational, 1 -> yes/no')
# others
# parser.add_argument("--min_distractor_relations", default=8, type=int, help="Distractor relations about entities")
parser.add_argument("-v","--verbose", default=False, action='store_true',
help='print the paths')
parser.add_argument("-t","--test_split", default=0.2, help="Testing split")
parser.add_argument("--equal", default=False, action='store_true',
help="Make sure each pattern is equal. Warning: Time complexity of generation increases if this flag is set.")
parser.add_argument("--analyze", default=False, action='store_true', help="Analyze generated files")
parser.add_argument("--mturk", default=False, action='store_true', help='prepare data for mturk')
parser.add_argument("--holdout", default=False, action='store_true', help='if true, then hold out unique patterns in the test set')
parser.add_argument("--data_name", default='', type=str, help='Dataset name')
parser.add_argument("--use_mturk_template", default=False, action='store_true', help='use the templating data for mturk')
parser.add_argument("--template_length", type=int, default=2, help="Max Length of the template to substitute")
parser.add_argument("--template_file", type=str, default="amt_placeholders_clean.json", help="location of placeholders")
parser.add_argument("--template_split", default=True, action='store_true', help='Split on template level')
parser.add_argument("--combination_length", type=int, default=1, help="number of relations to combine together")
parser.add_argument("--output_dir", type=str, default="data", help="output_dir")
parser.add_argument("--store_full_puzzles", default=False, action='store_true',
help='store the full puzzle data in puzzles.pkl file. Warning: may take considerable amount of disk space!')
parser.add_argument("--unique_test_pattern", default=False, action='store_true', help="If true, have unique patterns generated in the first gen, and then choose from it.")
if command:
return parser.parse_args(command.split(' '))
else:
return parser.parse_args()
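# Usage sketch: parse a command string programmatically, e.g.
#   args = get_args('--train_tasks 1.2 --train_rows 50 --test_rows 10')
# or call get_args() with no argument to parse sys.argv.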
|
clutrr-main
|
clutrr/args.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# main file which defines the tasks
from clutrr.args import get_args
from clutrr.generator import generate_rows
from clutrr.store.store import Store
import pandas as pd
import glob
import copy
import uuid
import os
import json
import shutil
import sys
import nltk
from nltk.tokenize import word_tokenize
import pickle as pkl
import requests
import hashlib
import zipfile
# check if nltk.punkt is installed
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
logPath = '../logs/'
fileName = 'data'
# sha of the placeholder files
SHA_SUM = 'ed2264836bb17fe094dc21fe6bb7492b000df520eb86f8e60b8441121f8ff924'
download_url = "https://cs.mcgill.ca/~ksinha4/data/"
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
# logging.FileHandler("{0}/{1}.log".format(logPath, fileName)),
logging.StreamHandler()
]
)
logger = logging.getLogger()
class Clutrr:
"""
Data Generation Script for the paper
"CLUTRR - A benchmark suite for inductive reasoning on text"
"""
def __init__(self, args):
self.args = self._init_vars(args)
# store the unique patterns for each relation here
self.unique_patterns = {}
self.setup()
def generate(self, choice, args, num_rows=0, data_type='train', multi=False, split=None):
"""
Choose the task and the relation length
Return the used args for storing
:param choice:
:param args:
:param num_rows:
:param data_type:
:param multi:
:return:
"""
args = copy.deepcopy(args)
args.num_rows = num_rows
args.data_type = data_type
if not multi:
task, relation_length = choice.split('.')
task_name = 'task_{}'.format(task)
logger.info("mode : {}, task : {}, rel_length : {}".format(data_type, task_name, relation_length))
task_method = getattr(self, task_name, lambda: "Task {} not implemented".format(choice))
args = task_method(args)
args.relation_length = int(relation_length)
store = Store(args)
columns, rows, all_puzzles, train_patterns, test_patterns = generate_rows(args,
store, task_name + '.{}'.format(relation_length), split=split, prev_patterns=self.unique_patterns)
self.unique_patterns[int(relation_length)] = {
'train': train_patterns,
'test': test_patterns
}
return (columns, rows, all_puzzles), args
else:
rows = []
columns = []
puzzles = {}
for ch in choice:
task, relation_length = ch.split('.')
task_name = 'task_{}'.format(task)
logger.info("task : {}, rel_length : {}".format(task_name, relation_length))
task_method = getattr(self, task_name, lambda: "Task {} not implemented".format(choice))
args = task_method(args)
args.relation_length = int(relation_length)
store = Store(args)
                columns, r, pz, _, _ = generate_rows(args, store,
                        task_name + '.{}'.format(relation_length))
rows.extend(r)
puzzles.update(pz)
return ((columns, rows, puzzles), args)
def run_task(self):
"""
Default dispatcher method
"""
args = self.args
train_rows = args.train_rows
test_rows = args.test_rows
train_choices = args.train_tasks.split(',')
test_choices = args.test_tasks.split(',')
all_choices = []
for t in train_choices:
if t not in all_choices:
all_choices.append(t)
for t in test_choices:
if t not in all_choices:
all_choices.append(t)
train_datas = []
for choice in all_choices:
if choice in train_choices:
# split
choice_split = train_rows / (train_rows + test_rows)
num_rows = train_rows + test_rows
else:
# test, no split
choice_split = 0.0
num_rows = test_rows
print("Split : {}".format(choice_split))
train_datas.append(self.generate(choice, args, num_rows=num_rows, data_type='train', split=choice_split))
self.store(train_datas, None, args)
def assign_name(self, args, task_name):
"""
Create a name for the datasets:
- training file should end with _train
- testing file should end with _test
        - each file name should have a unique hex
:param args:
:return:
"""
name = '{}_{}.csv'.format(task_name, args.data_type)
return name
def store(self, train_data, test_data, args):
"""
Take the dataset and do the following:
- Create a name for the files
- Create a folder and put the files in
- Write the config in a file and put it in the folder
- Compute the hash of the train and test files and store it in a file
:param train_data list of rows
:param test_data list of list of rows
:return:
"""
train_tasks = args.train_tasks.split(',')
all_puzzles = {}
train_df = []
test_df = []
for i, td in enumerate(train_data):
train_rows_puzzles, train_args = td
assert len(train_rows_puzzles) == 3
train_rows, train_puzzles = train_rows_puzzles[:-1], train_rows_puzzles[-1]
trdfs = [r for r in train_rows[1] if r[-1] == 'train']
tsdfs = [r for r in train_rows[1] if r[-1] == 'test']
train_df.append(pd.DataFrame(columns=train_rows[0], data=trdfs))
test_df.append(pd.DataFrame(columns=train_rows[0], data=tsdfs))
train_df = pd.concat(train_df)
test_df = pd.concat(test_df)
logger.info("Training rows : {}".format(len(train_df)))
logger.info("Testing rows : {}".format(len(test_df)))
# prepare configs
all_config = {}
train_fl_name = self.assign_name(train_args, args.train_tasks)
all_config['train_task'] = {args.train_tasks: train_fl_name}
all_config['test_tasks'] = {}
test_fl_names = []
all_config['args'] = {}
all_config['args'][train_fl_name] = vars(train_args)
test_tasks = args.test_tasks.split(',')
test_dfs = []
for test_task in test_tasks:
train_args.data_type = 'test'
test_fl_name = self.assign_name(train_args,test_task)
all_config['args'][test_fl_name] = vars(train_args)
test_fl_names.append(test_fl_name)
test_dfs.append(test_df[test_df.task_name == 'task_'+test_task])
base_path = os.path.abspath(os.pardir)
# derive folder name as a random selection of characters
directory = ''
while True:
folder_name = 'data_{}'.format(str(uuid.uuid4())[:8])
directory = os.path.join(base_path, args.output_dir, folder_name)
if not os.path.exists(directory):
os.makedirs(directory)
break
train_df.to_csv(os.path.join(directory, train_fl_name))
for i,test_fl_name in enumerate(test_fl_names):
test_df = test_dfs[i]
test_df.to_csv(os.path.join(directory, test_fl_name))
# dump config
json.dump(all_config, open(os.path.join(directory, 'config.json'),'w'))
if args.store_full_puzzles:
# dump all puzzles
pkl.dump(all_puzzles, open(os.path.join(directory, 'puzzles.pkl'),'wb'), protocol=-1)
shutil.make_archive(directory, 'zip', directory)
logger.info("Created dataset in {}".format(directory))
self.analyze_data(directory)
if args.mturk:
self.keep_unique(directory)
def analyze_data(self, directory):
"""
Analyze a given directory
:param directory:
:return:
"""
all_files = glob.glob(os.path.join(directory,'*.csv'))
for fl in all_files:
logger.info("Analyzing file {}".format(fl))
df = pd.read_csv(fl)
df['word_len'] = df.story.apply(lambda x: len(word_tokenize(x)))
df['word_len_clean'] = df.clean_story.apply(lambda x: len(word_tokenize(x)))
print("Max words : ", df.word_len.max())
print("Min words : ", df.word_len.min())
print("For clean story : ")
print("Max words : ", df.word_len_clean.max())
print("Min words : ", df.word_len_clean.min())
logger.info("Analysis complete")
def keep_unique(self, directory, num=1):
"""
Keep num unique rows for each pattern. Handy for Mturk collection.
        :param directory:
        :param num:
:return:
"""
all_files = glob.glob(os.path.join(directory, '*.csv'))
for fl in all_files:
df = pd.read_csv(fl)
uniq_patterns = df['f_comb'].unique()
udf = []
for up in uniq_patterns:
# randomly select one row for this unique pattern
rd = df[df['f_comb'] == up].sample(num)
udf.append(rd)
udf = pd.concat(udf)
udf.to_csv(fl)
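    # Example: with num=1, a CSV whose f_comb column holds 40 unique patterns is
    # rewritten with 40 rows, one randomly sampled story per pattern.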
def _init_vars(self, args):
args.noise_support = False
args.noise_irrelevant = False
args.noise_disconnected = False
args.noise_attributes = False
args.memory = 0
return args
def task_1(self, args):
"""
Basic family relation without any noise
:return:
"""
args.output += '_task1'
return args
def task_2(self, args):
"""
Family relation with supporting facts
:return:
"""
args.noise_support = True
args.output += '_task2'
return args
def task_3(self, args):
"""
Family relation with irrelevant facts
:return:
"""
args.noise_irrelevant = True
args.output += '_task3'
return args
def task_4(self, args):
"""
Family relation with disconnected facts
:return:
"""
args.noise_disconnected = True
args.output += '_task4'
return args
def task_5(self, args):
"""
Family relation with all facts
:return:
"""
args.noise_support = True
args.noise_irrelevant = True
args.noise_disconnected = True
args.output += '_task5'
return args
def task_6(self, args):
"""
Family relation with only memory retrieval
:param args:
:return:
"""
args.memory = 1.0
args.output += '_task6'
return args
def task_7(self, args):
"""
Family relation with mixed memory and reasoning
:param args:
:return:
"""
args.memory = 0.5
args.output += '_task7'
args.noise_support = False
        args.noise_irrelevant = False
        args.noise_disconnected = False
return args
def setup(self):
"""
Download placeholders and update args
:return:
"""
placeholder_zip = "cleaned_placeholders.zip"
placeholder_url = download_url + placeholder_zip
base_path = os.path.abspath(os.pardir)
placeholder_loc = os.path.join(base_path, placeholder_zip)
if os.path.exists(placeholder_loc):
print("downloaded placeholder data exists")
else:
print("Downloading placeholder data")
r = requests.get(placeholder_url)
with open(placeholder_loc, 'wb') as f:
f.write(r.content)
        # verify the sha256 checksum of the download
        sha256 = hashlib.sha256()
        BUF_SIZE = 65536
        with open(placeholder_loc, 'rb') as f:
            while True:
                data = f.read(BUF_SIZE)
                if not data:
                    break
                sha256.update(data)
        print("sha256 : {}".format(sha256.hexdigest()))
        print("checking ...")
        if sha256.hexdigest() != SHA_SUM:
            raise AssertionError("downloaded corrupt data, sha256 doesn't match")
print("Data valid")
# extract zip
with zipfile.ZipFile(placeholder_loc, "r") as zip_ref:
zip_ref.extractall(os.path.join(base_path, 'clutrr'))
# set args
self.args.template_file = "cleaned_placeholders/amt_placeholders_clean"
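# A minimal usage sketch (the task strings below are hypothetical; the expected
# format is "<task>.<relation_length>", and get_args() supplies the remaining
# defaults):
#
#   args = get_args()
#   args.train_tasks = '1.2,1.3'  # task 1, stories of 2 and 3 relations
#   args.test_tasks = '1.4'       # test generalization to 4 relations
#   Clutrr(args).run_task()       # writes data_<hex>/ with CSVs and config.json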
if __name__ == '__main__':
args = get_args()
logger.info("Data generation started for configurations : ")
logger.info('\ntogrep : {0}\n'.format(sys.argv[1:]))
cl = Clutrr(args)
cl.run_task()
logger.info("\ntogrep : Data generation done {0}\n".format(sys.argv[1:]))
logger.info("-----------------------------------------------------")
|
clutrr-main
|
clutrr/main.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# Main Puzzle class which maintains the state of a single puzzle
import uuid
import random
from clutrr.utils.utils import comb_indexes
from clutrr.relations.templator import Templator
import copy
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
class Fact:
"""
Fact class to store the additional facts
"""
def __init__(self,
fact_type=None,
fact_edges=None):
"""
:param fact_type: Type of the fact, supporting / irrelevant / disconnected
:param fact_edges:
"""
self.fact_type = fact_type
self.fact_edges = fact_edges
def __str__(self):
if self.fact_edges:
return "Type: {}, E: {}".format(self.fact_type, self.fact_edges)
class Puzzle:
"""
Puzzle class containing the logic to build and maintain the state of a single puzzle
"""
def __init__(self,
id = None,
target_edge=None,
story=None,
proof=None,
query_edge=None,
ancestry=None,
relations_obj=None
):
"""
:param id: unique id of the puzzle
:param target_edge: the target edge, (node_a, node_b)
:param story: list of edges consisting of the story
:param proof: proof state of the resolution from target edge to story
:param query_edge: edge to query, usually the same as target_edge
:param ancestry: full background graph the story was derived from
:param relations_obj: store of the rule base of the relations
"""
if id is None:
self.id = str(uuid.uuid4())
else:
self.id = id
self.target_edge = target_edge
self.story = story
self.proof_trace = proof
self.facts = []
self.query_edge = query_edge
self.anc = ancestry
self.relations_obj = relations_obj
# derived values
self.query_text = None
self.target_edge_rel = None
self.story_rel = None
self.text_question = None
self.relation_comb = None
# derived full text story
self.full_text_story = None
# story edges with sorted node ids
self.story_sorted_ids = None
self.story_sort_dict = {} # mapping between the original node id and sorted node id
# the templator instances to use
self.train_templates = None
self.test_templates = None
def derive_vals(self):
self.query_text = self.format_edge(self.target_edge)
self.target_edge_rel = self.get_edge_relation(self.target_edge)
self.story_rel = [self.format_edge_rel(story) for story in self.story]
self.relation_comb = '-'.join([self.get_edge_rel(x)['rel'] for x in self.story])
def add_fact(self, fact_type, fact):
"""
Add a fact to the model
:param fact_type:
:param fact:
:return:
"""
self.facts.append(Fact(fact_type=fact_type, fact_edges=fact))
def clear_facts(self):
"""
Clear all noise facts of the puzzle
:return:
"""
self.facts = []
def get_full_story(self, randomize=True):
"""
Combine story and facts
:param randomize:
:return:
"""
full_story = self.story + [edge for fact in self.facts for edge in fact.fact_edges]
if randomize:
full_story = random.sample(full_story, len(full_story))
return full_story
def get_all_noise(self):
"""
Get only noise edges
:return:
"""
return [edge for fact in self.facts for edge in fact.fact_edges]
def get_clean_story(self):
"""
Return the clean story
:return:
"""
return self.story
def generate_text(self, stype='story', combination_length=1, templator:Templator=None, edges=None):
"""
:param stype: can be story, fact, target, or query
:param combination_length: the max length of combining the edges for text replacement
:param templator: templator class
:param edges: if provided, use these edges instead of stypes
:return:
"""
generated_rows = []
if edges is None:
if stype == 'story':
edges_to_convert = copy.copy(self.story)
elif stype == 'fact':
edges_to_convert = copy.copy([fact.fact_edges for fact in self.facts])
edges_to_convert = [y for x in edges_to_convert for y in x]
elif stype == 'target':
# derive the relation (solution) from the target edge
edges_to_convert = [copy.copy(self.target_edge)]
elif stype == 'query':
# derive the question from the target edge
edges_to_convert = [copy.copy(self.target_edge)]
else:
raise NotImplementedError("stype not implemented")
else:
edges_to_convert = edges
combined_edges = comb_indexes(edges_to_convert, combination_length)
for comb_group in combined_edges:
r_combs = ['-'.join([self.get_edge_relation(edge) for edge in edge_group])
for edge_group in comb_group]
                # intentionally reintroduce the "neice" misspelling so the keys
                # match the (misspelled) entries in the template store
                r_combs = [r.replace('niece', 'neice') if 'niece' in r else r for r in r_combs]
r_entities = [[ent for edge in edge_group for ent in edge] for edge_group
in comb_group]
prows = [templator.replace_template(edge_group, r_entities[group_id])
for group_id, edge_group in enumerate(r_combs)]
# if contains None, then reject this combination
prc = [x for x in prows if x is not None]
if len(prc) == len(prows):
generated_rows.append(prows)
        # sort the candidate realizations by length so shorter (more composed)
        # decompositions come first, drop empty ones, then sample a candidate
generated_rows = list(sorted(generated_rows, key=len))
generated_rows = [g for g in generated_rows if len(g) > 0]
if stype == 'story':
            if len(generated_rows) == 0:
                raise AssertionError("no textual realization found for the story")
if len(generated_rows) > 0:
generated_row = random.choice(generated_rows)
for g in generated_row:
                if not isinstance(g, str):
                    raise TypeError("expected a string row, got {}".format(type(g)))
return generated_row
else:
return []
def convert_node_ids(self, stype='story'):
"""
Given node ids in edges, change the ids into a sorted version
:param stype:
:return:
"""
if stype == 'story':
edges_tc = copy.copy(self.story)
elif stype == 'fact':
edges_tc = copy.copy([fact.fact_edges for fact in self.facts])
edges_tc = [y for x in edges_tc for y in x]
else:
raise NotImplementedError("stype not implemented")
node_ct = len(self.story_sort_dict)
for key in edges_tc:
if key[0] not in self.story_sort_dict:
self.story_sort_dict[key[0]] = node_ct
node_ct += 1
if key[1] not in self.story_sort_dict:
self.story_sort_dict[key[1]] = node_ct
node_ct += 1
def get_name_gender_string(self):
"""
Create a combination of name:Gender
:return:
"""
        if not self.story_sort_dict:
            self.convert_node_ids('story')
return ','.join(['{}:{}'.format(self.anc.family_data[node_id].name,
self.anc.family_data[node_id].gender)
for node_id in self.story_sort_dict.keys()])
def get_sorted_story_edges(self, stype='story'):
"""
Overlay changed node ids onto story edges
:param stype:
:return:
"""
if stype == 'story':
edges_tc = copy.copy(self.story)
elif stype == 'fact':
edges_tc = copy.copy([fact.fact_edges for fact in self.facts])
edges_tc = [y for x in edges_tc for y in x]
else:
raise NotImplementedError("stype not implemented")
edge_keys_changed_id = [(self.story_sort_dict[key[0]],
self.story_sort_dict[key[1]]) for key in edges_tc]
return edge_keys_changed_id
def get_story_relations(self, stype='story'):
if stype == 'story':
edges_tc = copy.copy(self.story)
elif stype == 'fact':
edges_tc = copy.copy([fact.fact_edges for fact in self.facts])
edges_tc = [y for x in edges_tc for y in x]
else:
raise NotImplementedError("stype not implemented")
return [self.get_edge_relation(p) for p in edges_tc]
def get_sorted_query_edge(self):
"""
Overlay changed node ids onto query edge
:return:
"""
return (self.story_sort_dict[self.target_edge[0]],
self.story_sort_dict[self.target_edge[1]])
def get_target_relation(self):
"""
Get target relation
:return:
"""
return self.get_edge_relation(self.target_edge)
def get_edge_rel(self, edge, rel_type='family'):
# get node attributes
node_b_attr = self.anc.family_data[edge[1]]
relation = self.anc.family[edge][rel_type]
edge_rel = self.relations_obj[relation][node_b_attr.gender]
return edge_rel
def get_edge_relation(self, edge, rel_type='family'):
node_b_attr = self.anc.family_data[edge[1]]
relation = self.anc.family[edge][rel_type]
edge_rel = self.relations_obj[relation][node_b_attr.gender]
return edge_rel['rel']
def format_edge(self, edge):
"""
Given an edge (x,y), format it into (name(x), name(y))
:param edge:
:return:
"""
node_a_attr = self.anc.family_data[edge[0]]
node_b_attr = self.anc.family_data[edge[1]]
new_edge = (node_a_attr.name, node_b_attr.name)
return new_edge
def format_edge_rel(self, edge, rel_type='family'):
"""
Given an edge (x,y), format it into (name(x), rel(x,y), name(y))
:param edge:
:return:
"""
node_a_attr = self.anc.family_data[edge[0]]
node_b_attr = self.anc.family_data[edge[1]]
edge_rel = self.get_edge_rel(edge, rel_type)['rel']
new_edge = (node_a_attr.name, edge_rel, node_b_attr.name)
return new_edge
def get_unique_relations(self):
"""
Get all unique relations from rule store
:return:
"""
rels = []
for meta_rel, val in self.relations_obj.items():
for sp_rel, sp_val in val.items():
rels.append(sp_val['rel'])
rels.remove('no-relation')
return rels
def display(self):
"""
Display the puzzle in a network diagram
:return:
"""
G = nx.MultiDiGraph()
fs = self.get_full_story()
names = {}
rels = {}
forward_edges = []
backward_edges = []
gendered_nodes = {'male':[], 'female':[]}
for edge in fs:
G.add_node(edge[0])
G.add_node(edge[1])
gendered_nodes[self.anc.family_data[edge[0]].gender].append(edge[0])
gendered_nodes[self.anc.family_data[edge[1]].gender].append(edge[1])
names[edge[0]] = self.anc.family_data[edge[0]].name
names[edge[1]] = self.anc.family_data[edge[1]].name
G.add_edge(edge[0], edge[1])
forward_edges.append(edge)
rels[edge] = self.get_edge_relation(edge)
G.add_edge(edge[1], edge[0])
backward_edges.append(edge)
rels[(edge[1], edge[0])] = self.get_edge_relation((edge[1], edge[0]))
pos = nx.layout.random_layout(G)
nx.draw_networkx_nodes(G, pos, nodelist=gendered_nodes['male'], node_color='b', node_size=100, alpha=0.8)
nx.draw_networkx_nodes(G, pos, nodelist=gendered_nodes['female'], node_color='r', node_size=100, alpha=0.8)
nx.draw_networkx_labels(G, pos, names)
nx.draw_networkx_edges(G, pos, edgelist=forward_edges, arrowstyle='-', edge_color='r')
nx.draw_networkx_edges(G, pos, edgelist=backward_edges, arrowstyle='-', edge_color='b')
edge_labels = nx.draw_networkx_edge_labels(G, pos, rels)
ax = plt.gca()
ax.set_axis_off()
plt.show()
def __str__(self):
tmp = "Story : \n"
tmp += "{} \n".format(self.story)
tmp += "{} \n".format([self.format_edge_rel(e) for e in self.story])
tmp += "Additional facts : \n"
for fact in self.facts:
tmp += "{} \n".format(fact)
return tmp
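# A minimal sketch of the Fact/Puzzle story-assembly flow. The edges below are
# hypothetical node-id tuples; none of the calls used here need an ancestry or
# a relations store.
def _example_puzzle_assembly():
    pz = Puzzle(target_edge=(0, 2), story=[(0, 1), (1, 2)])
    pz.add_fact(fact_type='irrelevant', fact=[(1, 5)])
    # full story = story edges + noise edges (order fixed with randomize=False)
    assert pz.get_full_story(randomize=False) == [(0, 1), (1, 2), (1, 5)]
    pz.clear_facts()
    return pz.get_clean_story()  # -> [(0, 1), (1, 2)]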
|
clutrr-main
|
clutrr/relations/puzzle.py
|
clutrr-main
|
clutrr/relations/__init__.py
|
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
import copy
import random
class Templator:
"""
Templator base class
"""
def __init__(self, templates, family):
self.templates = copy.copy(templates)
        self.family = family  # dict containing node information
self.used_template = ''
self.entity_id_dict = {}
self.seen_ent = set()
def choose_template(self, *args, **kwargs):
pass
def replace_template(self, *args, **kwargs):
pass
class TemplatorAMT(Templator):
"""
Replaces story with the templates obtained from AMT
"""
def __init__(self, templates, family):
super(TemplatorAMT, self).__init__(templates=templates, family=family)
def choose_template(self, f_comb, entities, verbose=False):
"""
Choose a template to use. Do not use the same template in this current context
:return:
"""
self.entity_id_dict = {}
self.seen_ent = set()
gender_comb = []
# build the dictionary of entity - ids
for ent in entities:
if ent not in self.seen_ent:
gender_comb.append(self.family[ent].gender)
self.seen_ent.add(ent)
self.entity_id_dict[ent] = len(self.entity_id_dict)
gender_comb = '-'.join(gender_comb)
if verbose:
print(f_comb)
print(gender_comb)
print(len(self.templates[f_comb][gender_comb]))
if gender_comb not in self.templates[f_comb] or len(self.templates[f_comb][gender_comb]) == 0:
raise NotImplementedError("template combination not found.")
available_templates = self.templates[f_comb][gender_comb]
chosen_template = random.choice(available_templates)
self.used_template = chosen_template
used_i = self.templates[f_comb][gender_comb].index(chosen_template)
        # optionally drop the used template to forbid repeats within a context:
        # del self.templates[f_comb][gender_comb][used_i]
return chosen_template
def replace_template(self, f_comb, entities, verbose=False):
try:
chosen_template = self.choose_template(f_comb, entities, verbose=verbose)
for ent_id, ent in enumerate(list(set(entities))):
node = self.family[ent]
gender = node.gender
name = node.name
chosen_template = chosen_template.replace('ENT_{}_{}'.format(self.entity_id_dict[ent], gender), '[{}]'.format(name))
return chosen_template
except:
# chosen template not found
return None
class TemplatorSynthetic(Templator):
"""
Replaces story with the templates obtained from Synthetic rule base
Here, templates is self.relations_obj[relation]
"""
def __init__(self, templates, family):
super(TemplatorSynthetic, self).__init__(templates=templates, family=family)
def choose_template(self, f_comb, entities, verbose=False):
"""
        Choose a template uniformly at random from the synthetic bank
:return:
"""
self.entity_id_dict = {}
self.seen_ent = set()
available_templates = self.templates[f_comb]
chosen_template = random.choice(available_templates)
self.used_template = chosen_template
return chosen_template
def replace_template(self, f_comb, entities, verbose=False):
assert len(entities) == 2
chosen_template = self.choose_template(f_comb, entities, verbose=verbose)
node_a_attr = self.family[entities[0]]
node_b_attr = self.family[entities[1]]
node_a_name = node_a_attr.name
node_b_name = node_b_attr.name
assert node_a_name != node_b_name
node_a_name = '[{}]'.format(node_a_name)
node_b_name = '[{}]'.format(node_b_name)
text = chosen_template.replace('e_1', node_a_name)
text = text.replace('e_2', node_b_name)
return text + '. '
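# An illustrative sketch of TemplatorSynthetic with a hypothetical template
# store and family dict (the real ones come from the Store / ancestry objects).
def _example_templator_synthetic():
    class _Node:
        def __init__(self, name, gender):
            self.name, self.gender = name, gender
    templates = {'father': ['e_1 is the father of e_2']}
    family = {0: _Node('Alan', 'male'), 1: _Node('Bea', 'female')}
    t = TemplatorSynthetic(templates, family)
    # -> '[Alan] is the father of [Bea]. '
    return t.replace_template('father', [0, 1])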
|
clutrr-main
|
clutrr/relations/templator.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# New builder class which makes use of our new data generation
import random
import itertools as it
import copy
from clutrr.store.store import Store
import uuid
from clutrr.relations.puzzle import Puzzle
class RelationBuilder:
"""
Relation builder class
Steps:
- Accept a skeleton class
- Iteratively:
- Invert the relations
- Sample edge e (n1, n2)
- Select the rule which matches this edge e (n1,n2) -> r
- introduce a variable x so that (n1,x) + (x,n2) -> r
    - find the x which satisfies both s.t. x not in {n1, n2}
- either add to story
- or recurse
Changes:
    - Relation types are "family", "work", etc. (as given in ``relation_types``)
    - When applying the rules, make sure to conform to these types
"""
def __init__(self,args, store:Store, anc):
self.anc = anc
self.args = args
self.rules = store.rules_store
self.store = store
self.comp_rules = self.rules['compositional']
self.inv_rules = self.rules['inverse-equivalence']
self.sym_rules = self.rules['symmetric']
self.eq_rules = self.rules['equivalence']
self.relation_types = self.rules['relation_types']
self.comp_rules_inv = self._invert_rule(self.rules['compositional'])
self.inv_rules_inv = self._invert_rule(self.rules['inverse-equivalence'])
self.sym_rules_inv = self._invert_rule(self.rules['symmetric'])
self.eq_rules_inv = self._invert_rule(self.rules['equivalence'])
self.relations_obj = store.relations_store
self.boundary = args.boundary
self.num_rel = args.relation_length
self.puzzles = {}
self.puzzle_ct = 0
self.expansions = {} # (a,b) : [list]
# save the edges which are used already
self.done_edges = set()
self.apply_almost_complete()
self.precompute_expansions(list(self.anc.family.keys()))
def _invert_rule(self, rule):
"""
Given a rule, invert it to be RHS:LHS
:param rule:
:return:
"""
inv_rules = {}
for tp, rules in rule.items():
inv_rules[tp] = {}
for key, val in rules.items():
if type(val) == str:
if val not in inv_rules[tp]:
inv_rules[tp][val] = []
inv_rules[tp][val].append(key)
else:
for k2, v2 in val.items():
if v2 not in inv_rules[tp]:
inv_rules[tp][v2] = []
inv_rules[tp][v2].append((key, k2))
return inv_rules
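    # Example of the inversion, assuming a compositional rule store shaped like
    # {'family': {'father': {'father': 'grandfather'}}}: the result is
    # {'family': {'grandfather': [('father', 'father')]}}, i.e. each derived
    # relation maps back to the LHS pairs that can produce it.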
def invert_rel(self, rel_type='family'):
"""
Invert the relations
:return:
"""
if rel_type not in self.inv_rules:
return None
inv_family = copy.deepcopy(self.anc.family)
for edge, rel in self.anc.family.items():
relation = rel[rel_type]
if relation in self.inv_rules[rel_type]:
inv_rel = self.inv_rules[rel_type][relation]
if (edge[1], edge[0]) not in inv_family:
inv_family[(edge[1], edge[0])] = {}
inv_family[(edge[1], edge[0])][rel_type] = inv_rel
self.anc.family = inv_family
def equivalence_rel(self, rel_type='family'):
"""
Use equivalence relations
:return:
"""
if rel_type not in self.eq_rules:
return None
n_family = copy.deepcopy(self.anc.family)
for edge, rel in self.anc.family.items():
relation = rel[rel_type]
if relation in self.eq_rules[rel_type]:
eq_rel = self.eq_rules[rel_type][relation]
n_family[(edge[0],edge[1])][rel_type] = eq_rel
self.anc.family = n_family
def symmetry_rel(self, rel_type='family'):
"""
        Apply symmetric relations
:return:
"""
if rel_type not in self.sym_rules:
return None
n_family = copy.deepcopy(self.anc.family)
for edge, rel in self.anc.family.items():
relation = rel[rel_type]
if relation in self.sym_rules[rel_type]:
sym_rel = self.sym_rules[rel_type][relation]
if (edge[1], edge[0]) not in n_family:
n_family[(edge[1], edge[0])] = {}
n_family[(edge[1], edge[0])][rel_type] = sym_rel
self.anc.family = n_family
def compose_rel(self, edge_1, edge_2, rel_type='family', verbose=False):
"""
Given an edge pair, add the edges into a single edge following the rules
in the dictionary
:param edge_1: (x,z)
:param edge_2: (z,y)
:param rel_type:
:return: (x,y)
"""
# dont allow self edges
if edge_1[0] == edge_1[1]:
return None
if edge_2[0] == edge_2[1]:
return None
if edge_1[1] == edge_2[0] and edge_1[0] != edge_2[1]:
n_edge = (edge_1[0], edge_2[1])
if n_edge not in self.anc.family and \
(edge_1 in self.anc.family and
self.anc.family[edge_1][rel_type] in self.comp_rules[rel_type]):
if edge_2 in self.anc.family and \
self.anc.family[edge_2][rel_type] in self.comp_rules[rel_type][self.anc.family[edge_1][rel_type]]:
n_rel = self.comp_rules[rel_type][self.anc.family[edge_1][rel_type]][self.anc.family[edge_2][rel_type]]
if n_edge not in self.anc.family:
self.anc.family[n_edge] = {}
self.anc.family[n_edge][rel_type] = n_rel
if verbose:
print(edge_1, edge_2, n_rel)
return n_edge
return None
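    # Example: with (a,b) = father and (b,c) = father in the family graph, and a
    # rule store containing {'father': {'father': 'grandfather'}}, compose_rel
    # adds the edge (a,c) = grandfather and returns it.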
def almost_complete(self,edge):
"""
Build an almost complete graph by iteratively applying the rules
Recursively apply rules and invert
:return:
"""
# apply symmetric, equivalence and inverse rules
self.invert_rel()
self.equivalence_rel()
self.symmetry_rel()
# apply compositional rules
keys = list(self.anc.family.keys())
edge_1 = [self.compose_rel(e, edge) for e in keys if e[1] == edge[0]]
edge_2 = [self.compose_rel(edge, e) for e in keys if e[0] == edge[1]]
edge_1 = list(filter(None.__ne__, edge_1))
edge_2 = list(filter(None.__ne__, edge_2))
for e in edge_1:
self.almost_complete(e)
for e in edge_2:
self.almost_complete(e)
def apply_almost_complete(self):
"""
For each edge apply ``almost_complete``
:return:
"""
print("Almost completing the family graph with {} nodes...".format(len(self.anc.family_data)))
for i in range(len(self.anc.family_data)):
for j in range(len(self.anc.family_data)):
if i != j:
self.almost_complete((i, j))
print("Initial family tree created with {} edges".format(
len(set([k for k, v in self.anc.family.items()]))))
def build(self):
"""
Build the stories and targets for the current family configuration
and save it in memory. These will be used later for post-processing
:param num_rel:
:return:
"""
available_edges = set([k for k, v in self.anc.family.items()]) - self.done_edges
#print("Available edges to derive backwards - {}".format(len(available_edges)))
for edge in available_edges:
pz = self.build_one_puzzle(edge)
if pz:
self.puzzles[pz.id] = pz
self.puzzle_ct += 1
if len(self.puzzles) == 0:
print("No puzzles could be generated with this current set of arguments. Consider increasing the family tree.")
return False
#print("Generated {}".format(len(self.puzzles)))
return True
def build_one_puzzle(self, edge):
"""
Build one puzzle
Return False if unable to make the puzzle
:return: type Puzzle
"""
story, proof_trace = self.derive([edge], k=self.num_rel - 1)
if len(story) == self.num_rel:
id = str(uuid.uuid4())
pz = Puzzle(id=id, target_edge=edge, story=story,
proof=proof_trace, ancestry=copy.deepcopy(self.anc),
relations_obj=copy.deepcopy(self.relations_obj))
pz.derive_vals()
return pz
else:
return False
def reset_puzzle(self):
"""Reset puzzle to none"""
self.puzzles = {}
        self.puzzle_ct = 0
def unique_patterns(self):
"""Get unique patterns in this puzzle"""
f_comb_count = {}
for pid, puzzle in self.puzzles.items():
if puzzle.relation_comb not in f_comb_count:
f_comb_count[puzzle.relation_comb] = 0
f_comb_count[puzzle.relation_comb] += 1
return set(f_comb_count.keys())
def _value_counts(self):
pztype = {}
for pid, puzzle in self.puzzles.items():
f_comb = puzzle.relation_comb
if f_comb not in pztype:
pztype[f_comb] = []
pztype[f_comb].append(pid)
return pztype
def prune_puzzles(self, weight=None):
"""
        In order to keep all puzzles homogeneously distributed over pattern type
        ("f_comb"), we calculate the count of each pattern and retain the minimum count
:param weight: a dict of weights f_comb:p where 0 <= p <= 1
:return:
"""
pztype = self._value_counts()
pztype_min_count = min([len(v) for k,v in pztype.items()])
keep_puzzles = []
for f_comb, pids in pztype.items():
keep_puzzles.extend(random.sample(pids, pztype_min_count))
not_keep = set(self.puzzles.keys()) - set(keep_puzzles)
for pid in not_keep:
del self.puzzles[pid]
if weight:
pztype = self._value_counts()
# fill in missing weights
for f_comb, pids in pztype.items():
if f_comb not in weight:
weight[f_comb] = 1.0
keep_puzzles = []
for f_comb,pids in pztype.items():
if weight[f_comb] == 1.0:
keep_puzzles.extend(pids)
not_keep = set(self.puzzles.keys()) - set(keep_puzzles)
for pid in not_keep:
del self.puzzles[pid]
def add_facts_to_puzzle(self, puzzle):
"""
For a given puzzle, add different types of facts
- 1 : Provide supporting facts. After creating the essential fact graph, expand on any
k number of edges (randomly)
- 2: Irrelevant facts: after creating the relevant fact graph, expand on an edge,
but only provide dangling expansions
- 3: Disconnected facts: along with relevant facts, provide a tree which is completely
separate from the proof path
- 4: Random attributes: school, place of birth, etc.
If unable to add the required facts, return False
Else, return the puzzle
:return:
"""
if self.args.noise_support:
# Supporting facts
# A <-> B <-> C ==> A <-> D <-> C , A <-> D <-> B <-> C
story = puzzle.story
extra_story = []
for se in story:
e_pair = self.expand_new(se)
if e_pair:
if puzzle.target_edge not in e_pair and e_pair[0][1] not in set([p for e in puzzle.story for p in e]):
extra_story.append(tuple(e_pair))
if len(extra_story) == 0:
return False
else:
# choose a sample of 1 to k-1 edge pairs
num_edges = random.choice(range(1, (len(story) // 2) + 1))
extra_story = random.sample(extra_story, min(num_edges, len(extra_story)))
# untuple the extra stories
extra_story = [k for e in extra_story for k in e]
self._test_supporting(story, extra_story)
puzzle.add_fact(fact_type='supporting', fact=extra_story)
if self.args.noise_irrelevant:
# Irrelevant facts
# A <-> B <-> C ==> A <-> D <-> E
# Must have only one common node with the story
story = puzzle.story
num_edges = len(story)
sampled_edge = random.choice(story)
extra_story = []
for i in range(num_edges):
tmp = sampled_edge
seen_pairs = set()
pair = self.expand_new(sampled_edge)
if pair:
while len(extra_story) == 0 and (tuple(pair) not in seen_pairs):
seen_pairs.add(tuple(pair))
for e in pair:
if e != puzzle.target_edge and not self._subset(story, [e], k=2):
extra_story.append(e)
sampled_edge = e
break
if tmp == sampled_edge:
sampled_edge = random.choice(story)
if len(extra_story) == 0:
return False
else:
                # cap the amount of added noise so the story does not get overly
                # long: keep at most half as many irrelevant edges as story edges
extra_story = random.sample(extra_story, min(len(extra_story), len(story) // 2))
self._test_irrelevant(story, extra_story)
puzzle.add_fact(fact_type='irrelevant', fact=extra_story)
if self.args.noise_disconnected:
# Disconnected facts
story = puzzle.story
nodes_story = set([y for x in list(story) for y in x])
nodes_not_in_story = set(self.anc.family_data.keys()) - nodes_story
possible_edges = [(x, y) for x, y in it.combinations(list(nodes_not_in_story), 2) if
(x, y) in self.anc.family]
num_edges = random.choice(range(1, (len(story) // 2) + 1))
possible_edges = random.sample(possible_edges, min(num_edges, len(possible_edges)))
if len(possible_edges) == 0:
return False
self._test_disconnected(story, possible_edges)
puzzle.add_fact(fact_type='disconnected', fact=possible_edges)
return puzzle
def add_facts(self):
"""
For a given puzzle, add different types of facts
- 1 : Provide supporting facts. After creating the essential fact graph, expand on any
k number of edges (randomly)
- 2: Irrelevant facts: after creating the relevant fact graph, expand on an edge,
but only provide dangling expansions
- 3: Disconnected facts: along with relevant facts, provide a tree which is completely
separate from the proof path
- 4: Random attributes: school, place of birth, etc.
If unable to add the required facts, return False
:return:
"""
mark_ids_for_deletion = []
for puzzle_id in self.puzzles.keys():
puzzle = self.add_facts_to_puzzle(self.puzzles[puzzle_id])
if puzzle:
self.puzzles[puzzle_id] = puzzle
else:
mark_ids_for_deletion.append(puzzle_id)
for id in mark_ids_for_deletion:
del self.puzzles[id]
def precompute_expansions(self, edge_list, tp='family'):
"""
Given a list of edges, precompute the one level expansions on all of them
Given (x,y) -> get (x,z), (z,y) s.t. it follows our set of rules
Store the expansions as a list : (x,y) : [[(x,a),(a,y)], [(x,b),(b,y)] ... ]
:param edge_list:
:return:
"""
for edge in edge_list:
relation = self.anc.family[edge][tp]
if relation not in self.comp_rules_inv[tp]:
continue
rules = list(self.comp_rules_inv[tp][relation])
for rule in rules:
for node in self.anc.family_data.keys():
e1 = (edge[0], node)
e2 = (node, edge[1])
if e1 in self.anc.family and self.anc.family[e1][tp] == rule[0] \
and e2 in self.anc.family and self.anc.family[e2][tp] == rule[1]:
new_edge_pair = [e1, e2]
if edge not in self.expansions:
self.expansions[edge] = []
self.expansions[edge].append(new_edge_pair)
self.expansions[edge] = it.cycle(self.expansions[edge])
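            # it.cycle lets expand_new() hand out a different precomputed
            # decomposition of the same edge on each call, round-robin.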
def expand_new(self, edge, tp='family'):
relation = self.anc.family[edge][tp]
if relation not in self.comp_rules_inv[tp]:
return None
if edge in self.expansions:
return self.expansions[edge].__next__()
else:
return None
def expand(self, edge, tp='family'):
"""
Given an edge, break the edge into two compositional edges from the given
family graph. Eg, if input is (x,y), break the edge into (x,z) and (z,y)
following the rules
:param edge: Edge to break
:param ignore_edges: Edges to ignore while breaking an edge. Used to ignore loops
:param k: if k == 0, stop recursing
:return:
"""
relation = self.anc.family[edge][tp]
if relation not in self.comp_rules_inv[tp]:
return None
rules = list(self.comp_rules_inv[tp][relation])
while len(rules) > 0:
rule = random.choice(rules)
rules.remove(rule)
for node in self.anc.family_data.keys():
e1 = (edge[0], node)
e2 = (node, edge[1])
if e1 in self.anc.family and self.anc.family[e1][tp] == rule[0] \
and e2 in self.anc.family and self.anc.family[e2][tp] == rule[1]:
return [e1, e2]
return None
def derive(self, edge_list, k=3):
"""
Given a list of edges, expand elements from the edge until we reach k
:param edge_list:
:param k:
:return:
"""
proof_trace = []
seen = set()
        while k > 0:
            remaining = list(set(edge_list) - seen)
            if len(remaining) == 0:
                break
            e = random.choice(remaining)
seen.add(e)
ex_e = self.expand_new(e)
if ex_e and (ex_e[0] not in seen and ex_e[1] not in seen and ex_e[0][::-1] not in seen and ex_e[1][::-1] not in seen):
pos = edge_list.index(e)
edge_list.insert(pos, ex_e[-1])
edge_list.insert(pos, ex_e[0])
edge_list.remove(e)
#edge_list.extend(ex_e)
# format proof into human readable form
e = self._format_edge_rel(e)
ex_e = [self._format_edge_rel(x) for x in ex_e]
proof_trace.append({e:ex_e})
k = k-1
return edge_list, proof_trace
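    # Example: derive([(a, c)], k=2) may expand (a, c) -> [(a, b), (b, c)] and
    # then (b, c) -> [(b, d), (d, c)], returning the 3-edge story
    # [(a, b), (b, d), (d, c)] together with the human-readable proof trace.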
def _get_edge_rel(self, edge, rel_type='family'):
# get node attributes
node_b_attr = self.anc.family_data[edge[1]]
relation = self.anc.family[edge][rel_type]
edge_rel = self.relations_obj[relation][node_b_attr.gender]
return edge_rel
def get_edge_relation(self, edge, rel_type='family'):
node_b_attr = self.anc.family_data[edge[1]]
relation = self.anc.family[edge][rel_type]
edge_rel = self.relations_obj[relation][node_b_attr.gender]
return edge_rel['rel']
def _format_edge(self, edge):
"""
Given an edge (x,y), format it into (name(x), name(y))
:param edge:
:return:
"""
node_a_attr = self.anc.family_data[edge[0]]
node_b_attr = self.anc.family_data[edge[1]]
new_edge = (node_a_attr.name, node_b_attr.name)
return new_edge
def _format_edge_rel(self, edge, rel_type='family'):
"""
Given an edge (x,y), format it into (name(x), rel(x,y), name(y))
:param edge:
:return:
"""
node_a_attr = self.anc.family_data[edge[0]]
node_b_attr = self.anc.family_data[edge[1]]
edge_rel = self._get_edge_rel(edge, rel_type)['rel']
new_edge = (node_a_attr.name, edge_rel, node_b_attr.name)
return new_edge
def stringify(self, edge, rel_type='family'):
"""
Build story string from the edge
:param edge: tuple
:return:
"""
# get node attributes
node_a_attr = self.anc.family_data[edge[0]]
node_b_attr = self.anc.family_data[edge[1]]
relation = self._get_edge_rel(edge, rel_type)
placeholders = relation['p']
placeholder = random.choice(placeholders)
node_a_name = node_a_attr.name
node_b_name = node_b_attr.name
assert node_a_name != node_b_name
if self.boundary:
node_a_name = '[{}]'.format(node_a_name)
node_b_name = '[{}]'.format(node_b_name)
text = placeholder.replace('e_1', node_a_name)
text = text.replace('e_2', node_b_name)
return text + '. '
def generate_puzzles(self, weight=None):
"""
Prune the puzzles according to weight
Deprecated: puzzle generation logic moved to `build`
:return:
"""
self.prune_puzzles(weight)
def generate_question(self, query):
"""
Given a query edge, generate a textual question from the question placeholder bank
Use args.question to either generate a relational question or a yes/no question
:param query:
:return:
"""
# TODO: return a question from the placeholder
# TODO: future work
return ''
def _flatten_tuples(self, story):
return list(sum(story, ()))
def _unique_nodes(self, story):
return set(self._flatten_tuples(story))
def _subset(self, story, fact, k=2):
"""
Whether at least k fact nodes are present in a given story
:param story:
:param fact:
:return:
"""
all_entities = self._unique_nodes(story)
all_fact_entities = self._unique_nodes(fact)
return len(all_entities.intersection(all_fact_entities)) >= k
## Testing modules
def _test_story(self, story):
"""
Given a list of edges of the story, test whether they are logically valid
(x,y),(y,z) is valid, (x,y),(x,z) is not
:param story: list of tuples
:return:
"""
for e_i in range(len(story) - 1):
assert story[e_i][-1] == story[e_i + 1][0]
def _test_disconnected(self, story, fact):
"""
        Given a story and the fact, check whether the fact is a disconnected fact
        If disconnected, then there is no node match between story and fact
:param story: Array of tuples
:param fact: Array of tuples
:return:
"""
all_entities = self._unique_nodes(story)
all_fact_entities = self._unique_nodes(fact)
assert len(all_entities.intersection(all_fact_entities)) == 0
def _test_irrelevant(self, story, fact):
"""
        Given a story and the fact, check whether the fact is an irrelevant fact
        If irrelevant, then there is exactly one node match between story and fact
:param story: Array of tuples
:param fact: Array of tuples
:return:
"""
all_entities = self._unique_nodes(story)
all_fact_entities = self._unique_nodes(fact)
assert len(all_entities.intersection(all_fact_entities)) == 1
def _test_supporting(self, story, fact):
"""
        Given a story and the fact, check whether the fact is a supporting fact
        If supporting, then there are at least 2 matching nodes between story and fact
:param story: Array of tuples
:param fact: Array of tuples
:return:
"""
all_entities = self._unique_nodes(story)
        all_fact_entities = self._unique_nodes(fact)
assert len(all_entities.intersection(all_fact_entities)) >= 2
|
clutrr-main
|
clutrr/relations/builder.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# File which was used in data collection from AMT using ParlAI-Mturk.
# Wrapper to communicate with backend database
# The database (Mongo) is used to maintain a set of collections
# - data we need to annotate : gold
# - dump for annotated data : mturk - this should also contain our manual tests
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
import pandas as pd
import random
import glob
import schedule
import time
import datetime
import nltk
import subprocess
from numpy.random import choice
import argparse
KOUSTUV_ID = "A1W0QQF93UM08"
PORT = 27017
COLLECTION = 'amt_study'
GOLD_TABLE = 'gold'
MTURK_TABLE = 'mturk'
REVIEW_TABLE = 'review' # special table only used when we use review-only mode
USER_BASE = '/private/home/koustuvs/'
CLUTRR_BASE = USER_BASE + 'mlp/clutrr-2.0/'
SQLITE_BASE = CLUTRR_BASE + 'mturk/parlai/mturk/core/run_data/'
DRIVE_PATH = USER_BASE + 'Google Drive/clutrr/'
class DB:
def __init__(self, host='localhost', port=PORT, collection=COLLECTION, test_prob=0.0):
# initiate the db connection
self.client = MongoClient(host, port)
#print("Connected to backend MongoDB data at {}:{}".format(host, port))
self.gold = self.client[collection][GOLD_TABLE]
self.mturk = self.client[collection][MTURK_TABLE]
self.review = self.client[collection][REVIEW_TABLE]
self.test_prob = test_prob
self.test_worker = KOUSTUV_ID
def _read_csv(self, path):
assert path.endswith('.csv')
return pd.read_csv(path)
def upload(self, data_path, db='gold'):
"""
Given a csv file, upload the entire dataframe in the particular db
:param data:
:param db:
:return:
"""
print("Reading {}".format(data_path))
data = self._read_csv(data_path)
records = data.to_dict(orient='records')
# add used counter if gold and test
# add reviewed counter if mturk
num_records = len(records)
print("Number of records found : {}".format(len(records)))
for rec in records:
if db == 'gold':
rec['used'] = 0
else:
rec['reviewed'] = 0
sents = nltk.sent_tokenize(rec['story'])
rec['relation_length'] = len(sents)
mdb = getattr(self, db)
# prune the records which are already present in the database
keep_idx = []
for rec_idx, rec in enumerate(records):
fd = mdb.find({'id': rec['id']}).count()
if fd == 0:
keep_idx.append(rec_idx)
records = [records[idx] for idx in keep_idx]
num_kept = len(records)
print("Number of records already in db : {}".format(num_records - num_kept))
if len(records) > 0:
r = mdb.insert_many(records)
print("Inserted {} records in db {}".format(len(records), db))
def update_gender(self, data_path):
"""
Update the genders
:param data_path:
:return:
"""
print("Reading {}".format(data_path))
data = self._read_csv(data_path)
for i, row in data.iterrows():
self.mturk.update_many({'gold_id': ObjectId(row['_id'])}, {"$set": {'genders': row['genders']}}, upsert=False)
print('Updated {} records'.format(len(data)))
def choose_relation(self):
        # average 'used' count per relation length
avg_used = list(self.gold.aggregate([{'$group': {'_id': '$relation_length', 'avg': {'$avg': '$used'}}}]))
# normalize
avg = [rel['avg'] for rel in avg_used]
relations = [rel['_id'] for rel in avg_used]
        # don't serve relation 3 for the moment
#rel_idx = relations.index(3)
#del relations[rel_idx]
#del avg[rel_idx]
print("Found {} distinct relations".format(relations))
norm_avg = self._norm(avg)
        # invert the probabilities so the least-used relation lengths are sampled most often
delta = 0.01
norm_avg = [1 / i + delta for i in norm_avg]
norm_avg = self._norm(norm_avg)
rand_relation = int(choice(relations, 1, p=norm_avg)[0])
print("Choosing relation {}".format(rand_relation))
return rand_relation
def get_gold(self, rand_relation=None):
"""
Find the gold record to annotate.
Rotation policy: first randomly choose a relation_length, then choose the least used
annotation
:return:
"""
if not rand_relation:
rand_relation = self.choose_relation()
print("Randomly choosing {}".format(rand_relation))
record = self.gold.find_one({'relation_length': rand_relation}, sort=[("used",1)])
return record
def get_gold_by_id(self, id=''):
"""
Get a specific gold record by id
:param id:
:return:
"""
try:
record = self.gold.find_one({'_id': ObjectId(id)})
except:
record = None
return record
def _norm(self, arr):
s = sum(arr)
return [r/s for r in arr]
def get_peer(self, worker_id='test', relation_length=2):
"""
Get an annotation which is not done by the current worker, and which isn't reviewed
Also, no need to choose relation of length 1
With some probability, choose our test records
:param worker_id:
:param relation_length:
:return: None if no suitable candidate found
"""
using_test = False
record = None
if relation_length == 1:
relation_length = random.choice([2,3])
print("Choosing records with test probability {}".format(self.test_prob))
if random.uniform(0,1) <= self.test_prob:
using_test = True
record_cursor = self.mturk.find({'worker_id': self.test_worker, 'relation_length': relation_length},
sort=[("used",1)])
print("Choosing a test record to annotate")
else:
record_cursor = self.mturk.find({'worker_id': {"$nin": [worker_id, self.test_worker]}, 'relation_length': relation_length, 'used':1})
print("Choosing a review record to annotate")
rec_found = False
if record_cursor.count() > 0:
rec_found = True
print("Found a record to annotate")
if not using_test and not rec_found:
# if no candidate peer is found, default to test
record_cursor = self.mturk.find({'worker_id': self.test_worker, 'relation_length': relation_length},
sort=[("used",1)])
print("No records found, reverting back to test")
if record_cursor.count() > 0:
record = random.choice(list(record_cursor))
if not record:
# did not find either candidate peer nor test, raise error
raise FileNotFoundError("no candidate found in db")
return record
def save_review(self, record, worker_id, rating=0.0):
"""
Save the review. If its correct, then 1.0, else 0.0.
:param record:
:param rating:
:return:
"""
assert 'reviews' in record
assert 'reviewed_by' in record
record['used'] = len(record['reviewed_by']) + 1
record['reviewed_by'].append({worker_id: rating})
self.mturk.update_one({'_id': record['_id']}, {"$set": record}, upsert=False)
def save_annotation(self, record, worker_id):
""" Save the user annotation
"""
        # reset review bookkeeping regardless of any stale fields on the record
        record['worker_id'] = worker_id
        record['reviews'] = 0
        record['reviewed_by'] = []
record['used'] = 0
# change the id
record['gold_id'] = record['_id']
del record['_id']
self.mturk.insert_one(record)
self.gold.update_one({'_id': record['gold_id']}, {'$inc': {'used': 1}}, upsert=False)
def done_review(self, worker_id, assignment_id, task_group_id):
"""
Mark with timestamp when a worker has done a review
:param worker_id:
:return:
"""
self.review.insert_one({'worker_id':worker_id,
'assignment_id': assignment_id,
'task_group_id':task_group_id,
'accepted': ''})
def import_data(self):
path = CLUTRR_BASE + 'mturk_data/*'
print("Checking the path: {}".format(path))
files = glob.glob(path)
print("Files found : {}".format(len(files)))
for fl in files:
if fl.endswith('gold.csv'):
self.upload(fl, db='gold')
if fl.endswith('mturk.csv'):
self.upload(fl, db='mturk')
def export(self, base_path=CLUTRR_BASE, batch_size=100):
"""
Dump datasets into csv
:return:
"""
print("Exporting datasets ...")
gold = pd.DataFrame(list(self.gold.find()))
gold_path = os.path.join(base_path,"amt_gold.csv")
mturk_path = base_path
mturk = pd.DataFrame(list(self.mturk.find()))
print("Gold : {} records to {}".format(len(gold), gold_path))
print("Mturk : {} records to {}".format(len(mturk), mturk_path))
gold.to_csv(gold_path)
# save data in batches
mturk_splits = splitDataFrameIntoSmaller(mturk, chunkSize=batch_size)
for i, mturk_b in enumerate(mturk_splits):
mturk_b.to_csv(os.path.join(mturk_path, "amt_mturk_{}.csv".format(i)))
def export_mongodb(self, path=CLUTRR_BASE):
"""
Export the entire mongodb dump to location, preferably a google drive
:param path:
:return:
"""
print("Exporting local mongodb to {}".format(path))
command = "mongodump --db {} --out {} --gzip".format(COLLECTION, path)
res = subprocess.run(command.split(" "), stdout=subprocess.PIPE)
print(res)
def export_sqlite(self, path=CLUTRR_BASE, sqlite_path=SQLITE_BASE):
"""
Zip and export the sqlite database in sqlite path
:param path:
:return:
"""
print("Export local sqlite db to {}".format(path))
command = "zip -q -r {}/run_data.zip {}".format(path, sqlite_path)
res = subprocess.run(command.split(" "), stdout=subprocess.PIPE)
print(res)
def update_relation_length(self):
print("Updating...")
gold = self.gold.find({})
up = 0
for rec in gold:
rec['relation_length'] = len(nltk.sent_tokenize(rec['story']))
self.gold.update_one({'_id': rec['_id']}, {"$set": rec}, upsert=False)
up += 1
mturk = self.mturk.find({})
for rec in mturk:
rec['relation_length'] = len(nltk.sent_tokenize(rec['story']))
self.mturk.update_one({'_id': rec['_id']}, {"$set": rec}, upsert=False)
up += 1
print("Updated {} records".format(up))
def close_connections(self):
#print("Closing connection")
self.client.close()
def import_job():
data = DB(port=PORT)
data.import_data()
data.close_connections()
def export_job(folder, batch_size=100):
save_path = os.path.join(CLUTRR_BASE, folder)
if not os.path.exists(save_path):
os.mkdir(save_path)
data = DB(port=PORT)
data.export(base_path=save_path, batch_size=batch_size)
save_user_path = os.path.join(USER_BASE, folder)
if not os.path.exists(save_user_path):
os.mkdir(save_user_path)
data.export(base_path=save_user_path, batch_size=batch_size)
data.close_connections()
def backup_job():
data = DB(port=PORT)
data.export_mongodb()
data.export_sqlite()
def info_job():
data = DB(port=PORT)
print("Generating statistics at {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
gold_c = data.gold.find({}).count()
pending_c = data.gold.count_documents({'used':0})
avg_used = list(data.gold.aggregate([{'$group': {'_id':None,'avg' : {'$avg' : '$used'}}}]))
if len(avg_used) > 0:
avg_used = avg_used[0]['avg']
mturk_c = data.mturk.count_documents({})
uniq_workers = len(data.mturk.find({}).distinct("worker_id"))
mturk_c_1 = data.mturk.count_documents({'relation_length':1})
gold_agg = list(data.gold.aggregate([{'$group': {'_id': {'relation_length': '$relation_length', 'f_comb': '$f_comb'},
'avg' : {'$avg' : '$used'}}}, {'$sort': {"_id.relation_length": 1}}]))
mturk_reviews = list(data.mturk.aggregate([{'$group': {'_id': None, 'total_rev': {'$sum': {'$size': '$reviewed_by'}}}}]))
for rec in gold_agg:
if rec['_id']['relation_length'] != 3:
print(rec['_id']['relation_length'], '\t', rec['_id']['f_comb'], '\t', rec['avg'])
mturk_c_2 = data.mturk.count_documents({'relation_length':2})
#gold_c_2_u = list(data.gold.aggregate([{'$group': {'_id':None,'relation_length':2, 'avg' : {'$avg' : '$used'}}}]))[0]['avg']
mturk_c_3 = data.mturk.count_documents({'relation_length':3})
#gold_c_3_u = list(data.gold.aggregate([{'$group': {'_id':None,'relation_length':3, 'avg' : {'$avg' : '$used'}}}]))[0]['avg']
print("Number of gold data : {} \n ".format(gold_c) +
"Number of pending rows to annotate : {} \n ".format(pending_c) +
"Average times each gold row has been used : {} \n ".format(avg_used) +
"Number of annotations given : {} \n".format(mturk_c) +
"Unique workers : {}\n".format(uniq_workers) +
"Number of 1 relations annotated : {}\n".format(mturk_c_1) +
"Number of 2 relations annotated : {}\n".format(mturk_c_2) +
"Number of 3 relations annotated : {}\n".format(mturk_c_3) +
"Total reviews provided : {}\n".format(mturk_reviews[0]['total_rev']))
def update_genders():
data = DB(port=PORT)
data.update_gender('/private/home/koustuvs/mlp/clutrr-2.0/amt_gold_gender.csv')
data.close_connections()
def test_get_gold(k=100):
data = DB(port=PORT)
rel_chosen = {1:0,2:0,3:0}
for i in range(k):
record = data.get_gold()
rel_chosen[record['relation_length']] +=1
print(rel_chosen)
data.close_connections()
def splitDataFrameIntoSmaller(df, chunkSize=10000):
    listOfDf = list()
    # ceil division avoids a trailing empty chunk when len(df) is a multiple of chunkSize
    numberChunks = (len(df) + chunkSize - 1) // chunkSize
    for i in range(numberChunks):
        listOfDf.append(df[i * chunkSize:(i + 1) * chunkSize])
    return listOfDf
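# Example: a 250-row frame with chunkSize=100 yields chunks of 100, 100 and 50 rows.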
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# graph parameters
parser.add_argument("--server", action='store_true', help="start the server")
parser.add_argument("--import_db", default='', type=str, help="Import the files to server")
parser.add_argument("--batch_size", default=100, type=int, help="Export batch size")
parser.add_argument("--schedule_interval", default=10, type=int, help="schedule interval minutes")
parser.add_argument("--save_folder", default='amt_annotated_data', type=str, help="data location")
args = parser.parse_args()
if len(args.import_db) > 0:
import_job()
if args.server:
export_job(args.save_folder, batch_size=args.batch_size)
info_job()
#backup_job()
print("Scheduling jobs...")
schedule.every(args.schedule_interval).minutes.do(export_job, args.save_folder, batch_size=args.batch_size)
schedule.every(args.schedule_interval).minutes.do(info_job)
# redundant backups
schedule.every().day.at("23:00").do(backup_job)
while True:
schedule.run_pending()
time.sleep(1)
|
clutrr-main
|
clutrr/utils/data_backend.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# Split the test files into their own task specific files
# Not required in actual data generation
import pandas as pd
import os
import glob
import json
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# graph parameters
parser.add_argument("--data_folder", default='data_emnlp', type=str, help="data folder")
args = parser.parse_args()
base_path = os.path.abspath(os.path.join(os.pardir, os.pardir))
print(base_path)
# search for directories
dirs = glob.glob(os.path.join(base_path, args.data_folder, '*'))
dirs = [dir for dir in dirs if os.path.isdir(dir)]
print("Found {} directories".format(len(dirs)))
print(dirs)
for folder in dirs:
# read config file
config = json.load(open(os.path.join(folder, 'config.json')))
# get test_file
test_files = glob.glob(os.path.join(folder, '*_test.csv'))
    # a combined test file covers several comma-joined tasks, so its name contains commas
    test_files = [t for t in test_files if len(t.split(',')) > 1]
for test_file in test_files:
df = pd.read_csv(test_file)
test_fl_name = test_file.split('/')[-1]
tasks = df.task_name.unique()
for task in tasks:
dft = df[df.task_name == task]
tname = task.split('task_')[-1]
flname = tname + '_test.csv'
dft.to_csv(os.path.join(folder, flname))
config['args'][flname] = config['args'][test_fl_name]
            config['test_tasks'][tname] = flname
del config['args'][test_fl_name]
json.dump(config, open(os.path.join(folder, 'config.json'),'w'))
# backup the original test_files
for test_file in test_files:
os.rename(test_file, test_file.replace('_test','_backupt'))
print("splitting done")
|
clutrr-main
|
clutrr/utils/test_splitter.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
# file to create and maintain an index.html file which will contain a table of datasets for easy maintenance
import glob
import json
import os
import requests
import datetime
import pandas as pd
import argparse
template_header = '''
<html><head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimal-ui">
<title>CLUTRR Dataset List</title>
<link rel="stylesheet" href="style.css">
<style>
body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
</style>
</head>
<body>
<article class="markdown-body">
<h1><a id="user-content-github-markdown-css-demo" class="anchor" href="#github-markdown-css-demo" aria-hidden="true"><span class="octicon octicon-link"></span></a>CLUTRR v2.0 Dataset List</h1>
<p><a name="user-content-headers"></a></p><a name="user-content-headers">
</a>
<p>Contains the list of datasets and their generation configuration.</p>
<table><thead>
<tr>
<th>Dataset name</th>
<th>Name</th>
<th align="center">Training</th>
<th align="right">Number of Training rows</th>
<th align="right">Testing</th>
<th align="right">Number of Testing rows</th>
<th align="right">Time created</th>
<th align="right">Holdout</th>
</tr>
</thead><tbody>
'''
template_footer = '''
</tbody></table>
<p>For questions, contact Koustuv Sinha. A csv of this table is <a href="{}">available here.</a></p>
</article>
</body></html>
'''
#
CSS_TEMPLATE = 'https://sindresorhus.com/github-markdown-css/github-markdown.css'
def generate_webpage(data_path):
"""
Reads the list of directories, reads their config file, and generates a Github flavored webpage
<tr>
<td></td>
<td></td>
<td></td>
</tr>
:return:
"""
folders = glob.glob(os.path.join(data_path, '*', ''))
print("Found {} folders.".format(len(folders)))
web_page = template_header
generated_at = '<p>This webpage is autogenerated at {}</p>'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
data_names = []
unames = []
train = []
test = []
num_train = []
num_test = []
times = []
holdouts = []
for folder in folders:
print('Reading {}'.format(folder))
config = json.load(open(os.path.join(folder, 'config.json')))
train_task = config['train_task'].keys()
test_tasks = config['test_tasks'].keys()
train_rows = sum([config['args'][config['train_task'][tr]]['num_rows'] for tr in train_task])
        test_rows = sum([config['args'][config['test_tasks'][tr]]['num_rows'] for tr in test_tasks])
one_tt = list(train_task)[0]
name = folder.split('/')[-2]
name_url = '<a href={}>{}</a>'.format(name + '.zip', name)
gen_time = datetime.datetime.fromtimestamp(os.stat(folder).st_mtime).strftime("%y-%m-%d / %H:%M")
holdout = ','.join([config['args'][config['train_task'][tr]]['holdout'] if 'holdout' in config['args'][config['train_task'][tr]] else 'None' for tr in train_task])
data_names.append(config['args'][config['train_task'][one_tt]]['data_name'])
unames.append(name_url)
train.append(','.join(train_task))
num_train.append(train_rows)
num_test.append(test_rows)
test.append(','.join(test_tasks))
times.append(gen_time)
holdouts.append(holdout)
df = pd.DataFrame(data={'data_name': data_names, 'unames': unames, 'train': train, 'test':test, 'num_train':num_train, 'num_test':num_test, 'times':times, 'holdout':holdouts})
df.sort_values(by=['times'], inplace=True)
data_csv = os.path.join(data_path, 'dataset_details.csv')
df.to_csv(data_csv)
for i,row in df.iterrows():
row_web = '<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
row['data_name'], row['unames'], row['train'], row['num_train'], row['test'], row['num_test'], row['times'], row['holdout'])
web_page += row_web
web_page += generated_at
web_page += template_footer.format('dataset_details.csv')
css = requests.get(CSS_TEMPLATE).text
with open(os.path.join(data_path, 'style.css'), 'w') as fp:
fp.write(css)
with open(os.path.join(data_path, 'index.html'), 'w') as fp:
fp.write(web_page)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="/home/ml/ksinha4/clutrr/data", help="output_dir")
args = parser.parse_args()
generate_webpage(args.output_dir)
|
clutrr-main
|
clutrr/utils/web.py
|
clutrr-main
|
clutrr/utils/__init__.py
|
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
import itertools as it
import numpy as np
import csv
import pandas as pd
import random
def pairwise(iterable):
"""
Recipe from itertools
:param iterable:
:return: "s -> (s0,s1), (s1,s2), (s2, s3), ..."
"""
a, b = it.tee(iterable)
next(b, None)
return zip(a, b)
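# Illustrative usage (not in the original source):
#   list(pairwise([1, 2, 3, 4]))  ->  [(1, 2), (2, 3), (3, 4)]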
def prob_dist(rows):
row_dict = {}
for row in rows:
if row[-1] not in row_dict:
row_dict[row[-1]] = []
row_dict[row[-1]].append(row[:2])
rel_probs = {k: (len(v) / len(rows)) for k, v in row_dict.items()}
return rel_probs
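# Illustrative sketch of what prob_dist computes (assuming rows are
# (entity_a, entity_b, relation) triples keyed by their last element,
# as used by split_train_test below):
#   rows = [('a', 'b', 'father'), ('c', 'd', 'father'), ('e', 'f', 'mother')]
#   prob_dist(rows)  ->  {'father': 2/3, 'mother': 1/3}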
def split_train_test(args, rows):
# split training testing
r1 = prob_dist(rows)
indices = range(len(rows))
    mask_i = set(np.random.choice(indices,
                                  int(len(indices) * args.train_test_split),
                                  replace=False))
    test_indices = [i for i in indices if i not in mask_i]
    train_indices = [i for i in indices if i in mask_i]
train_rows = [rows[ti] for ti in train_indices]
r_train = prob_dist(train_rows)
test_rows = [rows[ti] for ti in test_indices]
r_test = prob_dist(test_rows)
train_rows = [row[:-1] for row in train_rows]
test_rows = [row[:-1] for row in test_rows]
return train_rows, test_rows
def write2file(args, rows, filename):
with open(filename, 'w') as fp:
for argi in vars(args):
fp.write('# {} {}\n'.format(argi, getattr(args, argi)))
writer = csv.writer(fp)
writer.writerow(['story','summary'])
for row in rows:
writer.writerow(row)
def sanity_check(filename, rows):
## sanity check
df = pd.read_csv(filename, skip_blank_lines=True, comment='#')
print('Total rows : {}'.format(len(df)))
assert len(rows) == len(df)
class CDS:
def combinationSum(self, candidates, target):
res = []
candidates.sort()
self.dfs(candidates, target, 0, [], res)
return res
def dfs(self, nums, target, index, path, res):
if target < 0:
return # backtracking
if target == 0:
res.append(path)
return
for i in range(index, len(nums)):
self.dfs(nums, target - nums[i], i, path + [nums[i]], res)
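# Illustrative usage of CDS.combinationSum (standard combination-sum with
# repetition; used below to enumerate chunk sizes that sum to the story length):
#   CDS().combinationSum([1, 2, 3], 4)
#   ->  [[1, 1, 1, 1], [1, 1, 2], [1, 3], [2, 2]]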
class unique_element:
def __init__(self, value, occurrences):
self.value = value
self.occurrences = occurrences
def perm_unique(elements):
eset = set(elements)
listunique = [unique_element(i, elements.count(i)) for i in eset]
u = len(elements)
return perm_unique_helper(listunique, [0] * u, u - 1)
def perm_unique_helper(listunique, result_list, d):
if d < 0:
yield tuple(result_list)
else:
for i in listunique:
if i.occurrences > 0:
result_list[d] = i.value
i.occurrences -= 1
for g in perm_unique_helper(listunique, result_list, d - 1):
yield g
i.occurrences += 1
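# Illustrative usage of perm_unique (permutations of a multiset without
# duplicate results):
#   sorted(perm_unique([1, 1, 2]))  ->  [(1, 1, 2), (1, 2, 1), (2, 1, 1)]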
def comb_indexes(sn, max_seq_len=3):
"""
    The idea is to generate all ordered partitions of the sequence,
    e.g. [a,b,c,d] => [[a],[b],[c],[d]], [[a,b],[c],[d]], [[a,b,c],[d]], etc.,
    where no chunk is longer than max_seq_len
:param sn:
:param max_seq_len:
:return:
"""
s_n = len(sn)
cd = CDS()
some_comb = cd.combinationSum(list(range(1,max_seq_len+1)),s_n)
all_comb = [list(perm_unique(x)) for x in some_comb]
all_comb = [y for r in all_comb for y in r]
pairs = []
for pt in all_comb:
rsa = []
stt = 0
for yt in pt:
rsa.append(sn[stt:stt+yt])
stt += yt
pairs.append(rsa)
return pairs
def choose_random_subsequence(sn, max_seq_len=3):
return random.choice(comb_indexes(sn, max_seq_len))
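# Illustrative usage of comb_indexes / choose_random_subsequence (result order
# may vary):
#   comb_indexes(['a', 'b', 'c'], max_seq_len=2)
#   ->  [[['a'], ['b'], ['c']], [['a', 'b'], ['c']], [['a'], ['b', 'c']]]
# choose_random_subsequence picks one such ordered partition at random.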
|
clutrr-main
|
clutrr/utils/utils.py
|
clutrr-main
|
clutrr/actors/__init__.py
|
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
import numpy as np
import names
import copy
import random
from clutrr.actors.actor import Actor, Entity
from clutrr.store.store import Store
#store = Store()
class Ancestry:
"""
Ancestry of people to simulate
Class to create a skeleton graph
Changes:
- Maintain a dictionary instead of networkx graph.
- The keys to the dictionary will be (node_id_x, node_id_y) : a dict of relations
            - a dict of relations ensures that family, work, etc. relation types stay logically separate
            - keys of the relations:
- "family" --> family type relations
- "work" --> work related relations
- Maintain a separate dictionary for mapping of node_id to details
- Relation keyword to be taken from rules_store
"""
def __init__(self, args, store:Store,
relationship_type={'SO':1,'child':2}, taken_names=None):
self.family = {} # dict (node_id_a, node_id_b) : rel dict
self.family_data = {} # dict to hold node_id details
self.work_data = {} # dict to hold work location id details
self.store = store
self.max_levels = args.max_levels
self.min_child = args.min_child
self.max_child = args.max_child
self.p_marry = args.p_marry
self.relationship_type = relationship_type
self.levels = 0 # keep track of the levels
self.node_ct = 0
self.flipped = [] # track of nodes which are gender flipped
self.taken_names = taken_names if taken_names else copy.deepcopy(self.store.attr_names) # keep track of names which are already taken
self.simulate()
#self.add_work_relations()
def simulate(self):
"""
Main function to run the simulation to create a family tree
:return:
"""
self.node_ct = 0
self.levels = random.randint(1,self.max_levels)
# we are root, for now just add one head of family
gender = 'male'
nodes = self.add_members(gender=gender, num=1)
parents = nodes
for level in range(self.max_levels):
# build generation
generation_nodes = []
for node in parents:
# marry with probability p_marry
decision_marry = np.random.choice([True,False],1,p=[self.p_marry, 1-self.p_marry])
if decision_marry:
# add the partner
nodes = self.add_members(gender=self.toggle_gender(node), num=1)
self.make_relation(node, nodes[0], relation='SO')
# always leave the last level as single children
if level != self.max_levels - 1:
# add the children for this parent
num_childs = random.randint(self.min_child, self.max_child)
child_nodes = self.add_members(num=num_childs)
if len(child_nodes) > 0:
for ch_node in child_nodes:
self.make_relation(node, ch_node, relation='child')
self.make_relation(nodes[0], ch_node, relation='child')
generation_nodes.extend(child_nodes)
parents = generation_nodes
def add_members(self, gender='male', num=1):
"""
Add members into family
:param gender: male/female. if num > 1 then randomize
:param num: default 1.
        :return: list of Actor nodes added
"""
node_id = self.node_ct
added_nodes = []
for x in range(num):
if num > 1:
gender = random.choice(['male', 'female'])
# select a name that is not taken
name = names.get_first_name(gender=gender)
while name in self.taken_names:
name = names.get_first_name(gender=gender)
self.taken_names.add(name)
node = Actor(
name=name, gender=gender, node_id=node_id, store=self.store)
added_nodes.append(node)
self.family_data[node_id] = node
node_id += 1
self.node_ct = node_id
return added_nodes
def make_relation(self, node_a, node_b, relation='SO'):
"""
Add a relation between two nodes
:param node_a: integer id of the node
:param node_b: integer id of the node
:param relation: either SO->1, or child->2
:return:
"""
node_a_id = node_a.node_id
node_b_id = node_b.node_id
rel_tuple = (node_a_id, node_b_id)
if rel_tuple not in self.family:
self.family[rel_tuple] = {'family': relation}
def toggle_gender(self, node):
if node.gender == 'male':
return 'female'
else:
return 'male'
def print_family(self):
ps = ','.join(["{}.{}.{}".format(k, v.name[0], v.gender) for k,v in self.family_data.items()])
return ps
def next_flip(self):
"""
Given an ancestry,
- maintain a set of nodes who have already been gender flipped
- sample one node to flip from the rest
- check if the node contains a SO relationship. if so, toggle both
- add the flipped nodes into the already flipped pile
- if no nodes are left, then return False. else return True
:return:
"""
candidates = list(set(self.family_data.keys()) - set(self.flipped))
if len(candidates) == 0:
# all candidates flipped already
# reset flip
self.flipped = []
else:
node = random.choice(candidates)
relations_with_node = [node_pair for node_pair in self.family.keys() if node_pair[0] == node]
SO_relation = [node_pair for node_pair in relations_with_node if self.family[node_pair]['family'] == 'SO']
assert len(SO_relation) <= 1
if len(SO_relation) == 1:
so_node = SO_relation[0][1]
# flip both
self.family_data[node].gender = self.toggle_gender(self.family_data[node])
self.family_data[so_node].gender = self.toggle_gender(self.family_data[so_node])
# exchange their names too
tmp_name = self.family_data[node].name
self.family_data[node].name = self.family_data[so_node].name
self.family_data[so_node].name = tmp_name
self.flipped.append(node)
self.flipped.append(so_node)
#print("flipping couples ...")
#print("Flipped {} to {}".format(node, self.family_data[node].gender))
#print("Flipped {} to {}".format(so_node, self.family_data[so_node].gender))
            else:
                # node has no SO relation; flip this single node
                self.family_data[node].gender = self.toggle_gender(self.family_data[node])
                # choose a new, untaken, gender-appropriate name
                gender = self.family_data[node].gender
                name = names.get_first_name(gender=gender)
                while name in self.taken_names:
                    name = names.get_first_name(gender=gender)
                self.taken_names.add(name)
                self.family_data[node].name = name
self.flipped.append(node)
#print("flipping singles ...")
#print("Flipped {} to {}".format(node, self.family_data[node].gender))
def add_work_relations(self, w=0.3):
"""
Policy of adding working relations:
- Add w work locations
- Divide the population into these w bins
- Add works_at relation
- Within each bin:
- Assign m managers
:return:
"""
num_pop = len(self.family_data)
        pop_ids = list(self.family_data.keys())  # materialize so random.sample works
work_locations = random.sample(self.store.attribute_store['work']['options'], int(num_pop * w))
node_ct = self.node_ct
work_bins = {}
pop_per_loc = num_pop // len(work_locations)
for wl in work_locations:
self.work_data[node_ct] = Entity(name=wl, etype='work')
            members = random.sample(pop_ids, pop_per_loc)  # renamed from ``w`` to avoid shadowing the parameter
            pop_ids = list(set(pop_ids) - set(members))
            work_bins[wl] = {"id": node_ct, "w": members}
node_ct+=1
if len(pop_ids) > 0:
work_bins[work_locations[-1]]["w"].extend(pop_ids)
self.node_ct = node_ct
for wl in work_locations:
e_id = work_bins[wl]["id"]
pops = work_bins[wl]["w"]
for p in pops:
edge = (e_id, p)
if edge not in self.family:
self.family[edge] = {'family':'', 'work': []}
if 'work' not in self.family[edge]:
self.family[edge]['work'] = []
self.family[edge]['work'].append('works_at')
# select manager
manager = random.choice(pops)
for p in pops:
edge = (p, manager)
if edge not in self.family:
self.family[edge] = {'family':'', 'work': []}
if 'work' not in self.family[edge]:
self.family[edge]['work'] = []
self.family[edge]['work'].append('manager')
if __name__ == '__main__':
    # Illustrative smoke test: Ancestry requires an args namespace and a Store;
    # the placeholder values below are assumptions, not project defaults.
    from argparse import Namespace
    args = Namespace(max_levels=2, min_child=1, max_child=3, p_marry=0.8,
                     attribute_store=None, relations_store=None, question_store=None, rules_store=None)
    anc = Ancestry(args, Store(args))
    anc.add_work_relations()
|
clutrr-main
|
clutrr/actors/ancestry.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
import random
class Actor:
"""
male or female actor
"""
def __init__(self, gender='male', name='', node_id=0, store={}):
self.gender = gender
self.name = name
self.node_id = node_id
## irrelevant attributes
## also make the irrelevant attributes random. Not every entity will have them all
self.attributes = {
'school' : '',
'location_born' : '',
'social_media_active' : False,
'social_media_preferred': '',
'political_views' : '',
'hobby' : '',
'sport': '',
}
self.attribute_store = store.attribute_store
self.fill_attributes()
def fill_attributes(self):
for key,val in self.attribute_store.items():
random_val = random.choice(val['options'])
random_attr = '[{}]'.format(random_val)
name = '[{}]'.format(self.name)
random_placeholder = random.choice(val['placeholders'])
text = random_placeholder.replace('e_x', name).replace('attr_x', random_attr) + ". "
self.attributes[key] = text
def __repr__(self):
return "<Actor name:{} gender:{} node_id:{}".format(
self.name, self.gender, self.node_id)
def __str__(self):
return "Actor node, name: {}, gender : {}, node_id : {}".format(
self.name, self.gender, self.node_id
)
class Entity:
"""
work or related entities
etype="work"
"""
def __init__(self, name='', etype='', node_id=0):
self.name = name
self.etype = etype
self.node_id = node_id
def __repr__(self):
return "<Entity name:{} etype: {} node_id:{}".format(
self.name, self.etype, self.node_id)
def __str__(self):
return "Entity node, name: {}, etype: {}, node_id : {}".format(
self.name, self.etype, self.node_id
)
|
clutrr-main
|
clutrr/actors/actor.py
|
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
import os
import json
import yaml
class Store:
def __init__(self,args):
attribute_store = args.attribute_store if args.attribute_store else 'attribute_store.json'
relations_store = args.relations_store if args.relations_store else 'relations_store.json'
question_store = args.question_store if args.question_store else 'question_store.json'
rules_store = args.rules_store if args.rules_store else 'rules_store.yaml'
self.base_path = os.path.dirname(os.path.realpath(__file__)).split('store')[0]
self.attribute_store = json.load(open(os.path.join(self.base_path, 'store', attribute_store)))
        self.relations_store = yaml.safe_load(open(os.path.join(self.base_path, 'store', relations_store)))
        self.question_store = yaml.safe_load(open(os.path.join(self.base_path, 'store', question_store)))
        self.rules_store = yaml.safe_load(open(os.path.join(self.base_path, 'store', rules_store)))
# TODO: do we need this?
## Relationship type has basic values 0,1 and 2, whereas the
## rest should be inferred. Like, child + child = 4 = grand
self.relationship_type = {
'SO': 1,
'child': 2,
'sibling': 0,
'in-laws': 3,
'grand': 4,
'no-relation': -1
}
attr_names = [v["options"] for k,v in self.attribute_store.items()]
self.attr_names = set([x for p in attr_names for x in p])
|
clutrr-main
|
clutrr/store/store.py
|
clutrr-main
|
clutrr/store/__init__.py
|
|
import os
import re
import setuptools
class CleanCommand(setuptools.Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system("rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info")
directory = os.path.dirname(os.path.abspath(__file__))
# Extract version information
path = os.path.join(directory, "metal", "__init__.py")
with open(path) as read_file:
text = read_file.read()
pattern = re.compile(r"^__version__ = ['\"]([^'\"]*)['\"]", re.MULTILINE)
version = pattern.search(text).group(1)
# Extract long_description
path = os.path.join(directory, "README.md")
with open(path) as read_file:
long_description = read_file.read()
setuptools.setup(
name="snorkel-metal",
version=version,
url="https://github.com/HazyResearch/metal",
description="A system for quickly generating training data with multi-task weak supervision",
long_description_content_type="text/markdown",
long_description=long_description,
license="Apache License 2.0",
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=[
"dill",
"networkx>=2.2",
"numpy",
"pandas",
"torch>=1.0",
"scipy",
"tqdm",
"scikit-learn",
],
include_package_data=True,
keywords="machine-learning ai information-extraction weak-supervision mtl multitask multi-task-learning",
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
project_urls={ # Optional
"Homepage": "https://hazyresearch.github.io/snorkel/",
"Source": "https://github.com/HazyResearch/metal/",
"Bug Reports": "https://github.com/HazyResearch/metal/issues",
"Citation": "https://arxiv.org/abs/1810.02840",
},
cmdclass={"clean": CleanCommand},
)
|
metal-master
|
setup.py
|
import numpy as np
import sklearn.metrics as skm
import torch
from metal.utils import arraylike_to_numpy, pred_to_prob
def accuracy_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate (micro) accuracy.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
Returns:
A float, the (micro) accuracy score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
if len(gold) and len(pred):
acc = np.sum(gold == pred) / len(gold)
else:
acc = 0
return acc
def coverage_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate (global) coverage.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
Returns:
A float, the (global) coverage score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
return np.sum(pred != 0) / len(pred)
def precision_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate precision for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for precision
Returns:
pre: The (float) precision score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FP = np.sum(positives * np.logical_not(trues))
if TP or FP:
pre = TP / (TP + FP)
else:
pre = 0
return pre
def recall_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate recall for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for recall
Returns:
rec: The (float) recall score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FN = np.sum(np.logical_not(positives) * trues)
if TP or FN:
rec = TP / (TP + FN)
else:
rec = 0
return rec
def fbeta_score(
gold, pred, pos_label=1, beta=1.0, ignore_in_gold=[], ignore_in_pred=[]
):
"""
    Calculate the F-beta score for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for f-beta
beta: The beta to use in the f-beta score calculation
Returns:
fbeta: The (float) f-beta score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
pre = precision_score(gold, pred, pos_label=pos_label)
rec = recall_score(gold, pred, pos_label=pos_label)
if pre or rec:
fbeta = (1 + beta ** 2) * (pre * rec) / ((beta ** 2 * pre) + rec)
else:
fbeta = 0
return fbeta
def f1_score(gold, pred, **kwargs):
return fbeta_score(gold, pred, beta=1.0, **kwargs)
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):
"""Compute the ROC AUC score, given the gold labels and predicted probs.
Args:
gold: A 1d array-like of gold labels
probs: A 2d array-like of predicted probabilities
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
Returns:
roc_auc_score: The (float) roc_auc score
"""
gold = arraylike_to_numpy(gold)
# Filter out the ignore_in_gold (but not ignore_in_pred)
# Note the current sub-functions (below) do not handle this...
if len(ignore_in_pred) > 0:
raise ValueError("ignore_in_pred not defined for ROC-AUC score.")
keep = [x not in ignore_in_gold for x in gold]
gold = gold[keep]
probs = probs[keep, :]
# Convert gold to one-hot indicator format, using the k inferred from probs
gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()
return skm.roc_auc_score(gold_s, probs)
def _drop_ignored(gold, pred, ignore_in_gold, ignore_in_pred):
"""Remove from gold and pred all items with labels designated to ignore."""
keepers = np.ones_like(gold).astype(bool)
for x in ignore_in_gold:
keepers *= np.where(gold != x, 1, 0).astype(bool)
for x in ignore_in_pred:
keepers *= np.where(pred != x, 1, 0).astype(bool)
gold = gold[keepers]
pred = pred[keepers]
return gold, pred
def _preprocess(gold, pred, ignore_in_gold, ignore_in_pred):
gold = arraylike_to_numpy(gold)
pred = arraylike_to_numpy(pred)
if ignore_in_gold or ignore_in_pred:
gold, pred = _drop_ignored(gold, pred, ignore_in_gold, ignore_in_pred)
return gold, pred
METRICS = {
"accuracy": accuracy_score,
"coverage": coverage_score,
"precision": precision_score,
"recall": recall_score,
"f1": f1_score,
"fbeta": fbeta_score,
"roc-auc": roc_auc_score,
}
def metric_score(gold, pred, metric, probs=None, **kwargs):
if metric not in METRICS:
msg = f"The metric you provided ({metric}) is not supported."
raise ValueError(msg)
    # Note: roc-auc is special-cased because it requires the predicted probabilities
    elif metric == "roc-auc":
        if probs is None:
            raise ValueError("ROC-AUC score requires the predicted probs.")
return roc_auc_score(gold, probs, **kwargs)
else:
return METRICS[metric](gold, pred, **kwargs)
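if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module): score a toy
    # prediction vector against gold labels using the registry above.
    gold = [1, 1, 2, 2]
    pred = [1, 2, 2, 2]
    print(metric_score(gold, pred, "accuracy"))                # 0.75
    print(metric_score(gold, pred, "precision", pos_label=1))  # 1.0
    print(metric_score(gold, pred, "recall", pos_label=1))     # 0.5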
|
metal-master
|
metal/metrics.py
|
import os
import random
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from scipy.sparse import issparse
from torch.utils.data import DataLoader, Dataset, TensorDataset
from metal.analysis import confusion_matrix
from metal.logging import Checkpointer, Logger, LogWriter, TensorBoardWriter
from metal.metrics import metric_score
from metal.utils import place_on_gpu, recursive_merge_dicts
# Import tqdm_notebook if in Jupyter notebook
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
# Only use tqdm notebook if not in travis testing
if "CI" not in os.environ:
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
class Classifier(nn.Module):
"""Simple abstract base class for a probabilistic classifier.
The main contribution of children classes will be an implementation of the
predict_proba() method. The relationships between the predict/score
functions are as follows:
score
|
predict
|
*predict_proba
    The predict_proba() method calculates the probabilistic labels,
the predict() method handles tie-breaking, and the score() method
calculates metrics based on predictions.
Args:
k: (int) The cardinality of the classifier
config: (dict) A config dictionary
"""
# A class variable indicating whether the class implements its own custom L2
# regularization (True) or not (False); in the latter case, generic L2 in
# the optimizer is used
implements_l2 = False
def __init__(self, k, config):
super().__init__()
self.config = config
self.multitask = False
self.k = k
# Set random seed
if self.config["seed"] is None:
self.config["seed"] = np.random.randint(1e6)
self._set_seed(self.config["seed"])
# Confirm that cuda is available if config is using CUDA
if self.config["device"] != "cpu" and not torch.cuda.is_available():
raise ValueError("device=cuda but CUDA not available.")
# By default, put model in eval mode; switch to train mode in training
self.eval()
def predict_proba(self, X, **kwargs):
"""Predicts probabilistic labels for an input X on all tasks
Args:
X: An appropriate input for the child class of Classifier
Returns:
An [n, k] np.ndarray of probabilities
"""
raise NotImplementedError
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
"""Predicts (int) labels for an input X on all tasks
Args:
X: The input for the predict_proba method
break_ties: A tie-breaking policy (see Classifier._break_ties())
return_probs: Return the predicted probabilities as well
Returns:
Y_p: An n-dim np.ndarray of predictions in {1,...k}
[Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
"""
Y_s = self._to_numpy(self.predict_proba(X, **kwargs))
        Y_p = self._break_ties(Y_s, break_ties).astype(int)  # np.int is deprecated
if return_probs:
return Y_p, Y_s
else:
return Y_p
def score(
self,
data,
metric="accuracy",
break_ties="random",
verbose=True,
print_confusion_matrix=True,
**kwargs,
):
"""Scores the predictive performance of the Classifier on all tasks
Args:
data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):
X: The input for the predict method
Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels
in {1,...,k}
metric: A metric (string) with which to score performance or a
list of such metrics
break_ties: A tie-breaking policy (see Classifier._break_ties())
verbose: The verbosity for just this score method; it will not
update the class config.
print_confusion_matrix: Print confusion matrix (overwritten to False if
verbose=False)
Returns:
scores: A (float) score or a list of such scores if kwarg metric
is a list
"""
Y_p, Y, Y_s = self._get_predictions(
data, break_ties=break_ties, return_probs=True, **kwargs
)
# Evaluate on the specified metrics
return_list = isinstance(metric, list)
metric_list = metric if isinstance(metric, list) else [metric]
scores = []
for metric in metric_list:
score = metric_score(Y, Y_p, metric, probs=Y_s, ignore_in_gold=[0])
scores.append(score)
if verbose:
print(f"{metric.capitalize()}: {score:.3f}")
# Optionally print confusion matrix
if print_confusion_matrix and verbose:
confusion_matrix(Y, Y_p, pretty_print=True)
# If a single metric was given as a string (not list), return a float
if len(scores) == 1 and not return_list:
return scores[0]
else:
return scores
def train_model(self, *args, **kwargs):
"""Trains a classifier
Take care to initialize weights outside the training loop and zero out
gradients at the beginning of each iteration inside the loop.
NOTE: self.train() is a method in nn.Module class, so we name this
method `train_model` so as not to conflict.
"""
raise NotImplementedError
def _train_model(
self, train_data, loss_fn, valid_data=None, log_writer=None, restore_state={}
):
"""The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur.
"""
# Set model to train mode
self.train()
train_config = self.config["train_config"]
# Convert data to DataLoaders
train_loader = self._create_data_loader(train_data)
valid_loader = self._create_data_loader(valid_data)
epoch_size = len(train_loader.dataset)
# Move model to GPU
if self.config["verbose"] and self.config["device"] != "cpu":
print("Using GPU...")
self.to(self.config["device"])
# Set training components
self._set_writer(train_config)
self._set_logger(train_config, epoch_size)
self._set_checkpointer(train_config)
self._set_optimizer(train_config)
self._set_scheduler(train_config)
# Restore model if necessary
if restore_state:
start_iteration = self._restore_training_state(restore_state)
else:
start_iteration = 0
# Train the model
metrics_hist = {} # The most recently seen value for all metrics
for epoch in range(start_iteration, train_config["n_epochs"]):
progress_bar = (
train_config["progress_bar"]
and self.config["verbose"]
and self.logger.log_unit == "epochs"
)
t = tqdm(
enumerate(train_loader),
total=len(train_loader),
disable=(not progress_bar),
)
self.running_loss = 0.0
self.running_examples = 0
for batch_num, data in t:
# NOTE: actual batch_size may not equal config's target batch_size
batch_size = len(data[0])
# Moving data to device
if self.config["device"] != "cpu":
data = place_on_gpu(data)
# Zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass to calculate the average loss per example
loss = loss_fn(*data)
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
# Backward pass to calculate gradients
# Loss is an average loss per example
loss.backward()
# Perform optimizer step
self.optimizer.step()
# Calculate metrics, log, and checkpoint as necessary
metrics_dict = self._execute_logging(
train_loader, valid_loader, loss, batch_size
)
metrics_hist.update(metrics_dict)
# tqdm output
t.set_postfix(loss=metrics_dict["train/loss"])
# Apply learning rate scheduler
self._update_scheduler(epoch, metrics_hist)
self.eval()
# Restore best model if applicable
if self.checkpointer and self.checkpointer.checkpoint_best:
self.checkpointer.load_best_model(model=self)
# Write log if applicable
if self.writer:
if self.writer.include_config:
self.writer.add_config(self.config)
self.writer.close()
# Print confusion matrix if applicable
if self.config["verbose"]:
print("Finished Training")
if valid_loader is not None:
self.score(
valid_loader,
metric=train_config["validation_metric"],
verbose=True,
print_confusion_matrix=True,
)
def _get_loss_fn(self):
"""Returns a loss function"""
msg = (
"Abstract class: _get_loss_fn() must be implemented by a child "
"class of Classifier."
)
raise NotImplementedError(msg)
def save(self, destination, **kwargs):
"""Serialize and save a model.
Example:
end_model = EndModel(...)
end_model.train_model(...)
end_model.save("my_end_model.pkl")
"""
with open(destination, "wb") as f:
torch.save(self, f, **kwargs)
@staticmethod
def load(source, **kwargs):
"""Deserialize and load a model.
Example:
end_model = EndModel.load("my_end_model.pkl")
end_model.score(...)
"""
with open(source, "rb") as f:
return torch.load(f, **kwargs)
def update_config(self, update_dict):
"""Updates self.config with the values in a given update dictionary"""
self.config = recursive_merge_dicts(self.config, update_dict)
def reset(self):
"""Initializes all modules in a network"""
# The apply(f) method recursively calls f on itself and all children
self.apply(self._reset_module)
@staticmethod
def _reset_module(m):
"""An initialization method to be applied recursively to all modules"""
raise NotImplementedError
def resume_training(self, train_data, model_path, valid_data=None):
"""This model resume training of a classifier by reloading the appropriate state_dicts for each model
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
            model_path: the path to the saved checkpoint for resuming training
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
"""
restore_state = self.checkpointer.restore(model_path)
loss_fn = self._get_loss_fn()
self.train()
self._train_model(
train_data=train_data,
loss_fn=loss_fn,
valid_data=valid_data,
restore_state=restore_state,
)
def _restore_training_state(self, restore_state):
"""Restores the model and optimizer states
This helper function restores the model's state to a given iteration so
that a user can resume training at any epoch.
Args:
restore_state: a state_dict dictionary
"""
self.load_state_dict(restore_state["model"])
self.optimizer.load_state_dict(restore_state["optimizer"])
self.lr_scheduler.load_state_dict(restore_state["lr_scheduler"])
start_iteration = restore_state["iteration"] + 1
if self.config["verbose"]:
print(f"Restored checkpoint to iteration {start_iteration}.")
if restore_state["best_model_found"]:
# Update checkpointer with appropriate information about best model
# Note that the best model found so far may not be the model in the
# checkpoint that is currently being loaded.
self.checkpointer.best_model_found = True
self.checkpointer.best_iteration = restore_state["best_iteration"]
self.checkpointer.best_score = restore_state["best_score"]
if self.config["verbose"]:
print(
f"Updated checkpointer: "
f"best_score={self.checkpointer.best_score:.3f}, "
f"best_iteration={self.checkpointer.best_iteration}"
)
return start_iteration
def _create_dataset(self, *data):
"""Converts input data to the appropriate Dataset"""
# Make sure data is a tuple of dense tensors
data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]
return TensorDataset(*data)
def _create_data_loader(self, data, **kwargs):
"""Converts input data into a DataLoader"""
if data is None:
return None
# Set DataLoader config
# NOTE: Not applicable if data is already a DataLoader
config = {
**self.config["train_config"]["data_loader_config"],
**kwargs,
"pin_memory": self.config["device"] != "cpu",
}
# Return data as DataLoader
if isinstance(data, DataLoader):
return data
elif isinstance(data, Dataset):
return DataLoader(data, **config)
elif isinstance(data, (tuple, list)):
return DataLoader(self._create_dataset(*data), **config)
else:
raise ValueError("Input data type not recognized.")
def _set_seed(self, seed):
self.seed = seed
if self.config["device"] != "cpu":
torch.backends.cudnn.enabled = True
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def _set_writer(self, train_config):
if train_config["writer"] is None:
self.writer = None
elif train_config["writer"] == "json":
self.writer = LogWriter(**(train_config["writer_config"]))
elif train_config["writer"] == "tensorboard":
self.writer = TensorBoardWriter(**(train_config["writer_config"]))
else:
raise Exception(f"Unrecognized writer: {train_config['writer']}")
def _set_logger(self, train_config, epoch_size):
self.logger = Logger(
train_config["logger_config"],
self.writer,
epoch_size,
verbose=self.config["verbose"],
)
def _set_checkpointer(self, train_config):
if train_config["checkpoint"]:
# Default to valid split for checkpoint metric
checkpoint_config = train_config["checkpoint_config"]
checkpoint_metric = checkpoint_config["checkpoint_metric"]
if checkpoint_metric.count("/") == 0:
checkpoint_config["checkpoint_metric"] = f"valid/{checkpoint_metric}"
self.checkpointer = Checkpointer(
checkpoint_config, verbose=self.config["verbose"]
)
else:
self.checkpointer = None
def _set_optimizer(self, train_config):
optimizer_config = train_config["optimizer_config"]
opt = optimizer_config["optimizer"]
# We set L2 here if the class does not implement its own L2 reg
l2 = 0 if self.implements_l2 else train_config.get("l2", 0)
parameters = filter(lambda p: p.requires_grad, self.parameters())
if opt == "sgd":
optimizer = optim.SGD(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["sgd_config"],
weight_decay=l2,
)
elif opt == "rmsprop":
optimizer = optim.RMSprop(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["rmsprop_config"],
weight_decay=l2,
)
elif opt == "adam":
optimizer = optim.Adam(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["adam_config"],
weight_decay=l2,
)
elif opt == "sparseadam":
optimizer = optim.SparseAdam(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["adam_config"],
)
if l2:
raise Exception(
"SparseAdam optimizer does not support weight_decay (l2 penalty)."
)
else:
raise ValueError(f"Did not recognize optimizer option '{opt}'")
self.optimizer = optimizer
def _set_scheduler(self, train_config):
lr_scheduler = train_config["lr_scheduler"]
if lr_scheduler is None:
lr_scheduler = None
else:
lr_scheduler_config = train_config["lr_scheduler_config"]
if lr_scheduler == "exponential":
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
self.optimizer, **lr_scheduler_config["exponential_config"]
)
elif lr_scheduler == "reduce_on_plateau":
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, **lr_scheduler_config["plateau_config"]
)
else:
raise ValueError(
f"Did not recognize lr_scheduler option '{lr_scheduler}'"
)
self.lr_scheduler = lr_scheduler
def _update_scheduler(self, epoch, metrics_dict):
train_config = self.config["train_config"]
if self.lr_scheduler is not None:
lr_scheduler_config = train_config["lr_scheduler_config"]
if epoch + 1 >= lr_scheduler_config["lr_freeze"]:
if train_config["lr_scheduler"] == "reduce_on_plateau":
checkpoint_config = train_config["checkpoint_config"]
metric_name = checkpoint_config["checkpoint_metric"]
score = metrics_dict.get(metric_name, None)
if score is not None:
self.lr_scheduler.step(score)
else:
self.lr_scheduler.step()
def _execute_logging(self, train_loader, valid_loader, loss, batch_size):
self.eval()
self.running_loss += loss.item() * batch_size
self.running_examples += batch_size
# Initialize metrics dict
metrics_dict = {}
# Always add average loss
metrics_dict["train/loss"] = self.running_loss / self.running_examples
if self.logger.check(batch_size):
logger_metrics = self.logger.calculate_metrics(
self, train_loader, valid_loader, metrics_dict
)
metrics_dict.update(logger_metrics)
self.logger.log(metrics_dict)
# Reset running loss and examples counts
self.running_loss = 0.0
self.running_examples = 0
# Checkpoint if applicable
self._checkpoint(metrics_dict)
self.train()
return metrics_dict
def _checkpoint(self, metrics_dict):
if self.checkpointer is None:
return
iteration = self.logger.unit_total
self.checkpointer.checkpoint(
metrics_dict, iteration, self, self.optimizer, self.lr_scheduler
)
def _get_predictions(self, data, break_ties="random", return_probs=False, **kwargs):
"""Computes predictions in batch, given a labeled dataset
Args:
data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):
X: The input for the predict method
Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels
in {1,...,k}
break_ties: How to break ties when making predictions
return_probs: Return the predicted probabilities as well
Returns:
Y_p: A Tensor of predictions
Y: A Tensor of labels
[Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
"""
data_loader = self._create_data_loader(data)
Y_p = []
Y = []
Y_s = []
# Do batch evaluation by default, getting the predictions and labels
for batch_num, data in enumerate(data_loader):
Xb, Yb = data
Y.append(self._to_numpy(Yb))
# Optionally move to device
if self.config["device"] != "cpu":
Xb = place_on_gpu(Xb)
# Append predictions and labels from DataLoader
Y_pb, Y_sb = self.predict(
Xb, break_ties=break_ties, return_probs=True, **kwargs
)
Y_p.append(self._to_numpy(Y_pb))
Y_s.append(self._to_numpy(Y_sb))
Y_p, Y, Y_s = map(self._stack_batches, [Y_p, Y, Y_s])
if return_probs:
return Y_p, Y, Y_s
else:
return Y_p, Y
def _break_ties(self, Y_s, break_ties="random"):
"""Break ties in each row of a tensor according to the specified policy
Args:
Y_s: An [n, k] np.ndarray of probabilities
break_ties: A tie-breaking policy:
"abstain": return an abstain vote (0)
"random": randomly choose among the tied options
NOTE: if break_ties="random", repeated runs may have
slightly different results due to difference in broken ties
[int]: ties will be broken by using this label
"""
n, k = Y_s.shape
Y_h = np.zeros(n)
diffs = np.abs(Y_s - Y_s.max(axis=1).reshape(-1, 1))
TOL = 1e-5
for i in range(n):
max_idxs = np.where(diffs[i, :] < TOL)[0]
if len(max_idxs) == 1:
Y_h[i] = max_idxs[0] + 1
# Deal with "tie votes" according to the specified policy
elif break_ties == "random":
Y_h[i] = np.random.choice(max_idxs) + 1
elif break_ties == "abstain":
Y_h[i] = 0
elif isinstance(break_ties, int):
Y_h[i] = break_ties
else:
ValueError(f"break_ties={break_ties} policy not recognized.")
return Y_h
@staticmethod
def _to_numpy(Z):
"""Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray;
also handles converting sparse input to dense."""
if Z is None:
return Z
elif issparse(Z):
return Z.toarray()
elif isinstance(Z, np.ndarray):
return Z
elif isinstance(Z, list):
return np.array(Z)
elif isinstance(Z, torch.Tensor):
return Z.cpu().numpy()
else:
msg = (
f"Expected None, list, numpy.ndarray or torch.Tensor, "
f"got {type(Z)} instead."
)
raise Exception(msg)
@staticmethod
def _to_torch(Z, dtype=None):
"""Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor;
also handles converting sparse input to dense."""
if Z is None:
return None
elif issparse(Z):
Z = torch.from_numpy(Z.toarray())
elif isinstance(Z, torch.Tensor):
pass
elif isinstance(Z, list):
Z = torch.from_numpy(np.array(Z))
elif isinstance(Z, np.ndarray):
Z = torch.from_numpy(Z)
else:
msg = (
f"Expected list, numpy.ndarray or torch.Tensor, "
f"got {type(Z)} instead."
)
raise Exception(msg)
return Z.type(dtype) if dtype else Z
def _check(self, var, val=None, typ=None, shape=None):
        if val is not None and var != val:
msg = f"Expected value {val} but got value {var}."
raise ValueError(msg)
if typ is not None and not isinstance(var, typ):
msg = f"Expected type {typ} but got type {type(var)}."
raise ValueError(msg)
        if shape is not None and var.shape != shape:
msg = f"Expected shape {shape} but got shape {var.shape}."
raise ValueError(msg)
def _check_or_set_attr(self, name, val, set_val=False):
if set_val:
setattr(self, name, val)
else:
true_val = getattr(self, name)
if val != true_val:
raise Exception(f"{name} = {val}, but should be {true_val}.")
@staticmethod
def _stack_batches(X):
"""Stack a list of np.ndarrays along the first axis, returning an
        np.ndarray; note this is mainly for smooth handling of the multi-task
setting."""
X = [Classifier._to_numpy(Xb) for Xb in X]
if len(X[0].shape) == 1:
return np.hstack(X)
elif len(X[0].shape) == 2:
return np.vstack(X)
else:
raise ValueError(f"Can't stack {len(X[0].shape)}-dim batches.")
|
metal-master
|
metal/classifier.py
|
from collections import Counter, defaultdict
import numpy as np
import scipy.sparse as sparse
from pandas import DataFrame, Series
from metal.utils import arraylike_to_numpy
############################################################
# Label Matrix Diagnostics
############################################################
def _covered_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by at
least one LF."""
return np.ravel(np.where(L.sum(axis=1) != 0, 1, 0))
def _overlapped_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by
more than one LF."""
return np.where(np.ravel((L != 0).sum(axis=1)) > 1, 1, 0)
def _conflicted_data_points(L):
"""Returns an indicator vector where ith element = 1 if x_i is labeled by
at least two LFs that give it disagreeing labels."""
m = sparse.diags(np.ravel(L.max(axis=1).todense()))
return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense())
def label_coverage(L):
"""Returns the **fraction of data points with > 0 (non-zero) labels**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _covered_data_points(L).sum() / L.shape[0]
def label_overlap(L):
"""Returns the **fraction of data points with > 1 (non-zero) labels**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _overlapped_data_points(L).sum() / L.shape[0]
def label_conflict(L):
"""Returns the **fraction of data points with conflicting (disagreeing)
    labels.**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith item
"""
return _conflicted_data_points(L).sum() / L.shape[0]
def lf_polarities(L):
"""Return the polarities of each LF based on evidence in a label matrix.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
"""
polarities = [sorted(list(set(L[:, i].data))) for i in range(L.shape[1])]
return [p[0] if len(p) == 1 else p for p in polarities]
def lf_coverages(L):
"""Return the **fraction of data points that each LF labels.**
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
"""
return np.ravel((L != 0).sum(axis=0)) / L.shape[0]
def lf_overlaps(L, normalize_by_coverage=False):
"""Return the **fraction of items each LF labels that are also labeled by at
least one other LF.**
Note that the maximum possible overlap fraction for an LF is the LF's
coverage, unless `normalize_by_coverage=True`, in which case it is 1.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
normalize_by_coverage: Normalize by coverage of the LF, so that it
returns the percent of LF labels that have overlaps.
"""
overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0]
if normalize_by_coverage:
overlaps /= lf_coverages(L)
return np.nan_to_num(overlaps)
def lf_conflicts(L, normalize_by_overlaps=False):
"""Return the **fraction of items each LF labels that are also given a
different (non-abstain) label by at least one other LF.**
Note that the maximum possible conflict fraction for an LF is the LF's
overlaps fraction, unless `normalize_by_overlaps=True`, in which case it
is 1.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
normalize_by_overlaps: Normalize by overlaps of the LF, so that it
returns the percent of LF overlaps that have conflicts.
"""
conflicts = (L != 0).T @ _conflicted_data_points(L) / L.shape[0]
if normalize_by_overlaps:
conflicts /= lf_overlaps(L)
return np.nan_to_num(conflicts)
def lf_empirical_accuracies(L, Y):
"""Return the **empirical accuracy** against a set of labels Y (e.g. dev
set) for each LF.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
Y: an [n] or [n, 1] np.ndarray of gold labels
"""
# Assume labeled set is small, work with dense matrices
Y = arraylike_to_numpy(Y)
L = L.toarray()
X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1))
return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
def lf_summary(L, Y=None, lf_names=None, est_accs=None):
"""Returns a pandas DataFrame with the various per-LF statistics.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
Y: an [n] or [n, 1] np.ndarray of gold labels.
If provided, the empirical accuracy for each LF will be calculated
"""
n, m = L.shape
if lf_names is not None:
col_names = ["j"]
d = {"j": list(range(m))}
else:
lf_names = list(range(m))
col_names = []
d = {}
# Default LF stats
col_names.extend(["Polarity", "Coverage", "Overlaps", "Conflicts"])
d["Polarity"] = Series(data=lf_polarities(L), index=lf_names)
d["Coverage"] = Series(data=lf_coverages(L), index=lf_names)
d["Overlaps"] = Series(data=lf_overlaps(L), index=lf_names)
d["Conflicts"] = Series(data=lf_conflicts(L), index=lf_names)
if Y is not None:
col_names.extend(["Correct", "Incorrect", "Emp. Acc."])
confusions = [
confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m)
]
corrects = [np.diagonal(conf).sum() for conf in confusions]
incorrects = [
conf.sum() - correct for conf, correct in zip(confusions, corrects)
]
accs = lf_empirical_accuracies(L, Y)
d["Correct"] = Series(data=corrects, index=lf_names)
d["Incorrect"] = Series(data=incorrects, index=lf_names)
d["Emp. Acc."] = Series(data=accs, index=lf_names)
if est_accs is not None:
col_names.append("Learned Acc.")
d["Learned Acc."] = Series(est_accs, index=lf_names)
return DataFrame(data=d, index=lf_names)[col_names]
def single_lf_summary(Y_p, Y=None):
"""Calculates coverage, overlap, conflicts, and accuracy for a single LF
Args:
Y_p: a np.array or torch.Tensor of predicted labels
Y: a np.array or torch.Tensor of true labels (if known)
"""
L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1))
return lf_summary(L, Y)
def error_buckets(gold, pred, X=None):
"""Group items by error buckets
Args:
gold: an array-like of gold labels (ints)
pred: an array-like of predictions (ints)
X: an iterable of items
Returns:
buckets: A dict of items where buckets[i,j] is a list of items with
predicted label i and true label j. If X is None, return indices
instead.
For a binary problem with (1=positive, 2=negative):
buckets[1,1] = true positives
buckets[1,2] = false positives
buckets[2,1] = false negatives
buckets[2,2] = true negatives
"""
buckets = defaultdict(list)
gold = arraylike_to_numpy(gold)
pred = arraylike_to_numpy(pred)
for i, (y, l) in enumerate(zip(pred, gold)):
buckets[y, l].append(X[i] if X is not None else i)
return buckets
def confusion_matrix(
gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True
):
"""A shortcut method for building a confusion matrix all at once.
Args:
gold: an array-like of gold labels (ints)
pred: an array-like of predictions (ints)
null_pred: If True, include the row corresponding to null predictions
null_gold: If True, include the col corresponding to null gold labels
normalize: if True, divide counts by the total number of items
pretty_print: if True, pretty-print the matrix before returning
"""
conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold)
gold = arraylike_to_numpy(gold)
pred = arraylike_to_numpy(pred)
conf.add(gold, pred)
mat = conf.compile()
if normalize:
mat = mat / len(gold)
if pretty_print:
conf.display(normalize=normalize)
return mat
class ConfusionMatrix(object):
"""
An iteratively built abstention-aware confusion matrix with pretty printing
Assumed axes are true label on top, predictions on the side.
"""
def __init__(self, null_pred=False, null_gold=False):
"""
Args:
null_pred: If True, include the row corresponding to null
predictions
null_gold: If True, include the col corresponding to null gold
labels
"""
self.counter = Counter()
self.mat = None
self.null_pred = null_pred
self.null_gold = null_gold
def __repr__(self):
if self.mat is None:
self.compile()
return str(self.mat)
def add(self, gold, pred):
"""
Args:
gold: a np.ndarray of gold labels (ints)
pred: a np.ndarray of predictions (ints)
"""
self.counter.update(zip(gold, pred))
def compile(self, trim=True):
k = max([max(tup) for tup in self.counter.keys()]) + 1 # include 0
mat = np.zeros((k, k), dtype=int)
for (y, l), v in self.counter.items():
mat[l, y] = v
if trim and not self.null_pred:
mat = mat[1:, :]
if trim and not self.null_gold:
mat = mat[:, 1:]
self.mat = mat
return mat
def display(self, normalize=False, indent=0, spacing=2, decimals=3, mark_diag=True):
mat = self.compile(trim=False)
m, n = mat.shape
tab = " " * spacing
margin = " " * indent
# Print headers
s = margin + " " * (5 + spacing)
for j in range(n):
if j == 0 and not self.null_gold:
continue
s += f" y={j} " + tab
print(s)
# Print data
for i in range(m):
# Skip null predictions row if necessary
if i == 0 and not self.null_pred:
continue
s = margin + f" l={i} " + tab
for j in range(n):
# Skip null gold if necessary
if j == 0 and not self.null_gold:
continue
else:
if i == j and mark_diag and normalize:
s = s[:-1] + "*"
if normalize:
s += f"{mat[i,j]/sum(mat[i,1:]):>5.3f}" + tab
else:
s += f"{mat[i,j]:^5d}" + tab
print(s)
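if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module) of the label
    # matrix diagnostics above, on a toy 4-item x 2-LF matrix (0 = abstain).
    L = sparse.csr_matrix(np.array([[1, 0], [1, 2], [0, 0], [2, 2]]))
    print(label_coverage(L))  # 0.75: three of four items get >= 1 label
    print(label_overlap(L))   # 0.50: two items are labeled by both LFs
    print(label_conflict(L))  # 0.25: one item receives disagreeing labels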
|
metal-master
|
metal/analysis.py
|
from .end_model import EndModel
from .label_model import LabelModel, MajorityClassVoter, MajorityLabelVoter, RandomVoter
from .tuners import RandomSearchTuner
__all__ = [
"EndModel",
"LabelModel",
"MajorityClassVoter",
"MajorityLabelVoter",
"RandomVoter",
"RandomSearchTuner",
]
__version__ = "0.5.0"
|
metal-master
|
metal/__init__.py
|
import argparse
import copy
import random
import warnings
from collections import defaultdict
import numpy as np
import torch
from scipy.sparse import issparse
from torch.utils.data import Dataset
class MetalDataset(Dataset):
"""A dataset that group each item in X with its label from Y
Args:
X: an n-dim iterable of items
Y: a torch.Tensor of labels
This may be predicted (int) labels [n] or probabilistic (float) labels [n, k]
"""
def __init__(self, X, Y):
self.X = X
self.Y = Y
assert len(X) == len(Y)
def __getitem__(self, index):
return tuple([self.X[index], self.Y[index]])
def __len__(self):
return len(self.X)
def rargmax(x, eps=1e-8):
"""Argmax with random tie-breaking
Args:
x: a 1-dim numpy array
Returns:
the argmax index
"""
idxs = np.where(abs(x - np.max(x, axis=0)) < eps)[0]
return np.random.choice(idxs)
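# Illustrative usage: rargmax(np.array([0.2, 0.7, 0.7])) returns 1 or 2,
# chosen uniformly at random among the tied maxima.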
def pred_to_prob(Y_h, k):
"""Converts a 1D tensor of predicted labels into a 2D tensor of probabilistic labels
Args:
Y_h: an [n], or [n,1] tensor of predicted (int) labels in {1,...,k}
k: the largest possible label in Y_h
Returns:
        Y_s: a torch.Tensor of shape [n, k] (dtype follows Y_h) where Y_s[i, j-1] is the probabilistic
label for item i and label j
"""
Y_h = Y_h.clone()
if Y_h.dim() > 1:
Y_h = Y_h.squeeze()
assert Y_h.dim() == 1
assert (Y_h >= 1).all()
assert (Y_h <= k).all()
n = Y_h.shape[0]
Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)
for i, j in enumerate(Y_h):
Y_s[i, j - 1] = 1.0
return Y_s
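# Illustrative usage (output dtype follows Y_h, here integer):
#   pred_to_prob(torch.tensor([1, 3, 2]), k=3)
#   ->  tensor([[1, 0, 0],
#               [0, 0, 1],
#               [0, 1, 0]])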
def arraylike_to_numpy(array_like):
"""Convert a 1d array-like (e.g,. list, tensor, etc.) to an np.ndarray"""
orig_type = type(array_like)
# Convert to np.ndarray
if isinstance(array_like, np.ndarray):
pass
elif isinstance(array_like, list):
array_like = np.array(array_like)
elif issparse(array_like):
array_like = array_like.toarray()
elif isinstance(array_like, torch.Tensor):
array_like = array_like.numpy()
    else:
        # Fall back to a generic conversion; raise if it fails
        try:
            array_like = np.array(array_like)
        except Exception:
            msg = f"Input of type {orig_type} could not be converted to 1d np.ndarray"
            raise ValueError(msg)
# Correct shape
if (array_like.ndim > 1) and (1 in array_like.shape):
array_like = array_like.flatten()
if array_like.ndim != 1:
raise ValueError("Input could not be converted to 1d np.array")
# Convert to ints
if any(array_like % 1):
raise ValueError("Input contains at least one non-integer value.")
array_like = array_like.astype(np.dtype(int))
return array_like
def convert_labels(Y, source, target):
"""Convert a matrix from one label type to another
Args:
Y: A np.ndarray or torch.Tensor of labels (ints) using source convention
source: The convention the labels are currently expressed in
target: The convention to convert the labels to
Returns:
Y: an np.ndarray or torch.Tensor of labels (ints) using the target convention
Conventions:
'categorical': [0: abstain, 1: positive, 2: negative]
'plusminus': [0: abstain, 1: positive, -1: negative]
'onezero': [0: negative, 1: positive]
Note that converting to 'onezero' will combine abstain and negative labels.
"""
if Y is None:
return Y
if isinstance(Y, np.ndarray):
Y = Y.copy()
assert Y.dtype == np.int64
elif isinstance(Y, torch.Tensor):
Y = Y.clone()
assert isinstance(Y, torch.LongTensor)
else:
raise ValueError("Unrecognized label data type.")
negative_map = {"categorical": 2, "plusminus": -1, "onezero": 0}
Y[Y == negative_map[source]] = negative_map[target]
return Y
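# Illustrative usage of the label-convention conversion:
#   convert_labels(np.array([0, 1, 2, 2]), "categorical", "plusminus")
#   ->  array([ 0,  1, -1, -1])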
def plusminus_to_categorical(Y):
return convert_labels(Y, "plusminus", "categorical")
def categorical_to_plusminus(Y):
return convert_labels(Y, "categorical", "plusminus")
def label_matrix_to_one_hot(L, k=None):
"""Converts a 2D [n,m] label matrix into an [n,m,k] one hot 3D tensor
Note that in the returned 3D matrix, abstain votes continue to be
represented by 0s, not 1s.
Args:
L: a [n,m] label matrix with categorical labels (0 = abstain)
k: the number of classes that could appear in L
if None, k is inferred as the max element in L
"""
n, m = L.shape
if k is None:
k = L.max()
L_onehot = torch.zeros(n, m, k + 1)
    for i, row in enumerate(L):
        for j, label in enumerate(row):  # loop var renamed to avoid shadowing k
            if label > 0:
                L_onehot[i, j, label - 1] = 1
return L_onehot
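# Illustrative usage (editor's sketch, not part of the original module;
# reflects the documented [n, m, k] output shape): a 2x2 label matrix with one
# abstain (0) vote maps to a [2, 2, 2] tensor in which the abstaining entry's
# slice stays all-zero.
def _example_label_matrix_to_one_hot():
    L = torch.tensor([[1, 2], [0, 1]])
    return label_matrix_to_one_hot(L, k=2)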
def recursive_merge_dicts(x, y, misses="report", verbose=None):
"""
Merge dictionary y into a copy of x, overwriting elements of x when there
is a conflict, except if the element is a dictionary, in which case recurse.
misses: what to do if a key in y is not in x
'insert' -> set x[key] = value
'exception' -> raise an exception
'report' -> report the name of the missing key
'ignore' -> do nothing
verbose: If verbose is None, look for a value for verbose in y first, then x
    Example: see the illustrative sketch after this function.
"""
def recurse(x, y, misses="report", verbose=1):
found = True
for k, v in y.items():
found = False
if k in x:
found = True
if isinstance(x[k], dict):
if not isinstance(v, dict):
msg = f"Attempted to overwrite dict {k} with " f"non-dict: {v}"
raise ValueError(msg)
# If v is {}, set x[k] = {} instead of recursing on empty dict
# Otherwise, recurse on the items in v
if v:
recurse(x[k], v, misses, verbose)
else:
x[k] = v
else:
if x[k] == v:
msg = f"Reaffirming {k}={x[k]}"
else:
msg = f"Overwriting {k}={x[k]} to {k}={v}"
x[k] = v
if verbose > 1 and k != "verbose":
print(msg)
else:
for kx, vx in x.items():
if isinstance(vx, dict):
found = recurse(vx, {k: v}, misses="ignore", verbose=verbose)
if found:
break
if not found:
msg = f'Could not find kwarg "{k}" in destination dict.'
if misses == "insert":
x[k] = v
if verbose > 1:
print(f"Added {k}={v} from second dict to first")
elif misses == "exception":
raise ValueError(msg)
elif misses == "report":
print(msg)
else:
pass
return found
    # If verbose is not provided, look for a value in y first, then x
# (Do this because 'verbose' kwarg is often inside one or both of x and y)
if verbose is None:
verbose = y.get("verbose", x.get("verbose", 1))
z = copy.deepcopy(x)
recurse(z, y, misses, verbose)
return z
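# Illustrative usage (editor's sketch, not part of the original module; the
# dict contents are hypothetical): y overwrites x recursively, and
# misses="insert" adds keys that x lacks.
def _example_recursive_merge_dicts():
    x = {"a": 1, "b": {"c": 2, "d": 3}, "verbose": 0}
    y = {"b": {"c": 20}, "e": 5}
    return recursive_merge_dicts(x, y, misses="insert")
    # -> {"a": 1, "b": {"c": 20, "d": 3}, "verbose": 0, "e": 5}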
def recursive_transform(x, test_func, transform):
"""Applies a transformation recursively to each member of a dictionary
Args:
x: a (possibly nested) dictionary
test_func: a function that returns whether this element should be transformed
transform: a function that transforms a value
"""
for k, v in x.items():
if test_func(v):
x[k] = transform(v)
if isinstance(v, dict):
recursive_transform(v, test_func, transform)
return x
def add_flags_from_config(parser, config_dict):
"""
Adds a flag (and default value) to an ArgumentParser for each parameter in a config
"""
def OrNone(default):
def func(x):
# Convert "none" to proper None object
if x.lower() == "none":
return None
# If default is None (and x is not None), return x without conversion as str
elif default is None:
return str(x)
# Otherwise, default has non-None type; convert x to that type
else:
return type(default)(x)
return func
def str2bool(string):
if string == "0" or string.lower() == "false":
return False
elif string == "1" or string.lower() == "true":
return True
else:
raise Exception(f"Invalid value {string} for boolean flag")
for param in config_dict:
# Blacklist certain config parameters from being added as flags
if param in ["verbose"]:
continue
default = config_dict[param]
try:
if isinstance(default, dict):
parser = add_flags_from_config(parser, default)
elif isinstance(default, bool):
parser.add_argument(f"--{param}", type=str2bool, default=default)
elif isinstance(default, list):
if len(default) > 0:
# pass a list as argument
parser.add_argument(
f"--{param}",
action="append",
type=type(default[0]),
default=default,
)
else:
parser.add_argument(f"--{param}", action="append", default=default)
else:
parser.add_argument(f"--{param}", type=OrNone(default), default=default)
except argparse.ArgumentError:
print(
f"Could not add flag for param {param} because it was already present."
)
return parser
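# Illustrative usage (editor's sketch, not part of the original module; the
# config keys are hypothetical): each config entry becomes a typed flag, with
# bools parsed via str2bool and lists registered via action="append".
def _example_add_flags_from_config():
    import argparse
    parser = argparse.ArgumentParser()
    config = {"lr": 0.01, "use_cuda": True, "hidden_dims": [64, 64]}
    parser = add_flags_from_config(parser, config)
    args = parser.parse_args(["--lr", "0.001", "--use_cuda", "false"])
    return args  # args.lr == 0.001, args.use_cuda is False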
def split_data(
*inputs,
splits=[0.5, 0.5],
shuffle=True,
stratify_by=None,
index_only=False,
seed=None,
):
"""Splits inputs into multiple splits of defined sizes
Args:
inputs: correlated tuples/lists/arrays/matrices/tensors to split
splits: list containing split sizes (fractions or counts);
shuffle: if True, shuffle the data before splitting
stratify_by: (None or an input) if not None, use these labels to
stratify the splits (separating the data into groups by these
labels and sampling from those, rather than from the population at
large); overrides shuffle
index_only: if True, return only the indices of the new splits, not the
split data itself
seed: (int) random seed
Example usage:
Ls, Xs, Ys = split_data(L, X, Y, splits=[0.8, 0.1, 0.1])
OR
assignments = split_data(Y, splits=[0.8, 0.1, 0.1], index_only=True)
Note: This is very similar to scikit-learn's train_test_split() method,
but with support for more than two splits.
"""
def fractions_to_counts(fracs, n):
"""Converts a list of fractions to a list of counts that sum to n"""
counts = [int(np.round(n * frac)) for frac in fracs]
# Ensure sum of split counts sums to n
counts[-1] = n - sum(counts[:-1])
return counts
    def slice_data(data, indices):
        if isinstance(data, (list, tuple)):
            # Index directly so the output order matches indices, consistent
            # with the fancy indexing used for arrays/tensors below
            return [data[i] for i in indices]
        else:
            try:
                # Works for np.ndarray, scipy.sparse, torch.Tensor
                return data[indices]
            except TypeError:
                raise Exception(
                    f"split_data() currently only accepts inputs "
                    f"of type tuple, list, np.ndarray, scipy.sparse, or "
                    f"torch.Tensor; not {type(data)}"
                )
# Setting random seed
if seed is not None:
random.seed(seed)
try:
n = len(inputs[0])
except TypeError:
n = inputs[0].shape[0]
num_splits = len(splits)
# Check splits for validity and convert to fractions
if all(isinstance(x, int) for x in splits):
if not sum(splits) == n:
raise ValueError(
f"Provided split counts must sum to n ({n}), not {sum(splits)}."
)
fracs = [count / n for count in splits]
    elif all(isinstance(x, float) for x in splits):
        # Use a tolerance rather than exact float equality (e.g., 0.8 + 0.1 + 0.1 != 1.0)
        if abs(sum(splits) - 1.0) > 1e-9:
            raise ValueError(f"Split fractions must sum to 1.0, not {sum(splits)}.")
fracs = splits
else:
raise ValueError("Splits must contain all ints or all floats.")
# Make sampling pools
if stratify_by is None:
pools = [np.arange(n)]
else:
pools = defaultdict(list)
for i, val in enumerate(stratify_by):
pools[val].append(i)
pools = list(pools.values())
# Make index assignments
assignments = [[] for _ in range(num_splits)]
for pool in pools:
if shuffle or stratify_by is not None:
random.shuffle(pool)
counts = fractions_to_counts(fracs, len(pool))
counts.insert(0, 0)
cum_counts = np.cumsum(counts)
for i in range(num_splits):
assignments[i].extend(pool[cum_counts[i] : cum_counts[i + 1]])
if index_only:
return assignments
else:
outputs = []
for data in inputs:
data_splits = []
for split in range(num_splits):
data_splits.append(slice_data(data, assignments[split]))
outputs.append(data_splits)
if len(outputs) == 1:
return outputs[0]
else:
return outputs
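# Illustrative usage (editor's sketch, not part of the original module):
# splitting two correlated arrays into train/valid/test views whose rows stay
# aligned across inputs.
def _example_split_data():
    X = np.arange(10)
    Y = np.arange(10) % 2
    Xs, Ys = split_data(X, Y, splits=[8, 1, 1], shuffle=True, seed=123)
    X_train, X_valid, X_test = Xs  # sizes 8, 1, and 1
    return X_train, Ys[0]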
def padded_tensor(items, pad_idx=0, left_padded=False, max_len=None):
"""Create a padded [n, ?] Tensor from a potentially uneven iterable of Tensors.
Modified from github.com/facebookresearch/ParlAI
Args:
items: (list) the items to merge and pad
pad_idx: (int) the value to use for padding
left_padded: (bool) if True, pad on the left instead of the right
max_len: (int) if not None, the maximum allowable item length
Returns:
padded_tensor: (Tensor) the merged and padded tensor of items
"""
# number of items
n = len(items)
# length of each item
lens = [len(item) for item in items]
# max seq_len dimension
max_seq_len = max(lens) if max_len is None else max_len
output = items[0].new_full((n, max_seq_len), pad_idx)
    for i, (item, length) in enumerate(zip(items, lens)):
        # Truncate any item longer than max_seq_len so the assignment fits
        length = min(length, max_seq_len)
        if left_padded:
            # place at end
            output[i, max_seq_len - length :] = item[:length]
        else:
            # place at beginning
            output[i, :length] = item[:length]
return output
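# Illustrative usage (editor's sketch, not part of the original module):
# uneven sequences are right-padded with pad_idx into one [n, max_len] tensor.
def _example_padded_tensor():
    items = [torch.tensor([1, 2, 3]), torch.tensor([4])]
    return padded_tensor(items, pad_idx=0)
    # tensor([[1, 2, 3],
    #         [4, 0, 0]])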
warnings_given = set()
def warn_once(msg, msg_name=None):
"""Prints a warning statement just once
Args:
msg: The warning message
msg_name: [optional] The name of the warning. If None, the msg_name
will be the msg itself.
"""
assert isinstance(msg, str)
msg_name = msg_name if msg_name else msg
if msg_name not in warnings_given:
warnings.warn(msg)
warnings_given.add(msg_name)
# DEPRECATION: This is replaced by move_to_device
def place_on_gpu(data):
"""Utility to place data on GPU, where data could be a torch.Tensor, a tuple
or list of Tensors, or a tuple or list of tuple or lists of Tensors"""
data_type = type(data)
if data_type in (list, tuple):
data = [place_on_gpu(data[i]) for i in range(len(data))]
data = data_type(data)
return data
elif isinstance(data, torch.Tensor):
return data.cuda()
    else:
        raise ValueError(f"Data type {type(data)} not recognized.")
def move_to_device(obj, device=-1):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
device = -1 -> "cpu"
device = 0 -> "cuda:0"
"""
if device < 0 or not torch.cuda.is_available():
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(device)
elif isinstance(obj, dict):
return {key: move_to_device(value, device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, device) for item in obj])
else:
return obj
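# Illustrative usage (editor's sketch, not part of the original module):
# nested containers are traversed; non-tensor leaves pass through untouched,
# and the whole call is a no-op when CUDA is unavailable.
def _example_move_to_device():
    batch = {"data": torch.ones(2, 3), "ids": [0, 1]}
    return move_to_device(batch, device=0)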
def set_seed(seed):
seed = int(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.backends.cudnn.enabled = True # Is this necessary?
torch.cuda.manual_seed(seed)
|
metal-master
|
metal/utils.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from metal.classifier import Classifier
from metal.end_model.em_defaults import em_default_config
from metal.end_model.identity_module import IdentityModule
from metal.end_model.loss import SoftCrossEntropyLoss
from metal.utils import MetalDataset, pred_to_prob, recursive_merge_dicts
class EndModel(Classifier):
"""A dynamically constructed discriminative classifier
layer_out_dims: a list of integers corresponding to the output sizes
of the layers of your network. The first element is the
dimensionality of the input layer, the last element is the
dimensionality of the head layer (equal to the cardinality of the
task), and all other elements dictate the sizes of middle layers.
The number of middle layers will be inferred from this list.
input_module: (nn.Module) a module that converts the user-provided
model inputs to torch.Tensors. Defaults to IdentityModule.
middle_modules: (nn.Module) a list of modules to execute between the
input_module and task head. Defaults to nn.Linear.
head_module: (nn.Module) a module to execute right before the final
softmax that outputs a prediction for the task.
"""
def __init__(
self,
layer_out_dims,
input_module=None,
middle_modules=None,
head_module=None,
**kwargs,
):
if len(layer_out_dims) < 2 and not kwargs["skip_head"]:
raise ValueError(
"Arg layer_out_dims must have at least two "
"elements corresponding to the output dim of the input module "
"and the cardinality of the task. If the input module is the "
"IdentityModule, then the output dim of the input module will "
"be equal to the dimensionality of your input data points"
)
# Add layer_out_dims to kwargs so it will be merged into the config dict
kwargs["layer_out_dims"] = layer_out_dims
config = recursive_merge_dicts(em_default_config, kwargs, misses="insert")
super().__init__(k=layer_out_dims[-1], config=config)
self._build(input_module, middle_modules, head_module)
# Show network
if self.config["verbose"]:
print("\nNetwork architecture:")
self._print()
print()
def _build(self, input_module, middle_modules, head_module):
"""
TBD
"""
input_layer = self._build_input_layer(input_module)
middle_layers = self._build_middle_layers(middle_modules)
# Construct list of layers
layers = [input_layer]
if middle_layers is not None:
layers += middle_layers
if not self.config["skip_head"]:
head = self._build_task_head(head_module)
layers.append(head)
# Construct network
if len(layers) > 1:
self.network = nn.Sequential(*layers)
else:
self.network = layers[0]
# Construct loss module
loss_weights = self.config["train_config"]["loss_weights"]
if loss_weights is not None and self.config["verbose"]:
print(f"Using class weight vector {loss_weights}...")
reduction = self.config["train_config"]["loss_fn_reduction"]
self.criteria = SoftCrossEntropyLoss(
weight=self._to_torch(loss_weights, dtype=torch.FloatTensor),
reduction=reduction,
)
def _build_input_layer(self, input_module):
if input_module is None:
input_module = IdentityModule()
output_dim = self.config["layer_out_dims"][0]
input_layer = self._make_layer(
input_module,
"input",
self.config["input_layer_config"],
output_dim=output_dim,
)
return input_layer
def _build_middle_layers(self, middle_modules):
layer_out_dims = self.config["layer_out_dims"]
num_mid_layers = len(layer_out_dims) - 2
if num_mid_layers == 0:
return None
middle_layers = nn.ModuleList()
for i in range(num_mid_layers):
if middle_modules is None:
module = nn.Linear(*layer_out_dims[i : i + 2])
output_dim = layer_out_dims[i + 1]
else:
module = middle_modules[i]
output_dim = None
layer = self._make_layer(
module,
"middle",
self.config["middle_layer_config"],
output_dim=output_dim,
)
middle_layers.add_module(f"layer{i+1}", layer)
return middle_layers
def _build_task_head(self, head_module):
if head_module is None:
head = nn.Linear(self.config["layer_out_dims"][-2], self.k)
else:
# Note that if head module is provided, it must have input dim of
# the last middle module and output dim of self.k, the cardinality
head = head_module
return head
def _make_layer(self, module, prefix, layer_config, output_dim=None):
if isinstance(module, IdentityModule):
return module
layer = [module]
if layer_config[f"{prefix}_relu"]:
layer.append(nn.ReLU())
if layer_config[f"{prefix}_batchnorm"] and output_dim:
layer.append(nn.BatchNorm1d(output_dim))
if layer_config[f"{prefix}_dropout"]:
layer.append(nn.Dropout(layer_config[f"{prefix}_dropout"]))
if len(layer) > 1:
return nn.Sequential(*layer)
else:
return layer[0]
def _print(self):
print(self.network)
def forward(self, x):
"""Returns a list of outputs for tasks 0,...t-1
Args:
x: a [batch_size, ...] batch from X
"""
return self.network(x)
@staticmethod
def _reset_module(m):
"""A method for resetting the parameters of any module in the network
First, handle special cases (unique initialization or none required)
Next, use built in method if available
        Last, report that no initialization occurred to avoid silent failure.
This will be called on all children of m as well, so do not recurse
manually.
"""
if callable(getattr(m, "reset_parameters", None)):
m.reset_parameters()
def update_config(self, update_dict):
"""Updates self.config with the values in a given update dictionary"""
self.config = recursive_merge_dicts(self.config, update_dict)
def _preprocess_Y(self, Y, k):
"""Convert Y to prob labels if necessary"""
Y = Y.clone()
# If preds, convert to probs
if Y.dim() == 1 or Y.shape[1] == 1:
Y = pred_to_prob(Y.long(), k=k)
return Y
def _create_dataset(self, *data):
return MetalDataset(*data)
def _get_loss_fn(self):
criteria = self.criteria.to(self.config["device"])
# This self.preprocess_Y allows us to not handle preprocessing
# in a custom dataloader, but decreases speed a bit
loss_fn = lambda X, Y: criteria(self.forward(X), self._preprocess_Y(Y, self.k))
return loss_fn
def train_model(self, train_data, valid_data=None, log_writer=None, **kwargs):
self.config = recursive_merge_dicts(self.config, kwargs)
# If train_data is provided as a tuple (X, Y), we can make sure Y is in
# the correct format
# NOTE: Better handling for if train_data is Dataset or DataLoader...?
if isinstance(train_data, (tuple, list)):
X, Y = train_data
Y = self._preprocess_Y(self._to_torch(Y, dtype=torch.FloatTensor), self.k)
train_data = (X, Y)
# Convert input data to data loaders
train_loader = self._create_data_loader(train_data, shuffle=True)
# Create loss function
loss_fn = self._get_loss_fn()
# Execute training procedure
self._train_model(
train_loader, loss_fn, valid_data=valid_data, log_writer=log_writer
)
def predict_proba(self, X):
"""Returns a [n, k] tensor of probs (probabilistic labels)."""
return F.softmax(self.forward(X), dim=1).data.cpu().numpy()
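# Illustrative usage (editor's sketch, not part of the original module; shapes
# and hyperparameters are hypothetical, and this assumes the default config
# plus the training loop inherited from Classifier): a 10 -> 4 -> 2 network
# for a binary task with labels in {1, 2}.
def _example_end_model():
    X = torch.randn(100, 10)
    Y = torch.randint(1, 3, (100,))
    model = EndModel([10, 4, 2], seed=123, verbose=False)
    model.train_model((X, Y), n_epochs=1)
    return model.predict_proba(X)  # a [100, 2] array of probabilities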
|
metal-master
|
metal/end_model/end_model.py
|
import torch.nn as nn
class IdentityModule(nn.Module):
"""A default identity input module that simply passes the input through."""
def __init__(self):
super().__init__()
def reset_parameters(self):
pass
def forward(self, x):
return x
|
metal-master
|
metal/end_model/identity_module.py
|
from .end_model import EndModel
from .identity_module import IdentityModule
from .logreg import LogisticRegression
from .loss import SoftCrossEntropyLoss
__all__ = ["EndModel", "IdentityModule", "LogisticRegression", "SoftCrossEntropyLoss"]
|
metal-master
|
metal/end_model/__init__.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftCrossEntropyLoss(nn.Module):
"""Computes the CrossEntropyLoss while accepting probabilistic (float) targets
Args:
weight: a tensor of relative weights to assign to each class.
the kwarg name 'weight' is used to match CrossEntropyLoss
reduction: how to combine the elementwise losses
'none': return an unreduced list of elementwise losses
            'mean': return the mean of the elementwise losses
'sum': return the sum of the elementwise losses
Accepts:
input: An [n, k] float tensor of prediction logits (not probabilities)
target: An [n, k] float tensor of target probabilities
"""
def __init__(self, weight=None, reduction="mean"):
super().__init__()
# Register as buffer is standard way to make sure gets moved /
# converted with the Module, without making it a Parameter
if weight is None:
self.weight = None
else:
# Note: Sets the attribute self.weight as well
self.register_buffer("weight", torch.FloatTensor(weight))
self.reduction = reduction
def forward(self, input, target):
n, k = input.shape
# Note that t.new_zeros, t.new_full put tensor on same device as t
cum_losses = input.new_zeros(n)
for y in range(k):
cls_idx = input.new_full((n,), y, dtype=torch.long)
y_loss = F.cross_entropy(input, cls_idx, reduction="none")
if self.weight is not None:
y_loss = y_loss * self.weight[y]
cum_losses += target[:, y].float() * y_loss
if self.reduction == "none":
return cum_losses
elif self.reduction == "mean":
return cum_losses.mean()
elif self.reduction == "sum":
return cum_losses.sum()
else:
raise ValueError(f"Unrecognized reduction: {self.reduction}")
|
metal-master
|
metal/end_model/loss.py
|
from metal.end_model import EndModel
from metal.utils import recursive_merge_dicts
class LogisticRegression(EndModel):
"""A logistic regression classifier for a single-task problem"""
def __init__(self, input_dim, output_dim=2, **kwargs):
layer_out_dims = [input_dim, output_dim]
overrides = {"input_batchnorm": False, "input_dropout": 0.0}
kwargs = recursive_merge_dicts(
kwargs, overrides, misses="insert", verbose=False
)
super().__init__(layer_out_dims, **kwargs)
|
metal-master
|
metal/end_model/logreg.py
|
em_default_config = {
# GENERAL
"seed": None,
"verbose": True,
"show_plots": True,
# Network
# The first value is the output dim of the input module (or the sum of
# the output dims of all the input modules if multitask=True and
# multiple input modules are provided). The last value is the
# output dim of the head layer (i.e., the cardinality of the
# classification task). The remaining values are the output dims of
# middle layers (if any). The number of middle layers will be inferred
# from this list.
"layer_out_dims": [10, 2],
# Input layer configs
"input_layer_config": {
"input_relu": True,
"input_batchnorm": False,
"input_dropout": 0.0,
},
# Middle layer configs
"middle_layer_config": {
"middle_relu": True,
"middle_batchnorm": False,
"middle_dropout": 0.0,
},
    # Can optionally skip the head layer completely, e.g., for running baseline
    # models...
"skip_head": False,
# Device
"device": "cpu",
# TRAINING
"train_config": {
# Loss function config
"loss_fn_reduction": "mean",
# Display
"progress_bar": False,
# Dataloader
"data_loader_config": {"batch_size": 32, "num_workers": 1, "shuffle": True},
# Loss weights
"loss_weights": None,
# Train Loop
"n_epochs": 10,
# 'grad_clip': 0.0,
"l2": 0.0,
"validation_metric": "accuracy",
"validation_freq": 1,
"validation_scoring_kwargs": {},
# Evaluate dev for during training every this many epochs
# Optimizer
"optimizer_config": {
"optimizer": "adam",
"optimizer_common": {"lr": 0.01},
# Optimizer - SGD
"sgd_config": {"momentum": 0.9},
# Optimizer - Adam
"adam_config": {"betas": (0.9, 0.999)},
# Optimizer - RMSProp
"rmsprop_config": {}, # Use defaults
},
# LR Scheduler (for learning rate)
"lr_scheduler": "reduce_on_plateau",
# [None, 'exponential', 'reduce_on_plateau']
# 'reduce_on_plateau' uses checkpoint_metric to assess plateaus
"lr_scheduler_config": {
# Freeze learning rate initially this many epochs
"lr_freeze": 0,
# Scheduler - exponential
"exponential_config": {"gamma": 0.9}, # decay rate
# Scheduler - reduce_on_plateau
"plateau_config": {
"factor": 0.5,
"patience": 10,
"threshold": 0.0001,
"min_lr": 1e-4,
},
},
# Logger (see metal/logging/logger.py for descriptions)
"logger": True,
"logger_config": {
"log_unit": "epochs", # ['seconds', 'examples', 'batches', 'epochs']
"log_train_every": 1, # How often train metrics are calculated (optionally logged to TB)
"log_train_metrics": [
"loss"
], # Metrics to calculate and report every `log_train_every` units. This can include built-in and user-defined metrics.
"log_train_metrics_func": None, # A function or list of functions that map a model + train_loader to a dictionary of custom metrics
"log_valid_every": 1, # How frequently to evaluate on valid set (must be multiple of log_freq)
"log_valid_metrics": [
"accuracy"
], # Metrics to calculate and report every `log_valid_every` units; this can include built-in and user-defined metrics
"log_valid_metrics_func": None, # A function or list of functions that maps a model + valid_loader to a dictionary of custom metrics
},
# LogWriter/Tensorboard (see metal/logging/writer.py for descriptions)
"writer": None, # [None, "json", "tensorboard"]
"writer_config": { # Log (or event) file stored at log_dir/run_dir/run_name
"log_dir": None,
"run_dir": None,
"run_name": None,
"writer_metrics": None, # May specify a subset of metrics in metrics_dict to be written
"include_config": True, # If True, include model config in log
},
# Checkpointer (see metal/logging/checkpointer.py for descriptions)
"checkpoint": True, # If True, checkpoint models when certain conditions are met
"checkpoint_config": {
"checkpoint_best": True,
"checkpoint_every": None, # uses log_valid_unit for units; if not None, checkpoint this often regardless of performance
"checkpoint_metric": "accuracy", # Must be in metrics dict; assumes valid split unless appended with "train/"
"checkpoint_metric_mode": "max", # ['max', 'min']
"checkpoint_dir": "checkpoints",
"checkpoint_runway": 0,
},
},
}
|
metal-master
|
metal/end_model/em_defaults.py
|
import time
from collections import defaultdict
class Logger(object):
"""Tracks when it is time to calculate train/valid metrics and logs them"""
    def __init__(self, config, batches_per_epoch, writer=None, verbose=True):
# Strip split name from config keys
self.config = config
self.writer = writer
self.verbose = verbose
self.log_unit = self.config["log_unit"]
self.batches_per_epoch = batches_per_epoch
self.example_count = 0
self.example_total = 0
self.batch_count = 0
self.batch_total = 0
self.unit_count = 0
self.unit_total = 0
self.loss_ticks = 0 # Count how many times loss logging has occurred
# Specific to log_unit == "seconds"
self.timer = Timer() if self.log_unit == "seconds" else None
# Calculate how many log_train steps to take per log_valid steps
self.valid_every_X = self._calculate_valid_frequency()
def increment(self, batch_size):
"""Update the total and relative unit counts"""
self.example_count += batch_size
self.example_total += batch_size
self.batch_count += 1
self.batch_total += 1
if self.log_unit == "seconds":
self.unit_count = int(self.timer.elapsed())
self.unit_total = int(self.timer.total_elapsed())
elif self.log_unit == "examples":
self.unit_count = self.example_count
self.unit_total = self.example_total
elif self.log_unit == "batches":
self.unit_count = self.batch_count
self.unit_total = self.batch_total
elif self.log_unit == "epochs":
# Track epoch by example count rather than epoch number because otherwise
# we only know when a new epoch starts, not when an epoch ends
self.unit_count = self.batch_count / self.batches_per_epoch
self.unit_total = self.batch_total / self.batches_per_epoch
else:
raise Exception(f"Unrecognized log_unit: {self.log_unit}")
def loss_time(self):
"""Returns True if it is time to calculate and report loss"""
is_time = self.unit_count >= self.config["log_every"]
if is_time:
self.loss_ticks += 1
return is_time
def metrics_time(self):
"""Returns True if it is time to calculate and report loss
TODO: Currently, score_every is a multiple of log_every so there is
only one set of counters to reset. These two could be made independent by
creating a separate counter set for loss_time and metrics_time.
"""
is_time = self.loss_ticks == self.valid_every_X
if is_time:
self.loss_ticks = 0
return is_time
def _calculate_valid_frequency(self):
if self.config["score_every"]:
# Do integer check on ratio instead of using mod due to float issues:
# e.g., 1.0 % 0.1 == 0.0999999995 for some reason
ratio = self.config["score_every"] / self.config["log_every"]
if self.config["score_every"] < self.config["log_every"] or ratio != int(
ratio
):
msg = (
f"Parameter `score_every` "
f"({self.config['score_every']}) must be a multiple of "
f"`log_every` ({self.config['log_every']})."
)
raise Exception(msg)
return int(ratio)
else:
return 0
def log(self, metrics_dict):
"""Print calculated metrics and optionally write to file (json/tb)"""
if self.writer:
self.write_to_file(metrics_dict)
if self.verbose:
self.print_to_screen(metrics_dict)
self.reset()
def print_to_screen(self, metrics_dict):
"""Print all metrics in metrics_dict to screen"""
score_strings_by_task = defaultdict(list)
for full_metric_name, value in metrics_dict.items():
task_name, metric_name = full_metric_name.split("/", maxsplit=1)
if isinstance(value, float):
score_strings_by_task[task_name].append(f"{metric_name}={value:0.2e}")
else:
score_strings_by_task[task_name].append(f"{metric_name}={value}")
if self.log_unit == "epochs":
if int(self.unit_total) == self.unit_total:
header = f"{self.unit_total} {self.log_unit[:3]}"
else:
header = f"{self.unit_total:0.2f} {self.log_unit[:3]}"
else:
epochs = self.batch_total / self.batches_per_epoch
header = f" ({epochs:0.2f} epo)"
string = f"[{header}]:\n"
for task, score_strings in score_strings_by_task.items():
concatenated_scores = f"{', '.join(score_strings)}"
string += f" {task}:[{concatenated_scores}]"
string += "\n" # Print each task on a new line
print(string[:-1]) # Don't include final newline
def write_to_file(self, metrics_dict):
for metric, value in metrics_dict.items():
if self.log_unit == "epochs":
# Use batches b/c Tensorboard cannot handle non-integer iteration #s
self.writer.add_scalar(metric, value, self.batch_total)
else:
self.writer.add_scalar(metric, value, self.unit_total)
def reset(self):
self.unit_count = 0
self.example_count = 0
self.batch_count = 0
if self.timer is not None:
self.timer.update()
class Timer(object):
"""Computes elapsed time."""
def __init__(self):
"""Initialize timer"""
self.reset()
def reset(self):
"""Reset timer, completely obliterating history"""
self.start = time.time()
self.update()
def update(self):
"""Update timer with most recent click point"""
self.click = time.time()
def elapsed(self):
"""Get time elapsed since last recorded click"""
elapsed = time.time() - self.click
return elapsed
def total_elapsed(self):
return time.time() - self.start
|
metal-master
|
metal/mmtl/mmtl_logger.py
|
from abc import ABC
import torch.nn.functional as F
from metal.end_model import IdentityModule
from metal.mmtl.modules import MetalModule, MetalModuleWrapper
from metal.mmtl.scorer import Scorer
class Task(ABC):
"""A abstract class for tasks in MMTL Metal Model.
Args:
name: (str) The name of the task
TODO: replace this with a more fully-featured path through the network
input_module: (nn.Module) The input module
middle_module: (nn.Module) A middle module
head_module: (nn.Module) The task head module
output_hat_func: A function of the form f(forward(X)) -> output (e.g. probs)
loss_hat_func: A function of the form f(forward(X), Y) -> loss (scalar Tensor)
We recommend returning an average loss per example so that loss magnitude
is more consistent in the face of batch size changes
loss_multiplier: A scalar by which the loss for this task will be multiplied.
Default is 1 (no scaling effect at all)
scorer: A Scorer that returns a metrics_dict object.
"""
def __init__(
self,
name,
input_module,
middle_module,
head_module,
output_hat_func,
loss_hat_func,
loss_multiplier,
scorer,
) -> None:
self.name = name
self.input_module = self._wrap_module(input_module)
self.middle_module = self._wrap_module(middle_module)
self.head_module = self._wrap_module(head_module)
self.output_hat_func = output_hat_func
self.loss_hat_func = loss_hat_func
self.loss_multiplier = loss_multiplier
self.scorer = scorer
@staticmethod
def _wrap_module(module):
if isinstance(module, MetalModule):
return module
else:
return MetalModuleWrapper(module)
def __repr__(self):
cls_name = type(self).__name__
return f"{cls_name}(name={self.name}, loss_multiplier={self.loss_multiplier})"
class ClassificationTask(Task):
"""A classification task for use in an MMTL MetalModel
    loss_hat_func flattens the labels into a 1D tensor and subtracts 1 to account
    for the fact that our labels are categorical (e.g., {1,2}) while
    F.cross_entropy() expects 0-indexed labels.
"""
def __init__(
self,
name,
input_module=IdentityModule(),
middle_module=IdentityModule(),
head_module=IdentityModule(),
output_hat_func=(lambda X: F.softmax(X["data"], dim=1)),
loss_hat_func=(lambda X, Y: F.cross_entropy(X["data"], Y.view(-1) - 1)),
loss_multiplier=1.0,
scorer=Scorer(standard_metrics=["accuracy"]),
) -> None:
super(ClassificationTask, self).__init__(
name,
input_module,
middle_module,
head_module,
output_hat_func,
loss_hat_func,
loss_multiplier,
scorer,
)
class RegressionTask(Task):
"""A regression task for use in an MMTL MetalModel"""
def __init__(
self,
name,
input_module=IdentityModule(),
middle_module=IdentityModule(),
head_module=IdentityModule(),
output_hat_func=(lambda X: X["data"]),
# Note: no sigmoid (target labels can be in any range)
loss_hat_func=(lambda X, Y: F.mse_loss(X["data"].view(-1), Y.view(-1))),
loss_multiplier=1.0,
scorer=Scorer(standard_metrics=[]),
) -> None:
super(RegressionTask, self).__init__(
name,
input_module,
middle_module,
head_module,
output_hat_func,
loss_hat_func,
loss_multiplier,
scorer,
)
|
metal-master
|
metal/mmtl/task.py
|
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
from metal.utils import move_to_device, recursive_merge_dicts, set_seed
model_defaults = {
"seed": None,
"device": 0, # gpu id (int) or -1 for cpu
"verbose": True,
"fp16": False,
"model_weights": None, # the path to a saved checkpoint to initialize with
}
class MetalModel(nn.Module):
"""A dynamically constructed discriminative classifier
Args:
tasks: a list of Task objects which bring their own (named) modules
We currently support up to N input modules -> middle layers -> up to N heads
TODO: Accept specifications for more exotic structure (e.g., via user-defined graph)
"""
def __init__(self, tasks, **kwargs):
self.config = recursive_merge_dicts(model_defaults, kwargs, misses="insert")
# Set random seed before initializing module weights
if self.config["seed"] is None:
self.config["seed"] = np.random.randint(1e6)
set_seed(self.config["seed"])
super().__init__()
# Build network
self._build(tasks)
self.task_map = {task.name: task for task in tasks}
# Load weights
if self.config["model_weights"]:
self.load_weights(self.config["model_weights"])
# Half precision
if self.config["fp16"]:
print("metal_model.py: Using fp16")
self.half()
# Move model to device now, then move data to device in forward() or calculate_loss()
if self.config["device"] >= 0:
if torch.cuda.is_available():
if self.config["verbose"]:
print("Using GPU...")
self.to(torch.device(f"cuda:{self.config['device']}"))
else:
if self.config["verbose"]:
print("No cuda device available. Using cpu instead.")
# Show network
if self.config["verbose"]:
print("\nNetwork architecture:")
print(self)
print()
num_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
print(f"Total number of parameters: {num_params}")
def _build(self, tasks):
"""Iterates over tasks, adding their input_modules and head_modules"""
# TODO: Allow more flexible specification of network structure
self.input_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.input_module) for task in tasks}
)
self.middle_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.middle_module) for task in tasks}
)
self.head_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.head_module) for task in tasks}
)
self.loss_hat_funcs = {task.name: task.loss_hat_func for task in tasks}
self.output_hat_funcs = {task.name: task.output_hat_func for task in tasks}
def forward(self, X, task_names):
"""Returns the outputs of the requested task heads in a dictionary
The output of each task is the result of passing the input through the
input_module, middle_module, and head_module for that task, in that order.
Before calculating any intermediate values, we first check whether a previously
evaluated task has produced that intermediate result. If so, we use that.
Args:
X: a [batch_size, ...] batch from a DataLoader
Returns:
output_dict: {task_name (str): output (Tensor)}
"""
input = move_to_device(X, self.config["device"])
outputs = {}
# TODO: Replace this naive caching scheme with a more intelligent and feature-
# complete approach where arbitrary DAGs of modules are specified and we only
# cache things that will be reused by another task
for task_name in task_names:
# Extra .module call is to get past DataParallel wrapper
input_module = self.input_modules[task_name].module
if input_module not in outputs:
output = input_module(input)
outputs[input_module] = output
middle_module = self.middle_modules[task_name].module
if middle_module not in outputs:
output = middle_module(outputs[input_module])
outputs[middle_module] = output
head_module = self.head_modules[task_name].module
if head_module not in outputs:
output = head_module(outputs[middle_module])
outputs[head_module] = output
return {t: outputs[self.head_modules[t].module] for t in task_names}
def calculate_loss(self, X, Ys, payload_name, labels_to_tasks):
"""Returns a dict of {task_name: loss (a FloatTensor scalar)}.
Args:
X: an appropriate input for forward(), either a Tensor or tuple
Ys: a dict of {task_name: labels} where labels is [n, ?]
labels_to_tasks: a dict of {label_name: task_name} indicating which task
head to use to calculate the loss for each labelset.
"""
task_names = set(labels_to_tasks.values())
outputs = self.forward(X, task_names)
loss_dict = {} # Stores the loss by task
count_dict = {} # Stores the number of active examples by task
for label_name, task_name in labels_to_tasks.items():
loss_name = f"{task_name}/{payload_name}/{label_name}/loss"
Y = Ys[label_name]
assert isinstance(Y, torch.Tensor)
out = outputs[task_name]
            # Identify which instances have at least one non-zero target label
            active = torch.any(Y.detach() != 0, dim=1)
            count_dict[loss_name] = active.sum().item()
            # If there are inactive instances, slice them out to save computation
            # and ignore their contribution to the loss
            if not active.all():
Y = Y[active]
if isinstance(out, torch.Tensor):
out = out[active]
# If the output of the head has multiple fields, slice them all
elif isinstance(out, dict):
                    out = move_to_device(
                        {k: v[active] for k, v in out.items()}, self.config["device"]
                    )
# Convert to half precision last thing if applicable
if self.config["fp16"] and Y.dtype == torch.float32:
out["data"] = out["data"].half()
Y = Y.half()
# If no examples in this batch have labels for this task, skip loss calc
# Active has type torch.uint8; avoid overflow with long()
if active.long().sum():
label_loss = self.loss_hat_funcs[task_name](
out, move_to_device(Y, self.config["device"])
)
assert isinstance(label_loss.item(), float)
loss_dict[loss_name] = (
label_loss * self.task_map[task_name].loss_multiplier
)
return loss_dict, count_dict
@torch.no_grad()
def calculate_probs(self, X, task_names):
"""Returns a dict of {task_name: probs}
Args:
X: instances to feed through the network
task_names: the names of the tasks for which to calculate outputs
Returns:
{task_name: probs}: probs is the output of the output_hat for the given
task_head
The type of each entry in probs depends on the task type:
instance-based tasks: each entry in probs is a [k]-len array
token-based tasks: each entry is a [seq_len, k] array
"""
        self.eval()
return {
t: [probs.cpu().numpy() for probs in self.output_hat_funcs[t](out)]
for t, out in self.forward(X, task_names).items()
}
def update_config(self, update_dict):
"""Updates self.config with the values in a given update dictionary."""
self.config = recursive_merge_dicts(self.config, update_dict)
def load_weights(self, model_path):
"""Load model weights from checkpoint."""
if self.config["device"] >= 0:
device = torch.device(f"cuda:{self.config['device']}")
else:
device = torch.device("cpu")
try:
self.load_state_dict(torch.load(model_path, map_location=device)["model"])
except RuntimeError:
print("Your destination state dict has different keys for the update key.")
self.load_state_dict(
torch.load(model_path, map_location=device)["model"], strict=False
)
def save_weights(self, model_path):
"""Saves weight in checkpoint directory"""
raise NotImplementedError
@torch.no_grad()
def score(self, payload, metrics=[], verbose=True, **kwargs):
"""Calculate the requested metrics for the given payload
Args:
payload: a Payload to score
metrics: a list of full metric names, a single full metric name, or []:
list: a list of full metric names supported by the tasks' Scorers.
(full metric names are of the form task/payload/labelset/metric)
Only these metrics will be calculated and returned.
[]: defaults to all supported metrics for the given payload's Tasks
str: a single full metric name
A single score will be returned instead of a dictionary
Returns:
scores: a dict of the form {metric_name: score} corresponding to the
requested metrics (optionally a single score if metrics is a string
instead of a list)
"""
self.eval()
return_unwrapped = isinstance(metrics, str)
        # If specific metrics were requested, parse and calculate only those;
        # otherwise, calculate all metrics supported by the payload's tasks
        if metrics:
            metrics_list = metrics if isinstance(metrics, list) else [metrics]
            assert all(len(metric.split("/")) == 4 for metric in metrics_list)
            target_metrics = defaultdict(list)
            target_tasks = []
            target_labels = []
            for full_metric_name in metrics_list:
task_name, payload_name, label_name, metric_name = full_metric_name.split(
"/"
)
target_tasks.append(task_name)
target_labels.append(label_name)
target_metrics[label_name].append(metric_name)
else:
target_tasks = set(payload.labels_to_tasks.values())
target_labels = set(payload.labels_to_tasks.keys())
target_metrics = {
label_name: None for label_name in payload.labels_to_tasks
}
Ys, Ys_probs, Ys_preds = self.predict_with_gold(
payload, target_tasks, target_labels, return_preds=True, **kwargs
)
metrics_dict = {}
for label_name, task_name in payload.labels_to_tasks.items():
scorer = self.task_map[task_name].scorer
task_metrics_dict = scorer.score(
Ys[label_name],
Ys_probs[task_name],
Ys_preds[task_name],
target_metrics=target_metrics[label_name],
)
# Expand short metric names into full metric names
for metric_name, score in task_metrics_dict.items():
full_metric_name = (
f"{task_name}/{payload.name}/{label_name}/{metric_name}"
)
metrics_dict[full_metric_name] = score
# If a single metric was given as a string (not list), return a float
if return_unwrapped:
metric, score = metrics_dict.popitem()
return score
else:
return metrics_dict
@torch.no_grad()
def predict_with_gold(
self,
payload,
target_tasks=None,
target_labels=None,
return_preds=False,
max_examples=0,
**kwargs,
):
"""Extracts Y and calculates Y_prods, Y_preds for the given payload and tasks
To get just the probabilities or predictions for a single task, consider using
predict() or predict_probs().
Args:
payload: the Payload to make predictions for
target_tasks: if not None, predict probs only for the specified tasks;
otherwise, predict probs for all tasks with corresponding labelsets
in the payload
target_labels: if not None, return labels for only the specified labelsets;
otherwise, return all labelsets
return_preds: if True, also include preds in return values
max_examples: if > 0, predict for a maximum of this many examples
# TODO: consider returning Ys as tensors instead of lists (padded if necessary)
Returns:
Ys: a {label_name: Y} dict where Y is an [n] list of labels (often ints)
Ys_probs: a {task_name: Y_probs} dict where Y_probs is a [n] list of
probabilities
Ys_preds: a {task_name: Y_preds} dict where Y_preds is a [n] list of
predictions
"""
validate_targets(payload, target_tasks, target_labels)
if target_tasks is None:
target_tasks = set(payload.labels_to_tasks.values())
elif isinstance(target_tasks, str):
target_tasks = [target_tasks]
Ys = defaultdict(list)
Ys_probs = defaultdict(list)
total = 0
for batch_num, (Xb, Yb) in enumerate(payload.data_loader):
Yb_probs = self.calculate_probs(Xb, target_tasks)
for task_name, yb_probs in Yb_probs.items():
Ys_probs[task_name].extend(yb_probs)
for label_name, yb in Yb.items():
if target_labels is None or label_name in target_labels:
Ys[label_name].extend(yb.cpu().numpy())
total += len(Xb)
if max_examples > 0 and total >= max_examples:
break
if max_examples:
Ys = {label_name: Y[:max_examples] for label_name, Y in Ys.items()}
Ys_probs = {
task_name: Y_probs[:max_examples]
for task_name, Y_probs in Ys_probs.items()
}
if return_preds:
Ys_preds = {
task_name: [probs_to_preds(y_probs) for y_probs in Y_probs]
for task_name, Y_probs in Ys_probs.items()
}
return Ys, Ys_probs, Ys_preds
else:
return Ys, Ys_probs
# Single-task prediction helpers (for convenience)
@torch.no_grad()
def predict_probs(self, payload, task_name=None, **kwargs):
"""Return probabilistic labels for a single task of a payload
Args:
payload: a Payload
task_name: the task to calculate probabilities for
If task_name is None and the payload includes labels for only one task,
return predictions for that task. If task_name is None and the payload
includes labels for more than one task, raise an exception.
Returns:
Y_probs: an [n] list of probabilities
"""
self.eval()
if task_name is None:
if len(payload.labels_to_tasks) > 1:
msg = (
"The payload you provided contains labels for more than one "
"task, so task_name cannot be None."
)
raise Exception(msg)
else:
task_name = next(iter(payload.labels_to_tasks.values()))
target_tasks = [task_name]
_, Ys_probs = self.predict_with_gold(payload, target_tasks, **kwargs)
return Ys_probs[task_name]
@torch.no_grad()
def predict(self, payload, task_name=None, return_probs=False, **kwargs):
"""Return predicted labels for a single task of a payload
Args:
payload: a Payload
task_name: the task to calculate predictions for
If task_name is None and the payload includes labels for only one task,
return predictions for that task. If task_name is None and the payload
includes labels for more than one task, raise an exception.
Returns:
Y_probs: an [n] list of probabilities
Y_preds: an [n] list of predictions
"""
self.eval()
if task_name is None:
if len(payload.labels_to_tasks) > 1:
msg = (
"The payload you provided contains labels for more than one "
"task, so task_name cannot be None."
)
raise Exception(msg)
else:
task_name = next(iter(payload.labels_to_tasks.values()))
target_tasks = [task_name]
_, Ys_probs, Ys_preds = self.predict_with_gold(
payload, target_tasks, return_preds=True, **kwargs
)
Y_probs = Ys_probs[task_name]
Y_preds = Ys_preds[task_name]
if return_probs:
return Y_preds, Y_probs
else:
return Y_preds
def validate_targets(payload, target_tasks, target_labels):
if target_tasks:
for task_name in target_tasks:
if task_name not in set(payload.labels_to_tasks.values()):
msg = (
f"Could not find the specified task_name {task_name} in "
f"payload {payload}."
)
raise Exception(msg)
if target_labels:
for label_name in target_labels:
if label_name not in payload.labels_to_tasks:
msg = (
f"Could not find the specified labelset {label_name} in "
f"payload {payload}."
)
raise Exception(msg)
def probs_to_preds(probs):
"""Identifies the largest probability in each column on the last axis
We add 1 to the argmax to account for the fact that all labels in MeTaL are
categorical and the 0 label is reserved for abstaining weak labels.
"""
# TODO: Consider replacing argmax with a version of the rargmax utility to randomly
# break ties instead of accepting the first one, or allowing other tie-breaking
# strategies
return np.argmax(probs, axis=-1) + 1
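# Illustrative usage (editor's sketch, not part of the original module):
# predictions are 1-indexed because label 0 is reserved for abstains.
def _example_probs_to_preds():
    probs = np.array([[0.9, 0.1], [0.2, 0.8]])
    return probs_to_preds(probs)  # array([1, 2])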
|
metal-master
|
metal/mmtl/metal_model.py
|
import random
from abc import ABC, abstractmethod
class PayloadScheduler(ABC):
"""Returns batches from multiple payloads in some order for MTL training"""
def __init__(self, model, payloads, split, **kwargs):
pass
@abstractmethod
def get_batches(self, payloads, split, **kwargs):
"""Returns batches from all payloads in some order until one 'epoch' is reached
Args:
payloads: a list of Payloads
split: only Payloads belonging to this split will be returned
Yields:
batch: a tuple of (X_batch_dict, Y_batch_dict)
payload_name: the name of the payload returned
labels_to_tasks: a dict indicating which task each label set belongs to
For now, an epoch is defined as one full pass through all datasets.
This is required because of assumptions currently made in the logger and
training loop about the number of batches that will be seen per epoch.
"""
pass
class ProportionalScheduler(PayloadScheduler):
"""Returns batches proportional to the fraction of the total number of batches"""
def get_batches(self, payloads, split, **kwargs):
# First filter to only those payloads belonging to the given split
payloads = [p for p in payloads if p.split == split]
data_loaders = [iter(p.data_loader) for p in payloads]
batch_counts = [len(p.data_loader) for p in payloads]
batch_assignments = []
for payload_idx in range(len(payloads)):
batch_assignments.extend([payload_idx] * batch_counts[payload_idx])
random.shuffle(batch_assignments)
for payload_idx in batch_assignments:
batch = next(data_loaders[payload_idx])
payload = payloads[payload_idx]
yield (batch, payload.name, payload.labels_to_tasks)
|
metal-master
|
metal/mmtl/task_scheduler.py
|
from .metal_model import MetalModel
from .payload import Payload
__all__ = ["Payload", "MetalModel"]
|
metal-master
|
metal/mmtl/__init__.py
|
import torch
from metal.mmtl.data import MmtlDataLoader, MmtlDataset
class Payload(object):
"""A bundle of data_loaders...
Args:
name: the name of the payload (i.e., the name of the instance set)
data_loaders: A DataLoader to feed through the network
The DataLoader should wrap an MmtlDataset or one with a similar signature
labels_to_tasks: a dict of the form {label_name: task_name} mapping each label
set to the task that it corresponds to
split: a string name of a split that the data in this Payload belongs to
"""
def __init__(self, name, data_loader, labels_to_tasks, split):
self.name = name
self.data_loader = data_loader
self.labels_to_tasks = labels_to_tasks
self.split = split
def __repr__(self):
return (
f"Payload({self.name}: labels_to_tasks=[{self.labels_to_tasks}], "
f"split={self.split})"
)
    @classmethod
    def from_tensors(cls, name, X, Y, task_name, split, **data_loader_kwargs):
"""A shortcut for creating a Payload for data with one field and one label set
name: the name of this Payload
X: a Tensor of data of shape [n, ?]
Y: a Tensor of labels of shape [n, ?]
task_name: the name of the Task that the label set Y corresponds to
split: the string name of the split that this Payload corresponds to
X and Y will be packaged into an MmtlDataset that will be wrapped in an
MmtlDataLoader.
"""
dataset = MmtlDataset(X, Y)
data_loader = MmtlDataLoader(dataset, **data_loader_kwargs)
labels_to_tasks = {"labels": task_name}
        return cls(name, data_loader, labels_to_tasks, split)
def add_labelset(
self, task_name, label_name, label_list=None, label_fn=None, verbose=True
):
"""Adds a new labelset to an existing payload
Args:
task_name: the name of the Task to which the labelset belongs
label_name: the name of the labelset being added
label_fn: a function which maps a dataset item to a label
labels will be combined using torch.stack(labels, dim=0)
label_list: a list of labels in the correct order
Note that either label_fn or label_list should be provided, but not both.
"""
if label_fn is not None:
assert label_list is None
assert callable(label_fn)
new_labels = torch.stack(
[label_fn(x) for x in self.data_loader.dataset], dim=0
)
elif label_list is not None:
assert label_fn is None
assert isinstance(label_list, torch.Tensor)
new_labels = label_list
else:
raise ValueError("Incorrect label object type -- supply list or function")
if new_labels.dim() < 2:
raise Exception("New labelset must have at least two dimensions: [n, ?]")
        self.data_loader.dataset.labels[label_name] = new_labels
self.labels_to_tasks[label_name] = task_name
if verbose:
active = torch.any(new_labels != 0, dim=1)
msg = (
f"Added labelset with {sum(active.long())}/{len(active)} labels for "
f"task {task_name} to payload {self.name}."
)
print(msg)
def remove_labelset(self, label_name, verbose=True):
self.data_loader.dataset.labels.pop(label_name)
task_name = self.labels_to_tasks[label_name]
del self.labels_to_tasks[label_name]
if verbose:
print(
f"Removed labelset {label_name} for task {task_name} from payload {self.name}."
)
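# Illustrative usage (editor's sketch, not part of the original module; names,
# shapes, and the batch_size kwarg forwarded to the DataLoader are
# hypothetical): wrapping a tensor pair into a single-task Payload.
def _example_payload():
    X = torch.randn(8, 10)
    Y = torch.randint(1, 3, (8, 1))
    payload = Payload.from_tensors("foo_train", X, Y, "foo_task", "train", batch_size=4)
    return payload.labels_to_tasks  # {"labels": "foo_task"}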
|
metal-master
|
metal/mmtl/payload.py
|
import numpy as np
import torch.nn.functional as F
from metal.end_model import IdentityModule
from metal.mmtl.scorer import Scorer
from metal.mmtl.task import Task
def tokenwise_ce_loss(out, Y_gold):
"""Compute the token-averaged cross-entropy loss
We assume the standard MeTaL convention of no 0 labels in Y_gold
"""
logits, attention_mask = out
batch_size, seq_len, num_classes = logits.shape
active = attention_mask.view(-1) == 1
active_logits = logits.view(-1, num_classes)[active]
active_labels = Y_gold.view(-1)[active]
return F.cross_entropy(active_logits, active_labels - 1, reduction="mean")
def tokenwise_softmax(out):
"""Compute the token-wise class probabilities for each token
Args:
out: the output of task head
Returns:
probs: [batch_size] list of [seq_len, num_classes] probabilities
Note that seq_len may vary by instance after this step (padding is removed)
"""
logits, masks = out
batch_size, seq_len, num_classes = logits.shape
probs = F.softmax(logits, dim=2)
return [probs_matrix[mask == 1] for probs_matrix, mask in zip(probs, masks)]
def tokenwise_accuracy(gold, preds, probs=None):
"""Compute the average token-wise accuracy per example"""
# HACK: Most unfortunately, incoming gold is padded whereas preds are not
# For now we just drop the padding on the end by looking up the length of the preds
# Longer-term, find a more intuitive hard and fast rule for when Y will be padded
accs = []
for y, y_preds in zip(gold, preds):
acc = np.mean(y[: len(y_preds)] == y_preds)
accs.append(acc)
return {"token_acc": float(np.mean(accs))}
class TokenClassificationTask(Task):
"""A single task for predicting a class for multiple tokens (e.g., POS tagging)
Assumed i/o of head_module:
(sequence_output, attention_mask) -> (logits, attention_mask)
logits: [batch_size, seq_len, num_classes]
"""
def __init__(
self,
name,
input_module=IdentityModule(),
middle_module=IdentityModule(),
head_module=IdentityModule(),
output_hat_func=tokenwise_softmax,
loss_hat_func=tokenwise_ce_loss,
loss_multiplier=1.0,
scorer=Scorer(custom_metric_funcs={tokenwise_accuracy: ["token_acc"]}),
) -> None:
super().__init__(
name,
input_module,
middle_module,
head_module,
output_hat_func,
loss_hat_func,
loss_multiplier,
scorer,
)
|
metal-master
|
metal/mmtl/token_task.py
|
import torch.nn as nn
class MetalModule(nn.Module):
"""An abstract class of a module that accepts and returns a dict"""
def __init__(self):
super().__init__()
class MetalModuleWrapper(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, X):
# The object that is passed out must be different from the object that gets
# passed in so that cached outputs from intermediate modules aren't mutated
X_out = {k: v for k, v in X.items()}
X_out["data"] = self.module(X["data"])
return X_out
|
metal-master
|
metal/mmtl/modules.py
|
import copy
import os
import warnings
from collections import defaultdict
from pprint import pprint
from shutil import copy2
import dill
import numpy as np
import torch
import torch.optim as optim
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from metal.logging import Checkpointer, LogWriter, TensorBoardWriter
from metal.mmtl.mmtl_logger import Logger # NOTE: we use special MMTL logger
from metal.mmtl.task_scheduler import ProportionalScheduler
from metal.utils import recursive_merge_dicts, recursive_transform, set_seed
# Import tqdm_notebook if in Jupyter notebook
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
# Only use tqdm notebook if not in travis testing
if "CI" not in os.environ:
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
trainer_defaults = {
"verbose": True,
"seed": None,
# Commit hash
"commit_hash": None,
"ami": None, # ami id for aws
# Display
"progress_bar": False,
# Train Loop
"n_epochs": 1,
"l2": 0.0,
"grad_clip": 1.0,
# Optimizer
"optimizer_config": {
"optimizer": "adam",
"optimizer_common": {"lr": 0.01},
# Optimizer - SGD
"sgd_config": {"momentum": 0.9},
# Optimizer - Adam
"adam_config": {"betas": (0.9, 0.999)},
# Optimizer - RMSProp
"rmsprop_config": {}, # Use defaults
},
# LR Scheduler (for learning rate)
"lr_scheduler": None,
# ['linear', 'exponential', 'reduce_on_plateau']
# 'reduce_on_plateau' uses checkpoint_metric to assess plateaus
"lr_scheduler_config": {
# Linearly increase lr up to "lr" over this many warmup_units
"warmup_steps": 0.0,
"warmup_unit": "batches", # ["epochs", "batches"]
# The minimum lr that will ever be used after warmup.
"min_lr": 0.0,
# Scheduler - exponential
"exponential_config": {"gamma": 0.999}, # decay rate
# Scheduler - reduce_on_plateau
"plateau_config": {"factor": 0.5, "patience": 10, "threshold": 0.0001},
},
# Metrics
"metrics_config": {
# The list of task metrics (task/split/metric) to calculate (and log);
# if empty, calculate all metrics supported by all tasks' Scorers.
"task_metrics": [],
# A list of functions that operate on a metrics_dict and return a dict with
# additional metrics (e.g., aggregated metrics)
"aggregate_metric_fns": [],
# Run scorers over a maximum of this many examples if > 0.
"max_valid_examples": 0,
# The name of the split to run scoring on during training
# To score over multiple splits, set valid_split=None and use task_metrics
"valid_split": "valid",
# The name of the split to run final evaluation on after training
"test_split": None, # If None, calculate final metrics over all splits
# If non-None, only calculate and report these metrics every `score_every`
# units (this can include the names of built-in and user-defined metrics);
# otherwise, include all metrics returned by task Scorers.
},
# Task Scheduler
"task_scheduler": "proportional", # ["proportional", "staged"]
# Logger (see metal/logging/logger.py for descriptions)
"logger": True,
"logger_config": {
"log_unit": "epochs", # ['seconds', 'examples', 'batches', 'epochs']
# Report loss every this many log_units
"log_every": 1.0,
# Calculate and report metrics every this many log_units:
# -1: default to log_every
# 0: do not calculate or log metrics
# otherwise: must be a multiple of log_every
"score_every": -1.0,
"log_lr": True, # If True, also log learning rate whenever loss is logged
},
# LogWriter/Tensorboard (see metal/logging/writer.py for descriptions)
"writer": None, # [None, "json", "tensorboard"]
"writer_config": { # Log (or event) file stored at log_dir/run_dir/run_name
"log_dir": "logs",
"run_dir": None,
"run_name": None,
# May specify a subset of metrics in metrics_dict to be written.
# If [], write all available metrics to the logs
"writer_metrics": [],
},
# Checkpointer (see metal/logging/checkpointer.py for descriptions)
"checkpoint": True, # If True, checkpoint models when certain conditions are met
    # If True, checkpoint directory will be cleaned after training (if checkpoint_best
# is True, the best model will first be copied to the log_dir/run_dir/run_name/)
"checkpoint_cleanup": True,
"checkpoint_config": {
# TODO: unify checkpoint=['every', 'best', 'final']; specify one strategy
"checkpoint_every": 0, # Save a model checkpoint every this many log_units
# If checkpoint_best, also save the "best" model according to some metric
# The "best" model will have the ['max', 'min'] value of checkpoint_metric
# This metric must be produced by one of the task Scorer objects so it will be
# available for lookup; assumes valid split unless appended with "train/"
"checkpoint_best": False,
# "checkpoint_final": False, # Save a model checkpoint at the end of training
"checkpoint_metric": "model/train/all/loss",
"checkpoint_metric_mode": "min",
# If None, checkpoint_dir defaults to the log_dir/run_dir/run_name/checkpoints
# Note that using this default path is strongly recommended.
# If you hardcode checkpoint_dir, checkpoints from concurrent runs may overwrite
# each other.
"checkpoint_dir": None,
"checkpoint_runway": 0,
},
}
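# Example (hedged sketch, not part of the original file): MultitaskTrainer below
# deep-merges its constructor kwargs into trainer_defaults via
# recursive_merge_dicts, so nested keys can be overridden directly. All values
# here are illustrative.
#
#   trainer = MultitaskTrainer(
#       n_epochs=3,
#       progress_bar=True,
#       optimizer_config={"optimizer": "sgd", "optimizer_common": {"lr": 0.1}},
#       lr_scheduler="linear",
#   )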
class MultitaskTrainer(object):
"""Driver for the MTL training process"""
def __init__(self, **kwargs):
self.config = recursive_merge_dicts(trainer_defaults, kwargs, misses="insert")
# Set random seeds
if self.config["seed"] is None:
self.config["seed"] = np.random.randint(1e6)
set_seed(self.config["seed"])
def train_model(self, model, payloads, **kwargs):
# NOTE: misses="insert" so we can log extra metadata (e.g. num_parameters)
# and eventually write to disk.
self.config = recursive_merge_dicts(self.config, kwargs, misses="insert")
self.task_names = [task_name for task_name in model.task_map]
self.payload_names = [payload.name for payload in payloads]
train_payloads = [p for p in payloads if p.split == "train"]
if not train_payloads:
msg = "At least one payload must have property payload.split=='train'"
raise Exception(msg)
# Calculate epoch statistics
        # NOTE: We approximate the number of examples as batch_size * num_batches
self.batches_per_epoch = sum([len(p.data_loader) for p in train_payloads])
self.examples_per_epoch = sum(
[len(p.data_loader) * p.data_loader.batch_size for p in train_payloads]
)
if self.config["verbose"]:
print(f"Beginning train loop.")
print(
f"Expecting approximately {self.examples_per_epoch} examples total "
f"and {self.batches_per_epoch} batches per epoch from "
f"{len(train_payloads)} payload(s) in the train split."
)
# Check inputs
self._check_metrics()
# Set training components
self._set_writer()
self._set_logger()
self._set_checkpointer(model)
self._set_optimizer(model)
self._set_lr_scheduler(model) # TODO: Support more detailed training schedules
self._set_task_scheduler(model, payloads)
# Record config
if self.writer:
self.writer.write_config(self.config)
# Train the model
# TODO: Allow other ways to train besides 1 epoch of all datasets
model.train()
# Dict metrics_hist contains the most recently recorded value of all metrics
self.metrics_hist = {}
self._reset_losses()
for epoch in range(self.config["n_epochs"]):
progress_bar = self.config["progress_bar"] and self.config["verbose"]
t = tqdm(
enumerate(self.task_scheduler.get_batches(payloads, "train")),
total=self.batches_per_epoch,
disable=(not progress_bar),
)
for batch_num, (batch, payload_name, labels_to_tasks) in t:
# NOTE: actual batch_size may not equal config's target batch_size,
# for example due to orphan batches. We base batch size off of Y instead
# of X because we know Y will contain tensors, whereas X can be of any
# format the input_module accepts, including tuples of tensors, etc.
_, Ys = batch
batch_size = len(next(iter(Ys.values())))
batch_id = epoch * self.batches_per_epoch + batch_num
# Zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass to calculate the average loss per example by task
# Counts stores the number of examples in each batch with labels by task
loss_dict, count_dict = model.calculate_loss(
*batch, payload_name, labels_to_tasks
)
# NOTE: If there were no "active" examples, loss_dict is empty
# Skip additional loss-based computation at this point
if not loss_dict:
continue
loss = sum(loss_dict.values())
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
# Backward pass to calculate gradients
# Loss is an average loss per example
if model.config["fp16"]:
self.optimizer.backward(loss)
else:
loss.backward()
# Clip gradient norm (not individual gradient magnitudes)
# max_grad_value = max([p.grad.abs().max().item() for p in model.parameters()])
if self.config["grad_clip"]:
torch.nn.utils.clip_grad_norm_(
model.parameters(), self.config["grad_clip"]
)
# Perform optimizer step
self.optimizer.step()
# Update loss
for loss_name in loss_dict:
if count_dict[loss_name]:
self.running_losses[loss_name] += (
loss_dict[loss_name].item() * count_dict[loss_name]
)
self.running_examples[loss_name] += count_dict[loss_name]
# Calculate metrics, log, and checkpoint as necessary
metrics_dict = self._execute_logging(model, payloads, batch_size)
# Confirm metrics being produced are in proper format
if epoch == 0 and batch_num == 0:
self._validate_metrics_dict(metrics_dict)
# Apply learning rate scheduler
self._update_lr_scheduler(model, batch_id)
# tqdm output
if len(model.task_map) == 1:
t.set_postfix(loss=metrics_dict["model/train/all/loss"])
else:
losses = {}
for key, val in metrics_dict.items():
if "loss" in key:
losses[key] = val
t.set_postfix(losses)
model.eval()
# Restore best model if applicable
if self.checkpointer and self.checkpointer.checkpoint_best:
# First do a final checkpoint at the end of training
metrics_dict = self._execute_logging(
model, payloads, batch_size, force_log=True
)
self.checkpointer.load_best_model(model=model)
# Copy best model to log directory
if self.writer:
path_to_best = os.path.join(
self.checkpointer.checkpoint_dir, "best_model.pth"
)
path_to_logs = self.writer.log_subdir
if os.path.isfile(path_to_best):
copy2(path_to_best, path_to_logs)
# Print final performance values
if self.config["verbose"]:
print("Finished training")
# Calculate metrics for all splits if test_split=None
test_split = self.config["metrics_config"]["test_split"]
metrics_dict = self.calculate_metrics(model, payloads, split=test_split)
if self.config["verbose"]:
pprint(metrics_dict)
# Clean up checkpoints
if self.checkpointer and self.config["checkpoint_cleanup"]:
print("Cleaning checkpoints")
self.checkpointer.clean_up()
# Write log if applicable
if self.writer:
# convert from numpy to python float
metrics_dict = recursive_transform(
metrics_dict, lambda x: type(x).__module__ == np.__name__, float
)
self.writer.write_metrics(metrics_dict)
self.writer.write_log()
self.writer.close()
# pickle and save the full model
full_model_path = os.path.join(self.writer.log_subdir, "model.pkl")
torch.save(model, full_model_path, pickle_module=dill)
print(f"Full model saved at {full_model_path}")
return metrics_dict
def _execute_logging(self, model, payloads, batch_size, force_log=False):
model.eval()
metrics_dict = {}
metrics_dict.update(self.aggregate_losses())
self.logger.increment(batch_size)
do_log = False
if self.logger.loss_time():
self._reset_losses()
do_log = True
if self.logger.metrics_time() or force_log:
# Unless valid_split is None, Scorers will only score on one split
valid_split = self.config["metrics_config"]["valid_split"]
metrics_dict.update(
self.calculate_metrics(model, payloads, split=valid_split)
)
do_log = True
if do_log or force_log:
# Log to screen/file/TensorBoard
self.logger.log(metrics_dict)
# Save best model if applicable
self._checkpoint(model, metrics_dict)
self.metrics_hist.update(metrics_dict)
model.train()
return metrics_dict
def aggregate_losses(self):
"""Calculate the average loss for each task since the last calculation
If no examples of a certain task have been seen since the losses were reset,
use the most recently reported value again (stored in metrics_hist).
If the loss for a certain task has never been reported, report it as None.
"""
metrics_dict = {}
for loss_name in self.running_losses:
if self.running_examples[loss_name]:
loss = self.running_losses[loss_name] / self.running_examples[loss_name]
elif self.metrics_hist.get(loss_name):
loss = self.metrics_hist[loss_name]
else:
loss = None
metrics_dict[loss_name] = loss
# Report micro average of losses
total_loss = sum(self.running_losses.values())
total_examples = sum(self.running_examples.values())
if total_examples > 0:
metrics_dict["model/train/all/loss"] = total_loss / total_examples
# Log learning rate
if self.config["logger_config"]["log_lr"]:
# For now just report one global lr; eventually support lr groups
metrics_dict[f"model/train/all/lr"] = self.optimizer.param_groups[0]["lr"]
return metrics_dict
def calculate_metrics(self, model, payloads, split=None):
metrics_dict = {}
# Update metrics_hist after task_metrics so aggregates metrics have access to
# most recently calculated numbers
metrics_dict.update(self.calculate_task_metrics(model, payloads, split))
self.metrics_hist.update(metrics_dict)
metrics_dict.update(self.calculate_aggregate_metrics())
self.metrics_hist.update(metrics_dict)
return metrics_dict
def calculate_task_metrics(self, model, payloads, split=None):
metrics_dict = {}
max_examples = self.config["metrics_config"]["max_valid_examples"]
task_metrics = self.config["metrics_config"]["task_metrics"]
# Losses are handled specially; we drop them from task_metrics
target_metrics = [metric for metric in task_metrics if "/loss" not in metric]
# Calculate metrics from Scorers
for payload in payloads:
if split and payload.split != split:
continue
payload_metrics_dict = model.score(
payload, target_metrics, max_examples=max_examples
)
metrics_dict.update(payload_metrics_dict)
return metrics_dict
def calculate_aggregate_metrics(self):
aggregate_metric_fns = self.config["metrics_config"]["aggregate_metric_fns"]
aggregate_metrics = {}
for metric_fn in aggregate_metric_fns:
aggregate_metrics.update(metric_fn(self.metrics_hist))
return aggregate_metrics
def _checkpoint(self, model, metrics_dict):
if self.checkpointer is None:
return
iteration = self.logger.unit_total
self.checkpointer.checkpoint(
metrics_dict, iteration, model, self.optimizer, self.lr_scheduler
)
def _reset_losses(self):
self.running_losses = defaultdict(float)
self.running_examples = defaultdict(int)
def _set_writer(self):
writer_config = self.config["writer_config"]
writer_config["verbose"] = self.config["verbose"]
if self.config["writer"] is None:
self.writer = None
elif self.config["writer"] == "json":
self.writer = LogWriter(**writer_config)
elif self.config["writer"] == "tensorboard":
self.writer = TensorBoardWriter(**writer_config)
else:
raise Exception(f"Unrecognized writer: {self.config['writer']}")
def _set_logger(self):
# If not provided, set score_every to log_every
logger_config = self.config["logger_config"]
if logger_config["score_every"] < 0:
logger_config["score_every"] = logger_config["log_every"]
self.logger = Logger(
logger_config,
self.batches_per_epoch,
self.writer,
verbose=self.config["verbose"],
)
def _set_checkpointer(self, model):
if (
self.config["checkpoint"]
or self.config["lr_scheduler"] == "reduce_on_plateau"
):
self._validate_checkpoint_metric(model)
# Set checkpoint_dir to log_dir/checkpoints/
if self.writer:
if not self.config["checkpoint_config"]["checkpoint_dir"]:
self.config["checkpoint_config"]["checkpoint_dir"] = os.path.join(
self.writer.log_subdir, "checkpoints"
)
else:
# If you hardcode checkpoint_dir, checkpoints from concurrent runs
# may overwrite each other.
msg = (
"You have provided checkpoint_dir, overriding the default "
"of using log_dir/run_dir/run_name/checkpoints. Be careful: "
"multiple concurrent runs may override each other."
)
warnings.warn(msg)
else:
self.config["checkpoint_config"]["checkpoint_dir"] = "checkpoints"
# Create Checkpointer
self.checkpointer = Checkpointer(
self.config["checkpoint_config"], verbose=self.config["verbose"]
)
else:
self.checkpointer = None
def _set_optimizer(self, model):
optimizer_config = self.config["optimizer_config"]
opt = optimizer_config["optimizer"]
parameters = filter(lambda p: p.requires_grad, model.parameters())
# Special optimizer for fp16
if model.config["fp16"]:
from apex.optimizers import FP16_Optimizer, FusedAdam
class FP16_OptimizerMMTLModified(FP16_Optimizer):
def step(self, closure=None):
"""
                    Closure is not supported.
"""
                    # First compute norms for all groups so we know if there is overflow
grads_groups_flat = []
norm_groups = []
skip = False
for i, group in enumerate(self.fp16_groups):
# Only part that's changed -- zero out grads that are None
grads_to_use = []
for p in group:
if p.grad is None:
size = list(p.size())
grads_to_use.append(p.new_zeros(size))
else:
grads_to_use.append(p.grad)
grads_groups_flat.append(_flatten_dense_tensors(grads_to_use))
norm_groups.append(
self._compute_grad_norm(grads_groups_flat[i])
)
if norm_groups[i] == -1: # TODO: early break
skip = True
if skip:
self._update_scale(skip)
return
# norm is in fact norm*cur_scale
self.optimizer.step(
grads=[[g] for g in grads_groups_flat],
output_params=[[p] for p in self.fp16_groups_flat],
scale=self.cur_scale,
grad_norms=norm_groups,
)
# TODO: This may not be necessary; confirm if it is
for i in range(len(norm_groups)):
updated_params = _unflatten_dense_tensors(
self.fp16_groups_flat[i], self.fp16_groups[i]
)
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
self._update_scale(False)
return
optimizer = FusedAdam(
parameters,
**optimizer_config["optimizer_common"],
bias_correction=False,
max_grad_norm=1.0,
)
optimizer = FP16_OptimizerMMTLModified(optimizer, dynamic_loss_scale=True)
elif opt == "sgd":
optimizer = optim.SGD(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["sgd_config"],
weight_decay=self.config["l2"],
)
elif opt == "rmsprop":
optimizer = optim.RMSprop(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["rmsprop_config"],
weight_decay=self.config["l2"],
)
elif opt == "adam":
optimizer = optim.Adam(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["adam_config"],
weight_decay=self.config["l2"],
)
elif opt == "adamax":
optimizer = optim.Adamax(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["adam_config"],
weight_decay=self.config["l2"],
)
elif opt == "sparseadam":
optimizer = optim.SparseAdam(
parameters,
**optimizer_config["optimizer_common"],
**optimizer_config["adam_config"],
)
if self.config["l2"]:
raise Exception(
"SparseAdam optimizer does not support weight_decay (l2 penalty)."
)
else:
raise ValueError(f"Did not recognize optimizer option '{opt}'")
self.optimizer = optimizer
def _set_lr_scheduler(self, model):
lr_scheduler = self.config["lr_scheduler"]
lr_scheduler_config = self.config["lr_scheduler_config"]
# Create warmup scheduler for first warmup_steps warmup_units if applicable
self._set_warmup_scheduler(model)
optimizer_to_config = self.optimizer
# If using half precision, configure the underlying
# optimizer of FP16_Optimizer
if model.config["fp16"]:
optimizer_to_config = self.optimizer.optimizer
# Create regular lr scheduler for use after warmup
        if lr_scheduler is not None:
if lr_scheduler == "linear":
total_steps = self.batches_per_epoch * self.config["n_epochs"]
cooldown_steps = total_steps - self.warmup_steps
linear_cooldown_func = lambda x: (cooldown_steps - x) / cooldown_steps
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer_to_config, linear_cooldown_func
)
elif lr_scheduler == "exponential":
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer_to_config, **lr_scheduler_config["exponential_config"]
)
elif lr_scheduler == "reduce_on_plateau":
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer_to_config,
min_lr=lr_scheduler_config["min_lr"],
**lr_scheduler_config["plateau_config"],
)
else:
raise ValueError(
f"Did not recognize lr_scheduler option '{lr_scheduler}'"
)
self.lr_scheduler = lr_scheduler
def _set_warmup_scheduler(self, model):
optimizer_to_use = self.optimizer
if model.config["fp16"]:
optimizer_to_use = self.optimizer.optimizer
if self.config["lr_scheduler_config"]["warmup_steps"]:
warmup_unit = self.config["lr_scheduler_config"]["warmup_unit"]
warmup_steps = self.config["lr_scheduler_config"]["warmup_steps"]
# Convert warmup unit to batches
if warmup_unit == "epochs":
self.warmup_steps = max(1, int(warmup_steps * self.batches_per_epoch))
elif warmup_unit == "batches":
self.warmup_steps = max(1, int(warmup_steps))
else:
msg = f"warmup_unit must be 'epochs' or 'batches', not {warmup_unit}"
raise Exception(msg)
# This function returns a multiplicative factor based on iteration number
linear_warmup_func = lambda x: x / self.warmup_steps
warmup_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer_to_use, linear_warmup_func
)
else:
warmup_scheduler = None
self.warmup_steps = 0
self.warmup_scheduler = warmup_scheduler
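    # Worked example (hedged, illustrative numbers): with warmup_steps=0.5,
    # warmup_unit="epochs", and batches_per_epoch=100, warmup lasts
    # max(1, int(0.5 * 100)) = 50 batches; linear_warmup_func then scales the
    # base lr by x / 50 for steps x < 50, after which the main lr scheduler
    # takes over (see _update_lr_scheduler below).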
def _update_lr_scheduler(self, model, step):
"""Optionally update the learning rate scheduler with each batch"""
optimizer_to_use = self.optimizer
if model.config["fp16"]:
optimizer_to_use = self.optimizer.optimizer
lr_scheduler_config = self.config["lr_scheduler_config"]
if self.warmup_scheduler and (step < self.warmup_steps):
self.warmup_scheduler.step()
elif self.lr_scheduler is not None:
# Metrics-based scheduler(s)
if self.config["lr_scheduler"] == "reduce_on_plateau":
checkpoint_config = self.config["checkpoint_config"]
metric_name = checkpoint_config["checkpoint_metric"]
score = self.metrics_hist.get(metric_name, None)
if score is not None:
self.lr_scheduler.step(score)
# Iteration-based scheduler(s)
else:
self.lr_scheduler.step()
# HACK: We enforce min_lr right now by just overwriting
min_lr = lr_scheduler_config["min_lr"]
if min_lr and optimizer_to_use.param_groups[0]["lr"] < min_lr:
optimizer_to_use.param_groups[0]["lr"] = min_lr
def _set_task_scheduler(self, model, payloads):
if self.config["task_scheduler"] == "proportional":
self.task_scheduler = ProportionalScheduler(model, payloads, "train")
else:
raise NotImplementedError
def _validate_checkpoint_metric(self, model):
# Confirm that checkpoint_metric is a metric that will be available
checkpoint_config = self.config["checkpoint_config"]
checkpoint_metric = checkpoint_config["checkpoint_metric"]
if checkpoint_metric.startswith("model"):
metric_name = checkpoint_metric.split("/")[-1]
aggregate_metric_fns = self.config["metrics_config"]["aggregate_metric_fns"]
aggregate_metric_names = [
getattr(metric_fn, "__name__") for metric_fn in aggregate_metric_fns
]
if metric_name != "loss" and metric_name not in aggregate_metric_names:
msg = (
f"The checkpoint_metric you specified ('{checkpoint_metric}') is "
f"not currently supported."
)
raise Exception(msg)
else:
if checkpoint_metric.count("/") != 3:
msg = (
f"checkpoint_metric must have a full metric name "
f"(task/payload/split/metric); you submitted: {checkpoint_metric}"
)
raise Exception(msg)
task_name, payload_name, label_name, metric = checkpoint_metric.split("/")
try:
task = model.task_map[task_name]
except KeyError:
msg = (
f"The task for your specified checkpoint_metric "
f"({checkpoint_metric}) was not found in the list of "
f"submitted tasks: {[t for t in self.task_names]}."
)
raise Exception(msg)
if payload_name not in self.payload_names:
msg = (
f"The payload for your specified checkpoint_metric "
f"({checkpoint_metric}) was not found in the list of "
f"submitted payloads: {self.payload_names}."
)
raise Exception(msg)
if metric != "loss" and metric not in task.scorer.metrics:
msg = (
f"The checkpoint_metric you specified "
f"({checkpoint_metric}) is not in the list of supported "
f"metrics ({task.scorer.metrics}) for the Scorer of that task. "
f"Either change your checkpoint_metric, use a different Scorer, "
f"or add a custom_metric_func that outputs your desired metric."
)
raise Exception(msg)
task_metrics = self.config["metrics_config"]["task_metrics"]
if task_metrics and checkpoint_metric not in task_metrics:
msg = (
"checkpoint_metric must be a metric in task_metrics if "
"task_metrics is not empty"
)
raise Exception(msg)
def _validate_metrics_dict(self, metrics_dict):
for full_name in metrics_dict:
if len(full_name.split("/")) != 4:
msg = (
f"Metric should have form task/payload/label_name/metric, not: "
f"{full_name}"
)
raise Exception(msg)
def _check_metrics(self):
assert isinstance(self.config["metrics_config"]["task_metrics"], list)
assert isinstance(self.config["metrics_config"]["aggregate_metric_fns"], list)
|
metal-master
|
metal/mmtl/trainer.py
|
from collections import defaultdict
import torch
from torch.utils.data import DataLoader, Dataset
from metal.utils import padded_tensor
class MmtlDataset(Dataset):
"""A pairing of data with one or more fields to one or more label sets
Args:
X: Instances. If X is a dict, it should be in the form {field_name: values}
where field_name is a string and values is an [n]-length iterable.
Otherwise, X will be thinly wrapped into a dict of the form {"data": X}
Y: Labels. If Y is a dict, it should be in the form {label_name: values}
where label_name is a string and values is an [n]-length iterable.
Otherwise, Y will be thinly wrapped into a dict of the form {"labels": Y}
"""
def __init__(self, X, Y):
if not isinstance(X, dict):
X = {"data": X}
if not isinstance(Y, dict):
Y = {"labels": Y}
for labels in Y.values():
if not isinstance(labels, torch.Tensor):
raise Exception("All label sets must be of type torch.Tensor.")
self.X_dict = X
self.Y_dict = Y
def __getitem__(self, index):
x_dict = {key: field[index] for key, field in self.X_dict.items()}
y_dict = {key: label[index] for key, label in self.Y_dict.items()}
return x_dict, y_dict
def __len__(self):
return len(next(iter(self.X_dict.values())))
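# Example (hedged): plain tensors are thinly wrapped into the dict forms
# described in the docstring above.
#
#   X = torch.arange(8).view(4, 2)   # wrapped as {"data": X}
#   Y = torch.tensor([1, 2, 1, 2])   # wrapped as {"labels": Y}
#   dataset = MmtlDataset(X, Y)
#   x_dict, y_dict = dataset[0]
#   # ({"data": tensor([0, 1])}, {"labels": tensor(1)}); len(dataset) == 4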
def mmtl_collate_fn(batch_list):
"""Collates a batch of (x_dict, y_dict) tuples into padded (X_dict, Y_dict)
Assumes that all values are torch Tensors
Args:
batch_list: a list of tuples containing (x_dict, y_dict), where x_dict
and y_dict each contain a fields or labels for a single instance.
Returns:
X_batch: a dict of the form {field_name: values} where field_name is a string
and values is a [batch_size]-length iterable
Y_batch: a dict of the form {label_name: values} where label_name is a string
and values is a Tensor where values.shape[0] == batch_size
Resulting values may be [n, 1] (e.g., instance labels) or [n, seq_len] (e.g.,
token labels)
"""
    def list_to_tensor(list_):
        # 0-dim tensors (e.g., scalar instance labels): stack into [batch_size, 1]
        if all(value.dim() == 0 for value in list_):
            tensor_ = torch.stack(list_, dim=0).view(batch_size, -1)
        # Tensors of equal length: stack directly
        elif all(len(list_[i]) == len(list_[0]) for i in range(len(list_))):
            tensor_ = torch.stack(list_, dim=0).view(batch_size, -1)
        # Variable-length tensors: pad out to the length of the longest one
        else:
            tensor_ = padded_tensor(list_).view(batch_size, -1)
        return tensor_
batch_size = len(batch_list)
X_batch = defaultdict(list)
Y_batch = defaultdict(list)
for x_dict, y_dict in batch_list:
for field_name, value in x_dict.items():
X_batch[field_name].append(value)
for label_name, value in y_dict.items():
Y_batch[label_name].append(value)
for field_name, values in X_batch.items():
# Merge lists of tensors, leave other data types alone
if isinstance(values[0], torch.Tensor):
X_batch[field_name] = list_to_tensor(values)
for label_name, values in Y_batch.items():
Y_batch[label_name] = list_to_tensor(values)
# Remove 'defaultdict' property
return dict(X_batch), dict(Y_batch)
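# Example (hedged): collating two instances with different label lengths.
# Assumes metal.utils.padded_tensor right-pads to the longest sequence.
#
#   batch_list = [
#       ({"data": torch.tensor([1, 2, 3])}, {"labels": torch.tensor([1, 2, 1])}),
#       ({"data": torch.tensor([4])}, {"labels": torch.tensor([2])}),
#   ]
#   X_batch, Y_batch = mmtl_collate_fn(batch_list)
#   # X_batch["data"] and Y_batch["labels"] are both [2, 3] tensors,
#   # with the second row padded out to length 3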
class MmtlDataLoader(DataLoader):
def __init__(self, dataset, collate_fn=mmtl_collate_fn, **kwargs):
assert isinstance(dataset, MmtlDataset)
super().__init__(dataset, collate_fn=collate_fn, **kwargs)
|
metal-master
|
metal/mmtl/data.py
|
from metal.metrics import METRICS as STANDARD_METRICS, metric_score
class Scorer(object):
"""
DESIGN:
- A Scorer is a bundle of metrics; it defines what metrics _can_ be calculated on a
        given task (smart defaults may be possible based on the Task subclass; e.g.,
        classification tasks come with many metrics already defined).
- custom functions come with a list of names of the metrics they produce (with
error checking to confirm they don't produce more than that)
- A Scorer operates over gold labels, probabilities, and predictions
    NOTE: we use the following metric naming conventions:
    - All metrics in a scorer produce the simple metric name only
    - a simple metric name looks like "accuracy"
    - a full metric name looks like "foo_task/bar_payload/accuracy"
Args:
standard_metrics: List of strings of standard metrics for which to evaluate.
By default, calculate on valid split. Optionally, prepend metric with
"train/" to calculate on train split instead.
custom_metric_funcs: Dict of the form:
{metric_fn1: ["metric1a", ..., "metric1z"],
metric_fn2: ["metric2a", ..., "metric2z]}
where metric_fn is a function of the form:
metric_fn1(Y, Y_preds, probs=Y_probs) ->
{metric1a: value1, ..., metric1z: valueN}
"""
def __init__(self, standard_metrics=[], custom_metric_funcs={}):
self.standard_metrics = standard_metrics
for metric_name in standard_metrics:
if "/" in metric_name:
msg = (
f"Standard metrics at Scorer initialization time must not "
"include task or split name, but you submitted: {metric_name}"
)
raise Exception(msg)
if metric_name not in STANDARD_METRICS:
msg = (
f"Requested standard metric {metric_name} could not be found in "
"metrics.py."
)
raise Exception(msg)
# Create a map from custom metric names to the function that creates them
self.custom_metric_funcs = custom_metric_funcs
self.custom_metric_map = {}
for metric_fn, metric_names in custom_metric_funcs.items():
assert isinstance(metric_names, list)
for metric_name in metric_names:
if "/" in metric_name:
msg = (
f"Metrics produced by custom_metric_funcs must not include "
f"task or split name, but you submitted: {metric_name}."
)
raise Exception(msg)
self.custom_metric_map[metric_name] = metric_fn
def score(self, Y, Y_probs, Y_preds, target_metrics=None):
"""
Calculates and returns a metrics_dict for a given set of predictions and labels
Args:
Y: an [n] list of gold labels
Y_probs: an [n] list of probabilities
Y_preds: an [n] list of predictions
target_metrics: a list of simple metrics to calculate
Returns:
a metrics_dict object of the form:
{metric1 : score1, ...., metricN: score N}
Note that the returned metrics dict will be transformed to have full metric
names (e.g., "accuracy" -> "foo_task/bar_payload/accuracy") in the trainer.
"""
self.validate_target_metrics(target_metrics)
        # TODO: Tighten this up; it can be much more efficient
# The main issue is that we currently require Y/Y_probs/Y_preds to be lists
# so that they can support sequence-based tasks that have arbitrary length
# labels. But there is certainly a way we can be more strict/certain about
# what our data types will be and do some much more efficient slice operation
# instead of list comprehension.
# Identify all examples with at least one non-zero (i.e., non-abstain) label
active = [bool(y != 0) for y in Y]
if sum(active) != len(active):
Y = [y for a, y in zip(active, Y) if a]
if Y_probs:
Y_probs = [y for a, y in zip(active, Y_probs) if a]
if Y_preds:
Y_preds = [y for a, y in zip(active, Y_preds) if a]
simple_metrics_dict = {}
for metric in self.standard_metrics:
# If target metrics were specified and this is not one of them, skip it
if target_metrics and metric not in target_metrics:
continue
score = metric_score(Y, Y_preds, metric, probs=Y_probs)
simple_metrics_dict[metric] = score
for metric, custom_metric_func in self.custom_metric_map.items():
# If target metrics were specified and this is not one of them, skip it
if target_metrics and metric not in target_metrics:
continue
# If the current metric is already in the simple_metrics_dict, skip it
# This is possible because a custom_metric_func can return multiple metrics
if metric in simple_metrics_dict:
continue
            custom_metric_dict = custom_metric_func(Y, Y_preds, probs=Y_probs)
            for custom_metric, score in custom_metric_dict.items():
                if not target_metrics or custom_metric in target_metrics:
                    simple_metrics_dict[custom_metric] = score
return simple_metrics_dict
def validate_target_metrics(self, target_metrics):
if not target_metrics:
return
for metric in target_metrics:
if "/" in metric:
msg = (
"Target metrics must be in simple form (e.g., accuracy), "
"not full form (e.g., foo_task/bar_payload/accuracy) and "
"should not include the character '/'."
)
raise Exception(msg)
elif metric not in self.metrics:
msg = (
f"Target metric {metric} is not supported by the given Scorer. "
f"Supported tasks are: {self.metrics}."
)
raise Exception(msg)
@property
def metrics(self):
"""Returns a list of short metric names supported by this Scorer"""
return self.standard_metrics + list(self.custom_metric_map.keys())
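# Example (hedged): a Scorer with one standard and one custom metric. Assumes
# "accuracy" is among the standard metrics defined in metal.metrics; error_rate
# is illustrative only.
#
#   def error_rate(Y, Y_preds, probs=None):
#       return {"error_rate": sum(y != p for y, p in zip(Y, Y_preds)) / len(Y)}
#
#   scorer = Scorer(
#       standard_metrics=["accuracy"],
#       custom_metric_funcs={error_rate: ["error_rate"]},
#   )
#   scorer.metrics  # -> ["accuracy", "error_rate"]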
|
metal-master
|
metal/mmtl/scorer.py
|
import math
import numpy as np
from metal.tuners.tuner import ModelTuner
class HyperbandTuner(ModelTuner):
"""Performs hyperparameter search according to the Hyperband algorithm
Reference: (https://arxiv.org/pdf/1603.06560.pdf)
Args:
        model_class: (nn.Module class) The model class to train (uninitialized)
hyperband_epochs_budget: Number of total epochs of training to perform
in search.
hyperband_proportion_discard: proportion of configurations to discard
in each round of Hyperband's SuccessiveHalving. An integer.
log_dir: The directory in which to save intermediate results
If no log_dir is given, the model tuner will attempt to keep
all trained models in memory.
seed: Random seed
"""
def __init__(
self,
model_class,
hyperband_epochs_budget=200,
hyperband_proportion_discard=3,
log_dir=None,
run_dir=None,
run_name=None,
log_writer_class=None,
seed=None,
**tuner_args,
):
super().__init__(
model_class,
log_dir=log_dir,
run_dir=run_dir,
run_name=run_name,
log_writer_class=log_writer_class,
seed=seed,
**tuner_args,
)
# Set random seed (Note this only makes sense in single threaded mode)
self.rand_state = np.random.RandomState()
self.rand_state.seed(self.seed)
# Hyperband parameters
self.hyperband_epochs_budget = hyperband_epochs_budget
self.hyperband_proportion_discard = hyperband_proportion_discard
# Given the budget, generate the largest hyperband schedule
# within budget
self.hyperband_schedule = self.get_largest_schedule_within_budget(
self.hyperband_epochs_budget, self.hyperband_proportion_discard
)
# Print the search schedule
self.pretty_print_schedule(self.hyperband_schedule)
def pretty_print_schedule(self, hyperband_schedule, describe_hyperband=True):
"""
Prints scheduler for user to read.
"""
print("=========================================")
print("| Hyperband Schedule |")
print("=========================================")
if describe_hyperband:
# Print a message indicating what the below schedule means
print(
"Table consists of tuples of "
"(num configs, num_resources_per_config) "
"which specify how many configs to run and "
"for how many epochs. "
)
print(
"Each bracket starts with a list of random "
"configurations which is successively halved "
"according the schedule."
)
print(
"See the Hyperband paper "
"(https://arxiv.org/pdf/1603.06560.pdf) for more details."
)
print("-----------------------------------------")
for bracket_index, bracket in enumerate(hyperband_schedule):
bracket_string = "Bracket %d:" % bracket_index
for n_i, r_i in bracket:
bracket_string += " (%d, %d)" % (n_i, r_i)
print(bracket_string)
print("-----------------------------------------")
def get_largest_schedule_within_budget(self, budget, proportion_discard):
"""
        Gets the largest hyperband schedule within the given budget.
This is required since the original hyperband algorithm uses R,
the maximum number of resources per configuration.
TODO(maxlam): Possibly binary search it if this becomes a bottleneck.
Args:
budget: total budget of the schedule.
proportion_discard: hyperband parameter that specifies
the proportion of configurations to discard per iteration.
"""
# Exhaustively generate schedules and check if
# they're within budget, adding to a list.
valid_schedules_and_costs = []
for R in range(1, budget):
schedule = self.generate_hyperband_schedule(R, proportion_discard)
cost = self.compute_schedule_cost(schedule)
if cost <= budget:
valid_schedules_and_costs.append((schedule, cost))
# Choose a valid schedule that maximizes usage of the budget.
valid_schedules_and_costs.sort(key=lambda x: x[1], reverse=True)
return valid_schedules_and_costs[0][0]
def compute_schedule_cost(self, schedule):
# Sum up all n_i * r_i for each band.
flattened = [item for sublist in schedule for item in sublist]
return sum([x[0] * x[1] for x in flattened])
def generate_hyperband_schedule(self, R, eta):
"""
Generate hyperband schedule according to the paper.
Args:
R: maximum resources per config.
            eta: proportion of configurations to discard per
iteration of successive halving.
Returns: hyperband schedule, which is represented
as a list of brackets, where each bracket
contains a list of (num configurations,
num resources to use per configuration).
See the paper for more details.
"""
schedule = []
s_max = int(math.floor(math.log(R, eta)))
# B = (s_max + 1) * R
for s in range(0, s_max + 1):
n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)
r = R * eta ** (-s)
bracket = []
for i in range(0, s + 1):
n_i = int(math.floor(n * eta ** (-i)))
r_i = int(r * eta ** i)
bracket.append((n_i, r_i))
schedule = [bracket] + schedule
return schedule
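    # Worked example (hedged): generate_hyperband_schedule(R=9, eta=3) gives
    # s_max = 2 and the schedule
    #   [[(9, 1), (3, 3), (1, 9)],   # bracket s=2: 9 configs for 1 epoch, ...
    #    [(3, 3), (1, 9)],           # bracket s=1
    #    [(3, 9)]]                   # bracket s=0
    # whose total cost (sum of n_i * r_i over all brackets) is 72 epochs.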
def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
seed=None,
**score_kwargs,
):
"""
Performs hyperband search according to the generated schedule.
At the beginning of each bracket, we generate a
list of random configurations and perform
successive halving on it; we repeat this process
for the number of brackets in the schedule.
Args:
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
search_space: see ModelTuner's config_generator() documentation
max_search: see ModelTuner's config_generator() documentation
shuffle: see ModelTuner's config_generator() documentation
Returns:
best_model: the highest performing trained model found by Hyperband
best_config: (dict) the config corresponding to the best model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
self._clear_state(seed)
self.search_space = search_space
# Loop over each bracket
n_models_scored = 0
for bracket_index, bracket in enumerate(self.hyperband_schedule):
# Sample random configurations to seed SuccessiveHalving
n_starting_configurations, _ = bracket[0]
configurations = list(
self.config_generator(
search_space,
max_search=n_starting_configurations,
rng=self.rng,
shuffle=True,
)
)
# Successive Halving
for band_index, (n_i, r_i) in enumerate(bracket):
assert len(configurations) <= n_i
# Evaluate each configuration for r_i epochs
scored_configurations = []
for i, configuration in enumerate(configurations):
cur_model_index = n_models_scored
# Set epochs of the configuration
configuration["n_epochs"] = r_i
# Train model and get the score
score, model = self._test_model_config(
f"{band_index}_{i}",
configuration,
valid_data,
init_args=init_args,
train_args=train_args,
init_kwargs=init_kwargs,
train_kwargs=train_kwargs,
module_args=module_args,
module_kwargs=module_kwargs,
verbose=verbose,
**score_kwargs,
)
# Add score and model to list
scored_configurations.append(
(score, cur_model_index, configuration)
)
n_models_scored += 1
# Sort scored configurations by score
scored_configurations.sort(key=lambda x: x[0], reverse=True)
# Successively halve the configurations
if band_index + 1 < len(bracket):
n_to_keep, _ = bracket[band_index + 1]
configurations = [x[2] for x in scored_configurations][:n_to_keep]
print("=" * 60)
print(f"[SUMMARY]")
print(f"Best model: [{self.best_index}]")
print(f"Best config: {self.best_config}")
print(f"Best score: {self.best_score}")
print("=" * 60)
# Return best model
return self._load_best_model(clean_up=True)
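# Example (hedged sketch): driving a Hyperband search. EndModel, train_data,
# and valid_data are assumed to exist; all names here are illustrative. The
# budget of 72 epochs yields the worked schedule shown above.
#
#   tuner = HyperbandTuner(
#       EndModel,
#       hyperband_epochs_budget=72,
#       hyperband_proportion_discard=3,
#       seed=123,
#   )
#   best_model = tuner.search(
#       {"lr": {"range": [1e-4, 1e-1], "scale": "log"}},
#       valid_data,
#       train_args=[train_data],
#   )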
|
metal-master
|
metal/tuners/hyperband_tuner.py
|
from .hyperband_tuner import HyperbandTuner
from .random_tuner import RandomSearchTuner
__all__ = ["HyperbandTuner", "RandomSearchTuner"]
|
metal-master
|
metal/tuners/__init__.py
|
from metal.tuners.tuner import ModelTuner
class RandomSearchTuner(ModelTuner):
"""A tuner for models
Args:
        model_class: (nn.Module class) The model class to train (uninitialized)
        log_dir: The directory in which to save intermediate results
            If no log_dir is given, the model tuner will attempt to keep the
            best trained model in memory.
"""
def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
clean_up=True,
seed=None,
**score_kwargs,
):
"""
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
self._clear_state(seed)
self.search_space = search_space
# Generate configs
configs = self.config_generator(search_space, max_search, self.rng, shuffle)
# Commence search
for i, config in enumerate(configs):
score, model = self._test_model_config(
i,
config,
valid_data,
init_args=init_args,
train_args=train_args,
init_kwargs=init_kwargs,
train_kwargs=train_kwargs,
module_args=module_args,
module_kwargs=module_kwargs,
verbose=verbose,
**score_kwargs,
)
if verbose:
print("=" * 60)
print(f"[SUMMARY]")
print(f"Best model: [{self.best_index}]")
print(f"Best config: {self.best_config}")
print(f"Best score: {self.best_score}")
print("=" * 60)
self._save_report()
# Return best model
return self._load_best_model(clean_up=clean_up)
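# Example (hedged sketch): a random search over a mixed discrete/range space.
# EndModel, train_data, and valid_data are assumed to exist.
#
#   tuner = RandomSearchTuner(EndModel, seed=123)
#   best_model = tuner.search(
#       {"l2": [0.0, 1e-4], "lr": {"range": [1e-4, 1e-1], "scale": "log"}},
#       valid_data,
#       train_args=[train_data],
#       max_search=10,
#   )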
|
metal-master
|
metal/tuners/random_tuner.py
|
import json
import os
import pickle
import random
from itertools import cycle, product
from time import strftime, time
import numpy as np
import pandas as pd
from metal.utils import recursive_merge_dicts
class ModelTuner(object):
"""A tuner for models
Args:
        model_class: (nn.Module class) The model class to train (uninitialized)
module_classes: (dict) An optional dictionary of module classes
(uninitiated), with keys corresponding to their kwargs in model class;
for example, with model_class=EndModel, could have:
module_classes = {"input_module": metal.modules.LSTMModule}
log_dir: (str) The path to the base log directory, or defaults to
current working directory.
run_dir: (str) The name of the sub-directory, or defaults to the date,
strftime("%Y_%m_%d").
run_name: (str) The name of the run, or defaults to the time,
strftime("%H_%M_%S").
log_writer_class: a metal.utils.LogWriter class for logging the full
model search.
validation_metric: The metric to use in scoring and selecting models.
Saves model search logs and tuner report to 'log_dir/run_dir/run_name/.'
"""
def __init__(
self,
model_class,
module_classes={},
log_dir=None,
run_dir=None,
run_name=None,
log_writer_class=None,
seed=None,
validation_metric="accuracy",
):
self.model_class = model_class
self.module_classes = module_classes
self.log_writer_class = log_writer_class
self.validation_metric = validation_metric
# Set logging subdirectory + make sure exists
self.init_date = strftime("%Y_%m_%d")
self.init_time = strftime("%H_%M_%S")
self.log_dir = log_dir or os.getcwd()
run_dir = run_dir or self.init_date
run_name = run_name or self.init_time
self.log_rootdir = os.path.join(self.log_dir, run_dir)
self.log_subdir = os.path.join(self.log_dir, run_dir, run_name)
if not os.path.exists(self.log_subdir):
os.makedirs(self.log_subdir)
# Set best model pkl and JSON log paths
        self.save_path = os.path.join(self.log_subdir, "best_model.pkl")
        self.report_path = os.path.join(self.log_subdir, "tuner_report.json")
# Set global seed
if seed is None:
self.seed = 0
else:
self.seed = seed
# Search state
# NOTE: Must be cleared each run with self._clear_state()!
self._clear_state(self.seed)
def _clear_state(self, seed=None):
"""Clears the state, starts clock"""
self.start_time = time()
self.run_stats = []
self.best_index = -1
self.best_score = -1
self.best_config = None
# Note: These must be set at the start of self.search()
self.search_space = None
# Reset the seed
if seed is not None:
self.rng = random.Random(seed)
def _test_model_config(
self,
idx,
config,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
verbose=False,
**score_kwargs,
):
# Integrating generated config into init kwargs and train kwargs
init_kwargs["verbose"] = verbose
init_kwargs = recursive_merge_dicts(init_kwargs, config, misses="insert")
train_kwargs = recursive_merge_dicts(train_kwargs, config, misses="insert")
# Also make sure train kwargs includes validation metric
train_kwargs["validation_metric"] = self.validation_metric
# Initialize modules if provided
for module_name, module_class in self.module_classes.items():
# Also integrate generated config into module kwargs so that module
# hyperparameters can be searched over as well
module_kwargs[module_name] = recursive_merge_dicts(
module_kwargs[module_name], config, misses="insert"
)
# Initialize module
init_kwargs[module_name] = module_class(
*module_args[module_name], **module_kwargs[module_name]
)
# Init model
model = self.model_class(*init_args, **init_kwargs)
# Search params
# Select any params in search space that have list or dict
search_params = {}
for k, v in config.items():
if k in self.search_space.keys():
if isinstance(self.search_space[k], (list, dict)):
search_params[k] = v
if verbose:
print("=" * 60)
print(f"[{idx}] Testing {search_params}")
print("=" * 60)
# Initialize a new LogWriter and train the model, returning the score
log_writer = None
if self.log_writer_class is not None:
writer_config = {
"log_dir": self.log_subdir,
"run_dir": ".",
"run_name": f"model_search_{idx}",
}
log_writer = self.log_writer_class(**writer_config)
model.train_model(
*train_args,
**train_kwargs,
valid_data=valid_data,
verbose=verbose,
log_writer=log_writer,
)
score = model.score(
valid_data,
metric=self.validation_metric,
verbose=False, # Score is already printed in train_model above
**score_kwargs,
)
# If score better than best_score, save
if score > self.best_score:
self.best_score = score
self.best_index = idx
self.best_config = config
self._save_best_model(model)
# Save high-level run stats (in addition to per-model log)
time_elapsed = time() - self.start_time
self.run_stats.append(
{
"idx": idx,
"time_elapsed": time_elapsed,
"search_params": search_params,
"score": score,
}
)
return score, model
def _save_best_model(self, model):
with open(self.save_path, "wb") as f:
pickle.dump(model, f)
def _load_best_model(self, clean_up=False):
with open(self.save_path, "rb") as f:
model = pickle.load(f)
if clean_up:
self._clean_up()
return model
def _clean_up(self):
if os.path.exists(self.save_path):
os.remove(self.save_path)
def _save_report(self):
with open(self.report_path, "w") as f:
json.dump(self.run_stats, f, indent=1)
def run_stats_df(self):
"""Returns self.run_stats over search params as pandas dataframe."""
run_stats_df = []
for x in self.run_stats:
search_results = {**x["search_params"]}
search_results["score"] = x["score"]
run_stats_df.append(search_results)
return pd.DataFrame(run_stats_df)
def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
**score_kwargs,
):
"""
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
raise NotImplementedError()
@staticmethod
def config_generator(search_space, max_search, rng, shuffle=True):
"""Generates config dicts from the given search space
Args:
search_space: (dict) A dictionary of parameters to search over.
See note below for more details.
max_search: (int) The maximum number of configurations to search.
If max_search is None, do a full grid search of all discrete
parameters, filling in range parameters as needed.
Otherwise, do a full grid search of all discrete
parameters and then cycle through again filling in new
range parameters values; if there are no range parameters,
stop after yielding the full cross product of parameters
once.
shuffle: (bool) If True, shuffle the order of generated configs
Yields:
configs: each config is a dict of parameter values based on the
provided search space
The search_space dictionary may consist of two types of parameters:
--discrete: a discrete parameter is either a single value or a
list of values. Use single values, for example, to override
a default model parameter or set a flag such as 'verbose'=True.
--range: a range parameter is a dict of the form:
{'range': [<min>, <max>], 'scale': <scale>}
where <min> and <max> are the min/max values to search between
and scale is one of ['linear', 'log'] (defaulting to 'linear')
representing the scale to use when searching the given range
Example:
search_space = {
'verbose': True, # discrete
'n_epochs': 100, # discrete
'momentum': [0.0, 0.9, 0.99], # discrete
                'l2': {'range': [0.0001, 10]},  # linear range
'lr': {'range': [0.001, 1], 'scale': 'log'}, # log range
}
If max_search is None, this will return 3 configurations (enough to
just cover the full cross-product of discrete values, filled
in with sampled range values)
        Otherwise, this will return max_search configurations
        (cycling through the discrete value combinations multiple
        times if necessary)
"""
def dict_product(d):
keys = d.keys()
for element in product(*d.values()):
yield dict(zip(keys, element))
def range_param_func(v):
scale = v.get("scale", "linear")
mini = min(v["range"])
maxi = max(v["range"])
if scale == "linear":
func = lambda rand: mini + (maxi - mini) * rand
elif scale == "log":
mini = np.log(mini)
maxi = np.log(maxi)
func = lambda rand: np.exp(mini + (maxi - mini) * rand)
else:
raise ValueError(f"Unrecognized scale '{scale}' for " "parameter {k}")
return func
discretes = {}
ranges = {}
for k, v in search_space.items():
if isinstance(v, dict):
ranges[k] = range_param_func(v)
elif isinstance(v, list):
discretes[k] = v
else:
discretes[k] = [v]
discrete_configs = list(dict_product(discretes))
if shuffle:
rng.shuffle(discrete_configs)
# If there are range parameters and a non-None max_search, cycle
# through the discrete_configs (with new range values) until
# max_search is met
if ranges and max_search:
discrete_configs = cycle(discrete_configs)
for i, config in enumerate(discrete_configs):
# We may see the same config twice due to cycle
config = config.copy()
if max_search and i == max_search:
break
for k, v in ranges.items():
config[k] = float(v(rng.random()))
yield config
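# Example (hedged): drawing configurations from config_generator directly.
#
#   import random
#   rng = random.Random(0)
#   space = {
#       "momentum": [0.9, 0.99],                        # discrete
#       "lr": {"range": [1e-3, 1e-1], "scale": "log"},  # log range
#   }
#   configs = list(ModelTuner.config_generator(space, max_search=4, rng=rng))
#   # 4 configs: momentum cycles over its two values while lr is re-sampled
#   # log-uniformly for each config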
|
metal-master
|
metal/tuners/tuner.py
|
metal-master
|
metal/contrib/__init__.py
|