# Copyright (c) Meta Platforms, Inc. and affiliates.
import math
import random
import cv2
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from mmseg.datasets import PIPELINES
@PIPELINES.register_module(force=True)
class RandomCrop(object):
"""Random crop the image & seg.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
cat_max_ratio (float): The maximum ratio that single category could
occupy.
"""
def __init__(self, crop_size, cat_max_ratio=1.0, ignore_index=255):
assert crop_size[0] > 0 and crop_size[1] > 0
self.crop_size = crop_size
self.cat_max_ratio = cat_max_ratio
self.ignore_index = ignore_index
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
return img
def __call__(self, results):
"""Call function to randomly crop images, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
img = results["img"]
crop_bbox = self.get_crop_bbox(img)
if self.cat_max_ratio < 1.0 and "gt_semantic_seg" in results:
# Repeat 10 times
for _ in range(10):
seg_temp = self.crop(results["gt_semantic_seg"], crop_bbox)
labels, cnt = np.unique(seg_temp, return_counts=True)
cnt = cnt[labels != self.ignore_index]
if len(cnt) > 1 and np.max(cnt) / np.sum(cnt) < self.cat_max_ratio:
break
crop_bbox = self.get_crop_bbox(img)
# crop the image
for key in results.get("img_fields", ["img"]):
img = self.crop(results[key], crop_bbox)
results[key] = img
results["img_shape"] = results["img"].shape
# crop annotations
for key in results.get("seg_fields", []):
results[key] = self.crop(results[key], crop_bbox)
# crop image and semantic seg for clips if present
if "img_list" in results:
new_img_list = []
img_list = results["img_list"]
for curr_img in img_list:
new_img_list.append(self.crop(curr_img, crop_bbox))
results["img_list"] = new_img_list
if "r_img_list" in results:
new_img_list = []
img_list = results["r_img_list"]
for curr_img in img_list:
new_img_list.append(self.crop(curr_img, crop_bbox))
results["r_img_list"] = new_img_list
for key in results.get("seg_fields", []):
key_list = key + "_list"
if key_list not in results:
continue
seg_list = results[key_list]
new_seg_list = []
for curr_seg in seg_list:
new_seg_list.append(self.crop(curr_seg, crop_bbox))
results[key_list] = new_seg_list
# crop intrinsics
if "intrinsics" in results and results["intrinsics"] is not None:
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
new_intrinsics = results["intrinsics"]
new_intrinsics = [new_intrinsics[0], new_intrinsics[1], new_intrinsics[2] - crop_x1,
new_intrinsics[3] - crop_y1]
results["intrinsics"] = new_intrinsics
return results
def __repr__(self):
return self.__class__.__name__ + f"(crop_size={self.crop_size})"
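# Illustrative sketch (not part of the original module): exercising RandomCrop on a
# synthetic results dict shaped like the output of the loading pipeline. The image
# size, the number of classes and the crop size below are assumed values.
def _demo_random_crop():
    img = np.random.randint(0, 255, (540, 960, 3), dtype=np.uint8)
    seg = np.random.randint(0, 19, (540, 960), dtype=np.uint8)
    results = {
        "img": img,
        "gt_semantic_seg": seg,
        "img_fields": ["img"],
        "seg_fields": ["gt_semantic_seg"],
    }
    crop = RandomCrop(crop_size=(256, 512), cat_max_ratio=0.75)
    out = crop(results)
    # The crop is re-sampled (up to 10 times) until no single class covers more
    # than cat_max_ratio of the cropped segmentation map.
    return out["img_shape"]  # expected (256, 512, 3)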
@PIPELINES.register_module(force=True)
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        disp_pad_val (float, optional): Padding value of disparity maps. Default: 0.
        flow_pad_val (float, optional): Padding value of optical flow maps. Default: 210.
    """
def __init__(
self,
size=None,
size_divisor=None,
pad_val=0,
seg_pad_val=255,
disp_pad_val=0,
flow_pad_val=210,
):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
self.disp_pad_val = disp_pad_val
self.flow_pad_val = flow_pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _get_pad_img(self, img):
if self.size is not None:
padded_img = mmcv.impad(
img, shape=self.size, padding_mode='reflect'
)
elif self.size_divisor is not None:
h, w = img.shape[:2]
size = [math.ceil(h / self.size_divisor) * self.size_divisor,
math.ceil(w / self.size_divisor) * self.size_divisor]
padded_img = mmcv.impad(
img, shape=size, padding_mode='reflect'
)
# padded_img = mmcv.impad_to_multiple(img, divisor=self.size_divisor, pad_val=self.pad_val)
return padded_img
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
padded_img = self._get_pad_img(results["img"])
results["img"] = padded_img
results["pad_shape"] = padded_img.shape
results["pad_fixed_size"] = self.size
results["pad_size_divisor"] = self.size_divisor
if "img_list" in results:
curr_imgs = results['img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._get_pad_img(curr_img))
results['img_list'] = new_list
def _pad_r_img(self, results):
"""Pad images according to ``self.size``."""
if "r_img" in results:
results["r_img"] = self._get_pad_img(results["r_img"])
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._get_pad_img(curr_img))
results['r_img_list'] = new_list
def _pad_seg(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_semantic_seg" in results:
results["gt_semantic_seg"] = mmcv.impad(
results["gt_semantic_seg"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val,
)
if "gt_semantic_seg_list" in results:
curr_list = results["gt_semantic_seg_list"]
new_list = []
for curr_seg in curr_list:
new_list.append(mmcv.impad(
curr_seg, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val
))
results['gt_semantic_seg_list'] = new_list
def _pad_disp(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp" in results:
results["gt_disp"] = mmcv.impad(
results["gt_disp"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp_list" in results:
curr_list = results["gt_disp_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp_list'] = new_list
def _pad_flow(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_flow" in results:
results["gt_flow"] = mmcv.impad(
results["gt_flow"],
shape=results["pad_shape"][:2],
pad_val=self.flow_pad_val,
)
if "gt_flow_list" in results:
curr_list = results["gt_flow_list"]
new_list = []
for curr_flow in curr_list:
new_list.append(mmcv.impad(
curr_flow, shape=results["pad_shape"][:2], pad_val=self.flow_pad_val
))
results['gt_flow_list'] = new_list
def _pad_disp_change(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_change" in results:
results["gt_disp_change"] = mmcv.impad(
results["gt_disp_change"],
shape=results["pad_shape"][:2],
pad_val=self.flow_pad_val,
)
if "gt_disp_change_list" in results:
curr_list = results["gt_disp_change_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.flow_pad_val
))
results['gt_disp_change_list'] = new_list
def _pad_disp_2(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_2" in results:
results["gt_disp_2"] = mmcv.impad(
results["gt_disp_2"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp_2_list" in results:
curr_list = results["gt_disp_2_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp_2_list'] = new_list
def _pad_flow_occ(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_flow_occ" in results:
results["gt_flow_occ"] = mmcv.impad(
results["gt_flow_occ"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val, # pad 255
)
if "gt_flow_occ_list" in results:
curr_list = results["gt_flow_occ_list"]
new_list = []
for curr_occ in curr_list:
new_list.append(mmcv.impad(
curr_occ, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val, # pad 255
))
results['gt_flow_occ_list'] = new_list
def _pad_disp_occ(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_occ" in results:
results["gt_disp_occ"] = mmcv.impad(
results["gt_disp_occ"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val, # pad 255
)
if "gt_disp_occ_list" in results:
curr_list = results["gt_disp_occ_list"]
new_list = []
for curr_occ in curr_list:
new_list.append(mmcv.impad(
curr_occ, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val, # pad 255
))
results['gt_disp_occ_list'] = new_list
def _pad_disp2(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp2" in results:
results["gt_disp2"] = mmcv.impad(
results["gt_disp2"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp2_list" in results:
curr_list = results["gt_disp2_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp2_list'] = new_list
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_seg(results)
self._pad_r_img(results)
self._pad_disp(results)
self._pad_flow(results)
self._pad_disp_change(results)
self._pad_disp_2(results)
self._pad_flow_occ(results)
self._pad_disp2(results)
self._pad_disp_occ(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(size={self.size}, size_divisor={self.size_divisor}, "
f"pad_val={self.pad_val})"
)
return repr_str
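# Illustrative sketch (not part of the original module): padding to the next multiple
# of 32 with the divisor mode. The input shape is an assumed example; note that images
# are padded by reflection rather than with a constant border.
def _demo_pad_divisor():
    img = np.zeros((541, 959, 3), dtype=np.uint8)
    pad = Pad(size_divisor=32)
    out = pad({"img": img})
    return out["pad_shape"]  # expected (544, 960, 3)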
@PIPELINES.register_module(force=True)
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def img_norm(self, results, key):
if key in results:
results[key] = mmcv.imnormalize(
results[key], self.mean, self.std, self.to_rgb,
)
def imglist_norm(self, results, key):
if key in results:
curr_list = results[key]
new_list = []
for img in curr_list:
new_list.append(mmcv.imnormalize(img, self.mean, self.std, self.to_rgb))
results[key] = new_list
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
self.img_norm(results, "img")
self.img_norm(results, "r_img")
self.imglist_norm(results, "img_list")
self.imglist_norm(results, "r_img_list")
results["img_norm_cfg"] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(mean={self.mean}, std={self.std}, to_rgb=" f"{self.to_rgb})"
return repr_str
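# Illustrative sketch (not part of the original module): normalizing with commonly used
# ImageNet statistics. The mean/std values are assumed examples, not taken from any
# particular CODD config.
def _demo_normalize():
    img = np.random.randint(0, 255, (4, 6, 3), dtype=np.uint8)
    norm = Normalize(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    out = norm({"img": img})
    return out["img"].dtype, out["img_norm_cfg"]  # float32 image plus the recorded config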
@PIPELINES.register_module(force=True)
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last. If asymmetric augmentation is used, 0.5 probability
the augmentation will be asym.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
        asym (bool): whether to apply the augmentation asymmetrically to the right image(s).
"""
def __init__(
self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
asym=False,
):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.asym = asym
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, imgs):
"""Brightness distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
new_imgs.append(self.convert(img, beta=beta))
imgs = new_imgs
return imgs
def contrast(self, imgs):
"""Contrast distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
new_imgs.append(self.convert(img, alpha=alpha))
imgs = new_imgs
return imgs
def saturation(self, imgs):
"""Saturation distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(img[:, :, 1], alpha=alpha)
new_imgs.append(mmcv.hsv2bgr(img))
imgs = new_imgs
return imgs
def hue(self, imgs):
"""Hue distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
delta = np.random.randint(-self.hue_delta, self.hue_delta)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
delta = np.random.randint(-self.hue_delta, self.hue_delta)
img = mmcv.bgr2hsv(img)
img[:, :, 0] = (img[:, :, 0].astype(int) + delta) % 180
new_imgs.append(mmcv.hsv2bgr(img))
imgs = new_imgs
return imgs
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
imgs = [results["img"]]
if "r_img" in results:
imgs.append(results["r_img"])
# random brightness
imgs = self.brightness(imgs)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = np.random.randint(2)
if "img_list" not in results:
if mode == 1:
imgs = self.contrast(imgs)
# random saturation
imgs = self.saturation(imgs)
# random hue
imgs = self.hue(imgs)
# random contrast
if mode == 0:
imgs = self.contrast(imgs)
results["img"] = imgs[0]
if "r_img" in results:
results["r_img"] = imgs[1]
elif "img_list" in results:
import copy
new_list = copy.copy(results["img_list"])
img_list_len = len(new_list)
if "r_img_list" in results:
new_list += results["r_img_list"]
if mode == 1:
new_list = self.contrast(new_list)
# random saturation
new_list = self.saturation(new_list)
# random hue
new_list = self.hue(new_list)
# random contrast
if mode == 0:
new_list = self.contrast(new_list)
results["img_list"] = new_list[:img_list_len]
if "r_img_list" in results:
results['r_img_list'] = new_list[img_list_len:]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(brightness_delta={self.brightness_delta}, "
f"contrast_range=({self.contrast_lower}, "
f"{self.contrast_upper}), "
f"saturation_range=({self.saturation_lower}, "
f"{self.saturation_upper}), "
f"hue_delta={self.hue_delta})"
)
return repr_str
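# Illustrative sketch (not part of the original module): the distortion mutates the
# "img" (and, if present, "r_img") entries in place; every sub-transform fires with
# probability 0.5, so repeated calls give different results. The image size is assumed.
def _demo_photometric_distortion():
    img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    distort = PhotoMetricDistortion(brightness_delta=32, contrast_range=(0.5, 1.5))
    out = distort({"img": img})
    return out["img"].shape  # shape unchanged, pixel values possibly jittered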
@PIPELINES.register_module(force=True)
class StereoPhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last. If asymmetric augmentation is used, 0.5 probability
the augmentation will be asym.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
        prob (float): probability of applying each augmentation to the left image.
        asym_prob (float): probability of also augmenting the right image with
            slightly jittered parameters.
"""
def __init__(
self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
prob=0.5,
asym_prob=0.5,
):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.prob = prob
self.asym_prob = asym_prob
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, imgs, r_imgs):
"""Brightness distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
imgs[idx] = self.convert(img, beta=beta)
if p_asym:
beta = beta * (1 + np.random.uniform(-0.2, 0.2))
r_imgs[idx] = self.convert(r_img, beta=beta)
return imgs, r_imgs
def contrast(self, imgs, r_imgs):
"""Contrast distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
imgs[idx] = self.convert(img, alpha=alpha)
if p_asym:
alpha = alpha * (1 + np.random.uniform(-0.2, 0.2))
r_imgs[idx] = self.convert(r_img, alpha=alpha)
return imgs, r_imgs
def saturation(self, imgs, r_imgs):
"""Saturation distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(img[:, :, 1], alpha=alpha)
imgs[idx] = mmcv.hsv2bgr(img)
if p_asym:
alpha = alpha * (1 + np.random.uniform(-0.2, 0.2))
r_img = mmcv.bgr2hsv(r_img)
r_img[:, :, 1] = self.convert(r_img[:, :, 1], alpha=alpha)
r_imgs[idx] = mmcv.hsv2bgr(r_img)
return imgs, r_imgs
def hue(self, imgs, r_imgs):
"""Hue distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
delta = np.random.randint(-self.hue_delta, self.hue_delta)
img = mmcv.bgr2hsv(img)
img[:, :, 0] = (img[:, :, 0].astype(int) + delta) % 180
imgs[idx] = mmcv.hsv2bgr(img)
if p_asym:
delta = delta * (1 + np.random.uniform(-0.2, 0.2))
r_img = mmcv.bgr2hsv(r_img)
r_img[:, :, 0] = (r_img[:, :, 0].astype(int) + delta) % 180
r_imgs[idx] = mmcv.hsv2bgr(r_img)
return imgs, r_imgs
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
imgs = [results["img"]]
r_imgs = [results["r_img"]]
# random brightness
imgs, r_imgs = self.brightness(imgs, r_imgs)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = np.random.randint(2)
if "img_list" not in results:
if mode == 1:
imgs, r_imgs = self.contrast(imgs, r_imgs)
# random saturation
imgs, r_imgs = self.saturation(imgs, r_imgs)
# random hue
imgs, r_imgs = self.hue(imgs, r_imgs)
# random contrast
if mode == 0:
imgs, r_imgs = self.contrast(imgs, r_imgs)
results["img"] = imgs[0]
results["r_img"] = r_imgs[0]
elif "img_list" in results:
import copy
new_list = copy.copy(results["img_list"])
r_new_list = results["r_img_list"]
if mode == 1:
new_list, r_new_list = self.contrast(new_list, r_new_list)
# random saturation
new_list, r_new_list = self.saturation(new_list, r_new_list)
# random hue
new_list, r_new_list = self.hue(new_list, r_new_list)
# random contrast
if mode == 0:
new_list, r_new_list = self.contrast(new_list, r_new_list)
results["img_list"] = new_list
results['r_img_list'] = r_new_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(brightness_delta={self.brightness_delta}, "
f"contrast_range=({self.contrast_lower}, "
f"{self.contrast_upper}), "
f"saturation_range=({self.saturation_lower}, "
f"{self.saturation_upper}), "
f"hue_delta={self.hue_delta})"
)
return repr_str
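# Illustrative sketch (not part of the original module): unlike PhotoMetricDistortion,
# this variant always expects an "r_img" entry; with probability asym_prob the right
# image is augmented with slightly jittered parameters. Sizes and probabilities assumed.
def _demo_stereo_photometric_distortion():
    left = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    right = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    distort = StereoPhotoMetricDistortion(prob=1.0, asym_prob=1.0)
    out = distort({"img": left, "r_img": right})
    return out["img"].shape, out["r_img"].shape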
@PIPELINES.register_module()
class RandomShiftRotate(object):
"""Randomly apply vertical translate and rotate the input.
Args:
        max_shift (float): maximum shift in pixels along the vertical direction. Default: 1.5.
        max_rotation (float): maximum rotation in degrees. Default: 0.2.
        prob (float): probability of applying the transform. Default: 1.0.
Targets:
r_image, r_img_list
Image types:
uint8, float32
"""
def __init__(self, max_shift=1.5, max_rotation=0.2, prob=1.0):
self.max_shift = max_shift
self.max_rotation = max_rotation
self.prob = prob
def _shift_and_rotate(self, img):
if random.random() < self.prob:
px2 = random.uniform(-self.max_shift, self.max_shift)
angle2 = random.uniform(-self.max_rotation, self.max_rotation)
image_center = (np.random.uniform(0, img.shape[0]), \
np.random.uniform(0, img.shape[1]))
rot_mat = cv2.getRotationMatrix2D(image_center, angle2, 1.0)
img = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
trans_mat = np.float32([[1, 0, 0], [0, 1, px2]])
img = cv2.warpAffine(img, trans_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return img
def __call__(self, results):
if "r_img" in results:
results["r_img"] = self._shift_and_rotate(results["r_img"])
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._shift_and_rotate(curr_img))
results['r_img_list'] = new_list
return results
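# Illustrative sketch (not part of the original module): only the right view is
# perturbed, simulating small stereo rectification errors. The input shape is assumed.
def _demo_random_shift_rotate():
    right = np.random.randint(0, 255, (64, 96, 3), dtype=np.uint8)
    aug = RandomShiftRotate(max_shift=1.5, max_rotation=0.2, prob=1.0)
    out = aug({"r_img": right})
    return out["r_img"].shape  # geometry jittered, shape preserved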
@PIPELINES.register_module()
class RandomOcclude(object):
"""Randomly apply occlusion.
Args:
        w_patch_range (tuple): min and max patch width in pixels.
        h_patch_range (tuple): min and max patch height in pixels.
        prob (float): probability of applying the transform. Default: 1.0.
Targets:
r_image, r_img_list
Image types:
uint8, float32
"""
def __init__(self, w_patch_range=(180, 250), h_patch_range=(50, 70), mode='mean', prob=1.0):
self.w_patch_range = w_patch_range
self.h_patch_range = h_patch_range
self.mode = mode
self.prob = prob
def apply(self, img, patch1, patch2):
patch1_yl, patch1_xl, patch1_yh, patch1_xh = patch1
patch2_yl, patch2_xl, patch2_yh, patch2_xh = patch2
img_patch = img[patch2_yl:patch2_yh, patch2_xl:patch2_xh]
if self.mode == 'mean':
img_patch = np.mean(np.mean(img_patch, 0), 0)[np.newaxis, np.newaxis]
img[patch1_yl:patch1_yh, patch1_xl:patch1_xh] = img_patch
return img
def __call__(self, results):
if random.random() < self.prob and "r_img" in results:
img_h, img_w, _ = results["r_img"].shape
patch_h = random.randint(*self.h_patch_range)
patch_w = random.randint(*self.w_patch_range)
patch1_y = random.randint(0, img_h - patch_h)
patch1_x = random.randint(0, img_w - patch_w)
patch2_y = random.randint(0, img_h - patch_h)
patch2_x = random.randint(0, img_w - patch_w)
patch1 = (patch1_y, patch1_x, patch1_y + patch_h, patch1_x + patch_w)
patch2 = (patch2_y, patch2_x, patch2_y + patch_h, patch2_x + patch_w)
if "r_img" in results:
results["r_img"] = self.apply(results["r_img"], patch1, patch2)
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self.apply(curr_img, patch1, patch2))
results['r_img_list'] = new_list
return results
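# Illustrative sketch (not part of the original module): a patch of the right image is
# overwritten with the mean colour of another randomly chosen patch, emulating content
# visible only to one camera. The patch ranges must fit inside the assumed image size.
def _demo_random_occlude():
    right = np.random.randint(0, 255, (128, 320, 3), dtype=np.uint8)
    occ = RandomOcclude(w_patch_range=(40, 60), h_patch_range=(20, 30), prob=1.0)
    out = occ({"r_img": right})
    return out["r_img"].shape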
# ==== end of file: datasets/transforms.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
import copy
import os.path as osp
import re
import sys
import mmcv
import numpy as np
from mmcv.utils import print_log
from mmseg.datasets import DATASETS, CustomDataset
from mmseg.datasets.pipelines import Compose
from mmseg.utils import get_root_logger
from terminaltables import AsciiTable
from tqdm import tqdm
from utils import AverageMeter
sys.setrecursionlimit(
100000
) # NOTE: increase recursion limit to avoid "RuntimeError: maximum recursion depth exceeded while calling a Python object"
MF_MAX_SEQUENCE_LENGTH = 50
@DATASETS.register_module()
class CustomStereoMultiFrameDataset(CustomDataset):
def __init__(
self,
pipeline,
img_dir,
test_mode=False,
disp_range=(1, 210),
calib=None,
depth_range=None,
img_suffix=".png",
r_img_dir=None,
r_img_suffix=".png",
disp_dir=None,
disp_suffix=".exr",
split=None,
data_root=None,
flow_dir=None,
flow_suffix=".exr",
disp_change_dir=None,
disp_change_suffix=".exr",
flow_occ_dir=None,
flow_occ_suffix=".exr",
disp2_dir=None,
disp2_suffix=".exr",
disp_occ_dir=None,
disp_occ_suffix=".exr",
prefix_pattern="",
intrinsics=None,
num_samples=None,
**kwargs,
):
"""custom dataset for temporal stereo
Args:
pipeline (dict): pipeline for reading
img_dir (str): image directory
disp_range (tuple, optional): valid disparity range. Defaults to (1, 210).
calib (float, optional): baseline * focal length, for converting disparity to depth. Defaults to None.
depth_range (tuple, optional): valid depth range, need calib. Defaults to None.
img_suffix (str, optional): Defaults to ".png".
r_img_dir (str, optional): right image directory. Defaults to None.
r_img_suffix (str, optional): Defaults to ".png".
disp_dir (str, optional): disparity directory. Defaults to None.
disp_suffix (str, optional): Defaults to ".exr".
split (str, optional): path to split file. Defaults to None.
data_root (str, optional): prepend path to image data. Defaults to None.
flow_dir (str, optional): optical flow directory. Defaults to None.
flow_suffix (str, optional): Defaults to ".exr".
disp_change_dir (str, optional): disparity change directory. Defaults to None.
disp_change_suffix (str, optional): Defaults to ".exr".
flow_occ_dir (str, optional): optical flow occlusion directory, used to compute disparity change for Sintel and TartanAir. Defaults to None.
flow_occ_suffix (str, optional): Defaults to ".exr".
disp2_dir (str, optional): disparity of next frame in current frame directory, used to compute disparity change for KITTI Depth. Defaults to None.
disp2_suffix (str, optional): Defaults to ".exr".
disp_occ_dir (str, optional): disparity occlusion directory. Defaults to None.
disp_occ_suffix (str, optional): Defaults to ".exr".
prefix_pattern (str, optional): prefix pattern to determine if frames belong to the same sequence. Defaults to "".
intrinsics (list, optional): intrinsics, fx, fy, cx, cy. Defaults to None.
            num_samples (int, optional): number of samples to use. Defaults to None.
"""
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.r_img_dir = r_img_dir
self.r_img_suffix = r_img_suffix
self.disp_dir = disp_dir
self.disp_suffix = disp_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.disp_range = disp_range
self.calib = calib
self.depth_range = depth_range
self.intrinsics = intrinsics
self.prefix_pattern = prefix_pattern
self.flow_dir = flow_dir
self.flow_suffix = flow_suffix
self.disp_change_dir = disp_change_dir
self.disp_change_suffix = disp_change_suffix
self.flow_occ_dir = flow_occ_dir
self.flow_occ_suffix = flow_occ_suffix
self.disp2_dir = disp2_dir
self.disp2_suffix = disp2_suffix
self.disp_occ_dir = disp_occ_dir
self.disp_occ_suffix = disp_occ_suffix
if self.depth_range is not None:
assert (
self.calib is not None
), "calib is required to convert disparity to depth"
self.num_frames = kwargs.get("num_frames", 2)
if "num_frames" in kwargs:
kwargs.pop("num_frames")
# join paths if data_root is specified
if self.data_root is not None:
if not mmcv.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (getattr(self, "ann_dir", None) is None or mmcv.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.r_img_dir is None or mmcv.isabs(self.r_img_dir)):
self.r_img_dir = osp.join(self.data_root, self.r_img_dir)
if not (self.disp_dir is None or mmcv.isabs(self.disp_dir)):
self.disp_dir = osp.join(self.data_root, self.disp_dir)
if not (self.split is None or mmcv.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(
self.img_dir,
self.img_suffix,
None,
None,
self.r_img_dir,
self.r_img_suffix,
self.disp_dir,
self.disp_suffix,
self.split,
num_samples,
)
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results["img_fields"] = []
results["seg_fields"] = []
results["img_prefix"] = self.img_dir
results["seg_prefix"] = []
results["r_img_prefix"] = self.r_img_dir
results["disp_prefix"] = self.disp_dir
results["flow_prefix"] = self.flow_dir
results["disp_change_prefix"] = self.disp_change_dir
results["flow_occ_prefix"] = self.flow_occ_dir
results["disp2_prefix"] = self.disp2_dir
results["disp_occ_prefix"] = self.disp_occ_dir
# used in evaluation
results["calib"] = self.calib
results["disp_range"] = self.disp_range
results["depth_range"] = self.depth_range
results["intrinsics"] = self.intrinsics
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
            dict: Testing data after pipeline with new keys introduced by
                the pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def update_mf_history(self, history, new_entry, num_frames, pattern="_[^_]*$"):
if num_frames > 0:
if len(history) == 0:
history.append(new_entry)
else:
first_entry_name = history[0]["filename"]
first_entry_prefix = re.sub(pattern, "", first_entry_name)
new_entry_name = new_entry["filename"]
new_entry_prefix = re.sub(pattern, "", new_entry_name)
if first_entry_prefix == new_entry_prefix:
history.append(new_entry)
else:
history = [new_entry]
assert len(history) <= num_frames, "History cannot be longer than MF"
if len(history) == num_frames:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
history.pop(0)
return first_entry, history
else:
return None, history
        else:  # this is written for testing, where we read the whole video sequence when num_frames=-1
if len(history) == 0:
history.append(new_entry)
else: # read all frames from same sequence
first_entry_name = history[0]["filename"]
first_entry_prefix = re.sub(pattern, "", first_entry_name)
new_entry_name = new_entry["filename"]
new_entry_prefix = re.sub(pattern, "", new_entry_name)
# a new sequence starts or reaching max len
if len(history) >= MF_MAX_SEQUENCE_LENGTH or first_entry_prefix != new_entry_prefix:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
history = [new_entry]
return first_entry, history
else:
history.append(new_entry)
return None, history
def load_annotations(
self,
img_dir,
img_suffix,
ann_dir,
seg_map_suffix,
r_img_dir,
r_img_suffix,
disp_dir,
disp_suffix,
split,
num_samples,
):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
r_img_dir (str|None): Path to right image directory.
r_img_suffix (str|None): Suffix of right images.
disp_dir (str|None): Path to annotation directory.
disp_suffix (str|None): Suffix of disparity maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
history = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if r_img_dir is not None:
img_info["r_filename"] = img_name + r_img_suffix
img_info["ann"] = dict()
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info["ann"]["seg_map"] = seg_map
if disp_dir is not None:
disp = img_name + disp_suffix
img_info["ann"]["disp"] = disp
if not img_info["ann"]:
del img_info["ann"]
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
else:
all_files = mmcv.scandir(img_dir, img_suffix, recursive=True)
all_files = sorted(all_files)
for img in all_files:
img_info = dict(filename=img)
if r_img_dir is not None:
img_info["r_filename"] = img.replace(
img_suffix, r_img_suffix
).replace("left", "right")
img_info["ann"] = dict()
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
if (
num_samples is not None
and 0 < num_samples <= len(img_infos)
):
img_infos = img_infos[:num_samples]
print_log(f"Loaded {len(img_infos)} images", logger=get_root_logger())
return img_infos
def evaluate_disp(self, results, logger):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
# disp metric
epe_meter = AverageMeter()
th3_meter = AverageMeter()
# temporal metric
t_epe_meter = AverageMeter()
th3_tepe_meter = AverageMeter()
t_epe_rel_meter = AverageMeter()
th1_teper_meter = AverageMeter()
# flow mag metric
flow_mag_meter = AverageMeter()
for _, result in tqdm(enumerate(results)):
epe_meter.update(result['epe'].item())
th3_meter.update(result['th3'].item())
t_epe_meter.update(result['tepe'].item())
th3_tepe_meter.update(result['th3_tepe'].item())
t_epe_rel_meter.update(result['tepe_rel'].item())
th1_teper_meter.update(result['th1_tepe_rel'].item())
flow_mag_meter.update(result['flow_mag'].item())
# depth summary table
summary_table_content = [
("epe", epe_meter, 1),
("th3", th3_meter, 1),
("tepe", t_epe_meter, 1),
("th3_tepe", th3_tepe_meter, 1),
("tepe_rel", t_epe_rel_meter, 1),
("th1_tepe_rel", th1_teper_meter, 1),
("flow_mag", flow_mag_meter, 1),
]
header = [k[0] for k in summary_table_content]
summary_row = [np.round(k[1].avg * k[2], 3) for k in summary_table_content]
summary_table_data = [header, summary_row]
print_log("Summary:", logger)
table = AsciiTable(summary_table_data)
print_log("\n" + table.table, logger=logger)
eval_results = {}
for i in range(len(summary_table_data[0])):
eval_results[summary_table_data[0][i].split(" ")[0]] = summary_table_data[1][i]
return eval_results
def evaluate_motion(self, results, logger, start_idx=7):
count_all = 0
metrics_all = {
"epe2d_scene_flow": 0.0,
"epe2d_optical_flow": 0.0,
"1px_scene_flow": 0.0,
"1px_optical_flow": 0.0,
}
for _, result in tqdm(enumerate(results)):
count_all += result["count"].item()
metrics_all["epe2d_scene_flow"] += result["epe2d_scene_flow"].item()
metrics_all["epe2d_optical_flow"] += result["epe2d_optical_flow"].item()
metrics_all["1px_scene_flow"] += result["1px_scene_flow"].item()
metrics_all["1px_optical_flow"] += result["1px_optical_flow"].item()
# depth summary table
if count_all <= 0.0:
count_all = 1.0
summary_table_content = [
("epe2d_scene_flow", metrics_all["epe2d_scene_flow"], 1.0 / count_all),
("epe2d_optical_flow", metrics_all["epe2d_optical_flow"], 1.0 / count_all),
("1px_scene_flow", metrics_all["1px_scene_flow"], 1.0 / count_all),
("1px_optical_flow", metrics_all["1px_optical_flow"], 1.0 / count_all),
]
header = [k[0] for k in summary_table_content]
summary_row = [np.round(k[1] * k[2], 3) for k in summary_table_content]
summary_table_data = [header, summary_row]
print_log("Summary:", logger)
table = AsciiTable(summary_table_data)
print_log("\n" + table.table, logger=logger)
eval_results = {}
for i in range(len(summary_table_data[0])):
eval_results[summary_table_data[0][i].split(" ")[0]] = summary_table_data[1][i]
return eval_results
def evaluate(self, results, metric="default", logger=None, **kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ["default", "disp_only", "motion_only"]
if metric not in allowed_metrics:
raise KeyError("metric {} is not supported".format(metric))
if metric == "disp_only":
return self.evaluate_disp(results, logger)
elif metric == "motion_only":
return self.evaluate_motion(results, logger)
elif metric == "default":
eval_results = self.evaluate_disp(results, logger)
eval_results.update(self.evaluate_motion(results, logger))
return eval_results
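# Worked sketch (not part of the original module): how update_mf_history groups frames
# into clips. The method never reads ``self``, so it can be exercised unbound; the
# filenames, num_frames=2 and the default "_[^_]*$" prefix pattern are assumed values.
def _demo_update_mf_history():
    group = CustomStereoMultiFrameDataset.update_mf_history
    history = []
    out0, history = group(None, history, {"filename": "seq1_000.png"}, 2)
    out1, history = group(None, history, {"filename": "seq1_001.png"}, 2)
    out2, history = group(None, history, {"filename": "seq2_000.png"}, 2)
    # out0 and out2 are None (clip incomplete / a new sequence started); out1 is the
    # "seq1_000.png" entry carrying both frames of the clip under its "mf" key.
    return out0, out1, out2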
# ==== end of file: datasets/custom_stereo_mf.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class TartanAirMultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
        # NOTE: intentionally calls CustomStereoMultiFrameDataset.__init__ (the grandparent),
        # bypassing SceneFlowMultiFrameDataset.__init__ so the suffixes below take effect.
        super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".npy",
flow_suffix=".npy",
flow_occ_suffix=".npy",
prefix_pattern=r"\d+_left.png",
**kwargs,
)
# ==== end of file: datasets/tartanair.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
import copy
from mmcv.utils import print_log
from mmseg.datasets import DATASETS
from mmseg.utils import get_root_logger
from .custom_stereo_mf import CustomStereoMultiFrameDataset
@DATASETS.register_module()
class SceneFlowMultiFrameDataset(CustomStereoMultiFrameDataset):
"""Person dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which
is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".pfm",
flow_suffix=".pfm",
disp_change_suffix=".pfm",
disp_occ_suffix=".png",
prefix_pattern=r"\d+.png",
**kwargs,
)
def load_annotations(
self,
img_dir,
img_suffix,
ann_dir,
seg_map_suffix,
r_img_dir,
r_img_suffix,
disp_dir,
disp_suffix,
split,
num_samples,
):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
r_img_dir (str|None): Path to right image directory.
r_img_suffix (str|None): Suffix of right images.
disp_dir (str|None): Path to annotation directory.
disp_suffix (str|None): Suffix of disparity maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
history = []
if split is not None:
with open(split) as f:
for line in f:
filenames = line.strip().split()
ann = dict(disp=filenames[2])
if len(filenames) > 3:
ann["flow"] = filenames[3]
if len(filenames) > 4:
ann["disp_change"] = filenames[4]
if len(filenames) > 5:
ann["flow_occ"] = filenames[5]
if len(filenames) > 6:
ann["disp2"] = filenames[6]
if len(filenames) > 7:
ann["disp_occ"] = filenames[7]
img_info = dict(
filename=filenames[0], r_filename=filenames[1], ann=ann
)
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
else:
raise AssertionError("Multi frame dataloader needs split")
if (
num_samples is not None
and 0 < num_samples <= len(img_infos)
):
img_infos = img_infos[:num_samples]
print_log(f"Loaded {len(img_infos)} images", logger=get_root_logger())
return img_infos
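# Split-file sketch (assumed paths, not taken from a shipped split): each line of the
# split lists whitespace-separated relative paths in the order parsed above, e.g.
#   seq/left/0001.png seq/right/0001.png seq/disp/0001.pfm seq/flow/0001.pfm
# with optional trailing entries for disp_change, flow_occ, disp2 and disp_occ.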
# ==== end of file: datasets/scene_flow.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
import re
import mmcv
# Requirements: NumPy and PIL/Pillow
import numpy as np
from PIL import Image
# sintel
# Check for endianness, based on Daniel Scharstein's optical flow code.
# Using little-endian architecture, these two should be equal.
TAG_FLOAT = 202021.25
TAG_CHAR = b'PIEH'  # written as raw bytes so the binary headers are valid under Python 3
### Sintel
def flow_read(filename):
""" Read optical flow from file, return (U,V) tuple.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
width = np.fromfile(f, dtype=np.int32, count=1)[0]
height = np.fromfile(f, dtype=np.int32, count=1)[0]
size = width * height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(
width, height)
tmp = np.fromfile(f, dtype=np.float32, count=-1).reshape((height, width * 2))
u = tmp[:, np.arange(width) * 2]
v = tmp[:, np.arange(width) * 2 + 1]
return u, v
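# Format sketch (not part of the original module): a .flo file is a float32 magic tag,
# int32 width/height, then row-major interleaved (u, v) float32 pairs. The temporary
# path and the 6x4 size are assumed values.
def _demo_flow_read():
    import os, struct, tempfile
    u = np.random.rand(4, 6).astype(np.float32)
    v = np.random.rand(4, 6).astype(np.float32)
    path = os.path.join(tempfile.gettempdir(), "_demo_flow.flo")
    with open(path, "wb") as f:
        f.write(struct.pack("<f", TAG_FLOAT))               # magic tag
        f.write(struct.pack("<ii", 6, 4))                    # width, height
        np.stack([u, v], axis=-1).astype("<f4").tofile(f)    # interleaved u/v per pixel
    ru, rv = flow_read(path)
    return np.allclose(ru, u) and np.allclose(rv, v)         # expected True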
def flow_write(filename, uv, v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert (uv.ndim == 3)
assert (uv.shape[2] == 2)
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert (u.shape == v.shape)
height, width = u.shape
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width * nBands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def depth_read(filename):
""" Read depth data from file, return as numpy array. """
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
    assert check == TAG_FLOAT, ' depth_read:: Wrong tag in depth file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
width = np.fromfile(f, dtype=np.int32, count=1)[0]
height = np.fromfile(f, dtype=np.int32, count=1)[0]
size = width * height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(
width, height)
depth = np.fromfile(f, dtype=np.float32, count=-1).reshape((height, width))
return depth
def depth_write(filename, depth):
""" Write depth to file. """
height, width = depth.shape[:2]
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
depth.astype(np.float32).tofile(f)
f.close()
def disparity_write(filename, disparity, bitdepth=16):
""" Write disparity to file.
bitdepth can be either 16 (default) or 32.
The maximum disparity is 1024, since the image width in Sintel
is 1024.
"""
d = disparity.copy()
# Clip disparity.
d[d > 1024] = 1024
d[d < 0] = 0
d_r = (d / 4.0).astype('uint8')
d_g = ((d * (2.0 ** 6)) % 256).astype('uint8')
out = np.zeros((d.shape[0], d.shape[1], 3), dtype='uint8')
out[:, :, 0] = d_r
out[:, :, 1] = d_g
if bitdepth > 16:
d_b = (d * (2 ** 14) % 256).astype('uint8')
out[:, :, 2] = d_b
Image.fromarray(out, 'RGB').save(filename, 'PNG')
def disparity_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
d_r = f_in[:, :, 0].astype('float64')
d_g = f_in[:, :, 1].astype('float64')
d_b = f_in[:, :, 2].astype('float64')
depth = d_r * 4 + d_g / (2 ** 6) + d_b / (2 ** 14)
return depth
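# Round-trip sketch (not part of the original module): Sintel packs disparity into the
# R/G/(B) channels of a PNG at 1/4, 1/2**6 and 1/2**14 pixel resolution, so writing and
# re-reading should agree to within the 2**-14 quantisation step. Path/size are assumed.
def _demo_disparity_roundtrip():
    import os, tempfile
    disp = np.random.rand(8, 8) * 100.0
    path = os.path.join(tempfile.gettempdir(), "_demo_disp.png")
    disparity_write(path, disp, bitdepth=32)
    back = disparity_read(path)
    return float(np.max(np.abs(back - disp)))  # expected below 1 / 2**14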
# def cam_read(filename):
# """ Read camera data, return (M,N) tuple.
#
# M is the intrinsic matrix, N is the extrinsic matrix, so that
#
# x = M*N*X,
# with x being a point in homogeneous image pixel coordinates, X being a
# point in homogeneous world coordinates.
# """
# txtdata = np.loadtxt(filename)
# intrinsic = txtdata[0,:9].reshape((3,3))
# extrinsic = textdata[1,:12].reshape((3,4))
# return intrinsic,extrinsic
#
#
# def cam_write(filename,M,N):
# """ Write intrinsic matrix M and extrinsic matrix N to file. """
# Z = np.zeros((2,12))
# Z[0,:9] = M.ravel()
# Z[1,:12] = N.ravel()
# np.savetxt(filename,Z)
def cam_read(filename):
""" Read camera data, return (M,N) tuple.
M is the intrinsic matrix, N is the extrinsic matrix, so that
x = M*N*X,
with x being a point in homogeneous image pixel coordinates, X being a
point in homogeneous world coordinates.
"""
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
    assert check == TAG_FLOAT, ' cam_read:: Wrong tag in cam file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
M = np.fromfile(f, dtype='float64', count=9).reshape((3, 3))
N = np.fromfile(f, dtype='float64', count=12).reshape((3, 4))
return M, N
def cam_write(filename, M, N):
""" Write intrinsic matrix M and extrinsic matrix N to file. """
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
M.astype('float64').tofile(f)
N.astype('float64').tofile(f)
f.close()
def segmentation_write(filename, segmentation):
""" Write segmentation to file. """
segmentation_ = segmentation.astype('int32')
seg_r = np.floor(segmentation_ / (256 ** 2)).astype('uint8')
seg_g = np.floor((segmentation_ % (256 ** 2)) / 256).astype('uint8')
seg_b = np.floor(segmentation_ % 256).astype('uint8')
out = np.zeros((segmentation.shape[0], segmentation.shape[1], 3), dtype='uint8')
out[:, :, 0] = seg_r
out[:, :, 1] = seg_g
out[:, :, 2] = seg_b
Image.fromarray(out, 'RGB').save(filename, 'PNG')
def segmentation_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
seg_r = f_in[:, :, 0].astype('int32')
seg_g = f_in[:, :, 1].astype('int32')
seg_b = f_in[:, :, 2].astype('int32')
segmentation = (seg_r * 256 + seg_g) * 256 + seg_b
return segmentation
### Others
def read_numpy_tartanair(path):
data = np.load(path).astype(np.float32)
return np.array(data)
def read_numpy_tartanair_uint8(path):
data = np.load(path).astype(np.uint8)
return np.array(data)
def read_kitti_disp(img_bytes):
disp = (mmcv.imfrombytes(img_bytes, flag="unchanged", backend="cv2").squeeze()) / 256.0
return disp
def read_kitti_flow(img_bytes):
flow = mmcv.imfrombytes(img_bytes, flag="unchanged", backend="cv2")
flow = flow[:, :, ::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2 ** 15) / 64.0
return flow, valid
def read_pfm(path):
"""Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale)
"""
with open(path, "rb") as file:
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == "PF":
color = True
elif header.decode("ascii") == "Pf":
color = False
else:
raise Exception("Not a PFM file: " + path)
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0:
# little-endian
endian = "<"
scale = -scale
else:
# big-endian
endian = ">"
data = np.frombuffer(file.read(), endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
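# Format sketch (not part of the original module): a minimal grayscale PFM file is a
# "Pf" header line, a "width height" line, a scale line (negative for little-endian),
# then raw float32 rows stored bottom-to-top. Path and size are assumed values.
def _demo_read_pfm():
    import os, tempfile
    data = np.random.rand(4, 6).astype(np.float32)
    path = os.path.join(tempfile.gettempdir(), "_demo.pfm")
    with open(path, "wb") as f:
        f.write(b"Pf\n6 4\n-1.0\n")                  # header, dims, little-endian scale
        np.flipud(data).astype("<f4").tofile(f)      # rows are stored bottom-to-top
    back, scale = read_pfm(path)
    return np.allclose(back, data), scale            # expected (True, 1.0)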
# ==== end of file: datasets/data_io.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .formating import DefaultFormatBundle # NOQA
from .loading_stereo import * # NOQA
from .custom_stereo_mf import CustomStereoMultiFrameDataset # NOQA
from .kitti_depth import Kitti2015MultiFrameDataset, KittiDepthMultiFrameDataset # NOQA
from .scene_flow import SceneFlowMultiFrameDataset # NOQA
from .sintel import SintelMultiFrameDataset # NOQA
from .tartanair import TartanAirMultiFrameDataset # NOQA
from .transforms import (
RandomCrop,
Pad,
PhotoMetricDistortion,
StereoPhotoMetricDistortion
) # NOQA
__all__ = [k for k in globals().keys() if not k.startswith("_")]
# ==== end of file: datasets/__init__.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class SintelMultiFrameDataset(SceneFlowMultiFrameDataset):
"""Person dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which
is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".flo",
flow_occ_suffix=".png",
prefix_pattern="frame.*",
**kwargs,
)
# ==== end of file: datasets/sintel.py (CODD-main) ====
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os.path as osp
import mmcv
import numpy as np
from mmseg.datasets import PIPELINES
from mmseg.datasets.pipelines import LoadImageFromFile
from .data_io import disparity_read, flow_read, read_numpy_tartanair, read_numpy_tartanair_uint8, read_kitti_disp, \
read_kitti_flow, read_pfm
BF_DEFAULT = 210.0
@PIPELINES.register_module(force=True)
class LoadImagesFromFile(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk'),
imdecode_backend='cv2'):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('img_prefix') is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_fields'].append('img')
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
# Adding the multiple frames after it from "mf" key
if "mf" not in results['img_info']:
results["img_list"] = [img]
else:
img_list = []
imginfolist = results['img_info']['mf']
for curr_imginfo in imginfolist:
if results.get('img_prefix') is not None:
filename = osp.join(results['img_prefix'], curr_imginfo['filename'])
else:
filename = curr_imginfo['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
img_list.append(img)
results['img_list'] = img_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
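# Pipeline sketch (assumed, not copied from a shipped CODD config): these loaders
# typically sit at the front of an mmseg-style pipeline, e.g.
#   [dict(type="LoadImagesFromFile"),
#    dict(type="LoadRImagesFromFile"),
#    dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp"),
#    ...]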
@PIPELINES.register_module()
class LoadRImagesFromFile(LoadImageFromFile):
"""Load an image from file.
Required keys are "r_img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
"""
def __init__(self, calib=1.0, **kwargs):
super(LoadRImagesFromFile, self).__init__(**kwargs)
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get("r_img_prefix") is not None:
filename = osp.join(
results["r_img_prefix"], results["img_info"]["r_filename"]
)
else:
filename = results["img_info"]["r_filename"]
img_bytes = self.file_client.get(filename)
r_img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend
)
if self.to_float32:
r_img = r_img.astype(np.float32)
results["r_img"] = r_img
results["img_fields"].append("r_img")
# Loading information about subsequent frames
if "mf" not in results['img_info']:
results['r_img_list'] = [r_img]
else:
img_list = []
imginfolist = results['img_info']['mf']
for curr_imginfo in imginfolist:
if results.get("r_img_prefix") is not None:
filename = osp.join(
results["r_img_prefix"], curr_imginfo["r_filename"]
)
else:
filename = curr_imginfo["r_filename"]
img_bytes = self.file_client.get(filename)
r_img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend
)
if self.to_float32:
r_img = r_img.astype(np.float32)
img_list.append(r_img)
results['r_img_list'] = img_list
return results
@PIPELINES.register_module()
class LoadDispAnnotations(object):
"""Load annotations for disparity/depth prediction.
Args:
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
key (str): "disp" or "sparse_disp"
is_reciprocal (bool)
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
calib=None,
key="disp",
is_reciprocal=False,
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
self.is_reciprocal = is_reciprocal
        self.calib = calib  # baseline * focal length
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_disp = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "sintel":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
gt_disp = disparity_read(filename)
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_disp = read_numpy_tartanair(filename)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_disp = np.zeros_like(results["r_img"])[..., 0]
else:
img_bytes = self.file_client.get(filename)
gt_disp = read_kitti_disp(img_bytes)
else:
img_bytes = self.file_client.get(filename)
gt_disp = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_disp.ndim == 3:
gt_disp = gt_disp[:, :, -1]
gt_disp[gt_disp == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_disp[np.isnan(gt_disp)] = BF_DEFAULT
gt_disp = gt_disp.astype(np.float32)
if self.is_reciprocal:
gt_disp = 1 / gt_disp
if self.calib is not None:
gt_disp = self.calib * gt_disp
results["gt_" + self.key] = gt_disp
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "img_info" in results and "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
disp_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_disp = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_disp = read_numpy_tartanair(filename)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_disp = np.zeros_like(results["r_img"])[..., 0]
else:
img_bytes = self.file_client.get(filename)
gt_disp = read_kitti_disp(img_bytes)
else:
img_bytes = self.file_client.get(filename)
gt_disp = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_disp.ndim == 3:
gt_disp = gt_disp[:, :, -1]
gt_disp[gt_disp == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_disp[np.isnan(gt_disp)] = BF_DEFAULT
gt_disp = gt_disp.astype(np.float32)
if self.is_reciprocal:
gt_disp = 1 / gt_disp
if self.calib is not None:
gt_disp = self.calib * gt_disp
disp_list.append(gt_disp)
results["gt_" + self.key + "_list"] = disp_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
repr_str += f"is_reciprocal={self.is_reciprocal},"
return repr_str
@PIPELINES.register_module()
class LoadOpticalFlowAnnotations(object):
"""Load annotations for optical flow prediction.
Args:
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
key (str): "opt"
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
key="flow"
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_flow = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_flow = read_numpy_tartanair(filename, channel=2)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_flow = np.ones_like(results["r_img"])[..., :2]
gt_flow = gt_flow * BF_DEFAULT
else:
img_bytes = self.file_client.get(filename)
gt_flow, valid = read_kitti_flow(img_bytes)
valid = np.tile(valid[..., None], (1, 1, 2)).astype(bool)
gt_flow[~valid] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
else:
img_bytes = self.file_client.get(filename)
gt_flow = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
        if gt_flow.ndim == 3:
            gt_flow = gt_flow[:, :, :2]
        gt_flow[gt_flow == np.inf] = BF_DEFAULT  # set to large number to be filtered out
        gt_flow[np.isnan(gt_flow)] = BF_DEFAULT
        gt_flow = gt_flow.astype(np.float32)
results["gt_" + self.key] = gt_flow
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
opt_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_flow = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_flow = read_numpy_tartanair(filename, channel=2)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_flow = np.ones_like(results["r_img"])[..., :2]
gt_flow = gt_flow * BF_DEFAULT
else:
img_bytes = self.file_client.get(filename)
gt_flow, valid = read_kitti_flow(img_bytes)
valid = np.tile(valid[..., None], (1, 1, 2)).astype(bool)
gt_flow[~valid] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
else:
img_bytes = self.file_client.get(filename)
gt_flow = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_flow.ndim == 3:
gt_flow = gt_flow[:, :, :2]
gt_flow[gt_flow == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_flow[np.isnan(gt_flow)] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
opt_list.append(gt_flow)
results["gt_" + self.key + "_list"] = opt_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
@PIPELINES.register_module()
class LoadOcclusionAnnotations(object):
"""
255 for occ
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
key="flow_occ",
inverse=False
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
self.inverse = inverse
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_occ = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_occ = read_numpy_tartanair_uint8(filename)
else:
img_bytes = self.file_client.get(filename)
gt_occ = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_occ.ndim == 3:
gt_occ = gt_occ[:, :, -1]
        if self.inverse:  # invert so that 255 marks occluded pixels
            gt_occ = 255 - gt_occ
results["gt_" + self.key] = gt_occ
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "img_info" in results and "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
occ_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_occ = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_occ = read_numpy_tartanair_uint8(filename)
else:
img_bytes = self.file_client.get(filename)
gt_occ = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_occ.ndim == 3:
gt_occ = gt_occ[:, :, -1]
                if self.inverse:  # invert so that 255 marks occluded pixels
                    gt_occ = 255 - gt_occ
occ_list.append(gt_occ)
results["gt_" + self.key + "_list"] = occ_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
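# --- Illustrative sketch (all paths below are hypothetical) ---
# Driving LoadDispAnnotations by hand, outside a full mmseg pipeline, would look
# roughly like this; the transform fills results["gt_disp"] and registers the
# key in results["seg_fields"]:
#
#   loader = LoadDispAnnotations(imdecode_backend="pfm", key="disp")
#   results = {
#       "disp_prefix": "/data/scene_flow",           # hypothetical prefix
#       "ann_info": {"disp": "disparity/0001.pfm"},  # hypothetical file
#       "seg_fields": [],
#   }
#   results = loader(results)
#   # results["gt_disp"] is now a float32 HxW disparity map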
|
CODD-main
|
datasets/loading_stereo.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class Kitti2015MultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
        # call the parent of SceneFlowMultiFrameDataset directly (its own
        # __init__ is bypassed so the KITTI-specific suffixes below take effect)
        super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".png",
disp2_suffix=".png",
prefix_pattern=r"_\d+.png",
**kwargs,
)
@DATASETS.register_module()
class KittiDepthMultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".png",
disp2_suffix=".png",
prefix_pattern=r"\d+.png",
**kwargs,
)
|
CODD-main
|
datasets/kitti_depth.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmseg.datasets import PIPELINES
from mmseg.datasets.pipelines import to_tensor
@PIPELINES.register_module(force=True)
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
for key in results.get("img_fields", []):
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results[key] = DC(to_tensor(img), stack=True)
if "gt_semantic_seg" in results:
# convert to long
results["gt_semantic_seg"] = DC(
to_tensor(
results["gt_semantic_seg"][None, ...].astype(np.int64)
),
stack=True,
)
if "gt_disp" in results:
results["gt_disp"] = DC(
to_tensor(results["gt_disp"][None, ...]), stack=True
)
if "gt_flow" in results:
gt_flow = np.ascontiguousarray(results["gt_flow"].transpose(2, 0, 1))
results["gt_flow"] = DC(to_tensor(gt_flow), stack=True)
if "gt_sparse_disp" in results:
results["gt_sparse_disp"] = DC(
to_tensor(results["gt_sparse_disp"][None, ...]), stack=True
)
return results
def __repr__(self):
return self.__class__.__name__
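# Illustrative sketch (array sizes are arbitrary): DefaultFormatBundle turns an
# HxWxC image into a CxHxW tensor wrapped in a DataContainer, and an HxW
# disparity map into a 1xHxW tensor.
def _demo_default_format_bundle():
    bundle = DefaultFormatBundle()
    results = {
        "img": np.random.rand(4, 6, 3).astype(np.float32),
        "img_fields": ["img"],
        "gt_disp": np.random.rand(4, 6).astype(np.float32),
    }
    results = bundle(results)
    assert tuple(results["img"].data.shape) == (3, 4, 6)
    assert tuple(results["gt_disp"].data.shape) == (1, 4, 6)
    return results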
@PIPELINES.register_module(force=True)
class DefaultFormatBundleList(object):
"""Default formatting bundle with multiple frames.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def _get_stacked_tensor(self, img_list):
tensor_list = []
for img in img_list:
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
tensor_list.append(to_tensor(img))
return DC(torch.stack(tensor_list), stack=True)
    def check_img(self, results, key, fail=False):
        """Sanity check that a single-frame field matches the first frame of its clip list."""
        base_image = results[key]
        other_image = results[key + "_list"][0]
        if fail:
            assert np.array_equal(base_image, other_image), f"{key} and {key}_list[0] differ"
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
self.check_img(results, "img")
self.check_img(results, "r_img")
if results.get("gt_disp", None) is not None:
self.check_img(results, "gt_disp", fail=True)
if results.get("gt_flow", None) is not None:
self.check_img(results, "gt_flow", fail=True)
if results.get("gt_disp_change", None) is not None:
self.check_img(results, "gt_disp_change", fail=True)
if results.get("gt_flow_occ", None) is not None:
self.check_img(results, "gt_flow_occ", fail=True)
if results.get("gt_disp2", None) is not None:
self.check_img(results, "gt_disp2", fail=True)
if results.get("gt_disp_occ", None) is not None:
self.check_img(results, "gt_disp_occ", fail=True)
for key in results.get("img_fields", []):
results[key] = self._get_stacked_tensor(results[key + "_list"])
del results[key + "_list"]
if "gt_semantic_seg_list" in results:
# convert to long
seg_list = results['gt_semantic_seg_list']
tensor_list = []
for seg in seg_list:
tensor_list.append(
to_tensor(seg[None, ...].astype(np.int64))
)
results['gt_semantic_seg'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_semantic_seg_list']
if "gt_disp_list" in results:
disp_list = results['gt_disp_list']
tensor_list = []
for disp in disp_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_list']
if "gt_flow_list" in results:
opt_list = results['gt_flow_list']
tensor_list = []
for opt in opt_list:
opt = np.ascontiguousarray(opt.transpose(2, 0, 1))
tensor_list.append(to_tensor(opt))
results['gt_flow'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_flow_list']
if "gt_disp_change_list" in results:
disp_change_list = results['gt_disp_change_list']
tensor_list = []
for disp in disp_change_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp_change'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_change_list']
if "gt_disp2_list" in results:
disp_change_list = results['gt_disp2_list']
tensor_list = []
for disp in disp_change_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp2'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp2_list']
if "gt_flow_occ" in results:
flow_occ_list = results['gt_flow_occ_list']
tensor_list = []
for flow_occ in flow_occ_list:
tensor_list.append(
to_tensor(flow_occ[None, ...])
)
results['gt_flow_occ'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_flow_occ_list']
if "gt_disp_occ" in results:
disp_occ_list = results['gt_disp_occ_list']
tensor_list = []
for disp_occ in disp_occ_list:
tensor_list.append(
to_tensor(disp_occ[None, ...])
)
results['gt_disp_occ'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_occ_list']
if "gt_sparse_disp_list" in results:
sp_disp_list = results['gt_sparse_disp_list']
tensor_list = []
for sparse_disp in sp_disp_list:
tensor_list.append(
to_tensor(sparse_disp[None, ...])
)
results['gt_sparse_disp'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_sparse_disp_list']
return results
def __repr__(self):
return self.__class__.__name__
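# Illustrative sketch (sizes are arbitrary): the multi-frame bundle stacks the
# per-frame arrays along a new leading frame dimension, so two 4x6x3 frames
# become a 2x3x4x6 DataContainer.
def _demo_default_format_bundle_list():
    bundle = DefaultFormatBundleList()
    frame = np.random.rand(4, 6, 3).astype(np.float32)
    results = {
        "img": frame,
        "img_list": [frame, frame],
        "r_img": frame,
        "r_img_list": [frame, frame],
        "img_fields": ["img", "r_img"],
    }
    results = bundle(results)
    assert tuple(results["img"].data.shape) == (2, 3, 4, 6)
    return results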
|
CODD-main
|
datasets/formating.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
from .warp import flow_warp
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
def compute_valid_mask(gt_disp, meta, gt_semantic_seg=None, gt_flow_prev=None, gt_disp_change=None):
"""compute valid pixels based on either disparity, segmentation, flow or disp change (< 210 px)
at minimum, disparity should be provided
Args:
gt_disp (Tensor): NxHxW
meta (List): dataset meta information
gt_semantic_seg ([type], optional): NxHxW. Defaults to None.
gt_flow_prev ([type], optional): Nx2xHxW. Defaults to None.
gt_disp_change ([type], optional): NxHxW. Defaults to None.
Returns:
Tensor: True for valid
"""
mask = (gt_disp > meta["disp_range"][0]) & (gt_disp < meta["disp_range"][1])
if gt_semantic_seg is not None:
mask &= gt_semantic_seg > 0
if gt_flow_prev is not None:
mag = torch.sum(gt_flow_prev ** 2, dim=1, keepdim=True).sqrt()
mask &= mag < BF_DEFAULT
if gt_disp_change is not None:
mask &= gt_disp_change.abs() < BF_DEFAULT
mask.detach_()
return mask
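# Illustrative sketch with made-up values: pixels outside the assumed disparity
# range of (1, 210) are marked invalid.
def _demo_compute_valid_mask():
    gt_disp = torch.tensor([[[0.5, 10.0, 300.0]]])  # 1x1x3
    meta = {"disp_range": (1, 210)}
    mask = compute_valid_mask(gt_disp, meta)
    assert mask.tolist() == [[[False, True, False]]]
    return mask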
def compute_gt_disp_change(gt_flow_occ_prev, gt_disp_prev, gt_disp_curr, gt_flow):
"""derive disparity change from data
Args:
gt_flow_occ_prev (Tensor): Nx1xHxW
gt_disp_prev (Tensor): Nx1xHxW
gt_disp_curr (Tensor): Nx1xHxW
gt_flow (Tensor): Nx2xHxW
Returns:
Tensor: disparity change, Nx1xHxW
"""
gt_disp_curr_warp, valid = flow_warp(
gt_disp_curr, gt_flow, padding_mode="zeros", mode="nearest"
)
gt_disp_change = gt_disp_curr_warp - gt_disp_prev
gt_disp_change[~valid] = BF_DEFAULT
gt_disp_change[gt_flow_occ_prev] = BF_DEFAULT # True for occluded
return gt_disp_change, gt_disp_curr_warp
def collect_metric(state):
"""store results
Args:
state (dict): states storing information
Returns:
Tensor: aggregated results
"""
metric_list = dict()
for k, v in state.items():
if "meter" in k:
metric_list[k.replace('_meter', '')] = torch.tensor([v.avg])
if "all" in k:
metric_list[k.replace('_all', '')] = torch.tensor([v])
return metric_list
def reset_meter(state):
"""reset results in states when new sequence starts
Args:
state (dict)): states storing information
"""
for k, v in state.items():
if "meter" in k:
v.reset()
if "all" in k:
state[k] = 0.0
def collect_gt(kwargs):
"""get ground truth data from kwargs"""
gt_disp = kwargs.get("gt_disp", None)
if gt_disp is not None:
gt_disp_list = torch.unbind(gt_disp[0], dim=1)
else:
gt_disp_list = None
gt_flow = kwargs.get("gt_flow", None)
if gt_flow is not None:
gt_flow_list = torch.unbind(gt_flow[0], dim=1)
else:
gt_flow_list = None
gt_disp_change = kwargs.get("gt_disp_change", None)
if gt_disp_change is not None:
gt_disp_change_list = torch.unbind(gt_disp_change[0], dim=1)
else:
gt_disp_change_list = None
gt_flow_occ = kwargs.get("gt_flow_occ", None)
if gt_flow_occ is not None:
gt_flow_occ_list = torch.unbind(gt_flow_occ[0], dim=1)
else:
gt_flow_occ_list = None
gt_disp2 = kwargs.get("gt_disp2", None)
if gt_disp2 is not None:
gt_disp2_list = torch.unbind(gt_disp2[0], dim=1)
else:
gt_disp2_list = None
gt_disp_occ = kwargs.get("gt_disp_occ", None)
if gt_disp_occ is not None:
gt_disp_occ_list = torch.unbind(gt_disp_occ[0], dim=1)
else:
gt_disp_occ_list = None
return (
gt_disp_list,
gt_flow_list,
gt_disp_change_list,
gt_flow_occ_list,
gt_disp2_list,
gt_disp_occ_list,
)
def denormalize(inp):
output = inp * torch.tensor(__imagenet_stats['std'], device=inp.device)
output = output + torch.tensor(__imagenet_stats['mean'], device=inp.device)
output = output * 255
output = output.byte()
return output
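# Illustrative sketch: denormalize() undoes the ImageNet normalization on a
# ...x3 tensor and returns uint8 values; a zero (i.e. mean-valued) input maps
# back to the ImageNet mean in the 0..255 range.
def _demo_denormalize():
    normalized = torch.zeros(2, 2, 3)
    rgb = denormalize(normalized)
    assert rgb.dtype == torch.uint8
    assert rgb[0, 0].tolist() == [123, 116, 103]  # truncated 123.675, 116.28, 103.53
    return rgb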
|
CODD-main
|
utils/misc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .running_stats import *
from .metric import *
from .misc import *
from .warp import *
|
CODD-main
|
utils/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import csv
import re
import numpy as np
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name=' ', fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
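# Illustrative usage with arbitrary values: the meter keeps the last value and
# the count-weighted running average.
def _demo_average_meter():
    meter = AverageMeter('epe', ':.3f')
    meter.update(2.0)
    meter.update(4.0, n=3)
    assert meter.val == 4.0
    assert meter.avg == 3.5  # (2.0 + 4.0 * 3) / 4
    return str(meter)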
class RunningStats(object):
"""Computes running mean and standard deviation
Adapted from https://gist.github.com/wassname/a9502f562d4d3e73729dc5b184db2501
Usage:
rs = RunningStats()
for i in range(10):
rs += np.random.randn()
print(rs)
print(rs.mean, rs.std)
"""
def __init__(self, n=0.0, m=None, s=None):
self.n = n
self.m = m
self.s = s
def clear(self):
self.n = 0.0
def push(self, x, per_dim=True):
x = np.array(x).copy().astype('float32')
# process input
if per_dim:
self.update_params(x)
else:
for el in x.flatten():
self.update_params(el)
def update_params(self, x):
self.n += 1
if self.n == 1:
self.m = x
self.s = 0.0
else:
prev_m = self.m.copy()
self.m += (x - self.m) / self.n
self.s += (x - prev_m) * (x - self.m)
def __add__(self, other):
if isinstance(other, RunningStats):
sum_ns = self.n + other.n
prod_ns = self.n * other.n
delta2 = (other.m - self.m) ** 2.0
return RunningStats(
sum_ns,
(self.m * self.n + other.m * other.n) / sum_ns,
self.s + other.s + delta2 * prod_ns / sum_ns,
)
else:
self.push(other)
return self
@property
def mean(self):
return self.m if self.n else 0.0
def variance(self):
return self.s / (self.n) if self.n else 0.0
@property
def std(self):
return np.sqrt(self.variance())
def __repr__(self):
return (
'<RunningMean(mean={: 2.4f}, std={: 2.4f}, n={: 2f}, m={: 2.4f}, s={: 2.4f})>'.format(
self.mean, self.std, self.n, self.m, self.s
)
)
def __str__(self):
return 'mean={: 2.4f}, std={: 2.4f}'.format(self.mean, self.std)
class RunningStatsWithBuffer(RunningStats):
def __init__(self, path=None, row_id_map=None, data=None, header=None, n=0.0, m=None, s=None):
super(RunningStatsWithBuffer, self).__init__(n, m, s)
self.path = path
if data is None:
self.data = []
else:
assert isinstance(data, list) and any(isinstance(i, list) for i in data)
self.data = data
if row_id_map is None:
self.row_id_map = {}
else:
assert isinstance(row_id_map, dict)
self.row_id_map = row_id_map
if header is None:
self.header = None
else:
assert isinstance(header, list)
self.header = header
def push(self, id, value, per_dim=True):
if id in self.row_id_map:
return
self.row_id_map[id] = len(self.data)
self.data.append(value if isinstance(value, list) else [value])
super(RunningStatsWithBuffer, self).push(value)
def __add__(self, other):
if isinstance(other, RunningStats):
for k, v in other.row_id_map.items():
if k in self.row_id_map:
continue
self.row_id_map[k] = len(self.data)
self.data.append(other.data[v])
data_array = np.array(self.data).copy().astype('float32')
return RunningStatsWithBuffer(
self.path,
self.row_id_map,
self.data,
self.header,
len(self.data),
np.nanmean(data_array, 0),
np.nanvar(data_array, 0),
)
else:
self.push(*other)
return self
def dump(self):
def natural_sort(l):
def convert(text):
return int(text) if text.isdigit() else text.lower()
return sorted(l, key=lambda key: [convert(c) for c in re.split('([0-9]+)', key[0])])
table = [self.header]
table.extend([[k] + self.data[v] for k, v in self.row_id_map.items()])
table[1:] = natural_sort(table[1:])
with open(self.path, 'w') as f:
writer = csv.writer(f)
writer.writerows(table)
@property
def mean(self):
data_array = np.array(self.data).copy().astype('float32')
return np.nanmean(data_array, 0)
def variance(self):
data_array = np.array(self.data).copy().astype('float32')
return np.nanvar(data_array, 0)
|
CODD-main
|
utils/running_stats.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import torch
EPSILON = 1e-8
def epe_metric(d_est, d_gt, mask, use_np=False):
d_est, d_gt = d_est[mask], d_gt[mask]
if use_np:
epe = np.mean(np.abs(d_est - d_gt))
else:
epe = torch.mean(torch.abs(d_est - d_gt))
return epe
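# Illustrative sketch with arbitrary values: the end-point error is the mean
# absolute difference computed over the masked pixels only.
def _demo_epe_metric():
    d_est = torch.tensor([1.0, 2.0, 8.0])
    d_gt = torch.tensor([1.5, 2.0, 4.0])
    mask = torch.tensor([True, True, False])  # last pixel excluded
    epe = epe_metric(d_est, d_gt, mask)
    assert torch.isclose(epe, torch.tensor(0.25))
    return epe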
def t_epe_metric(d_est_t0, d_gt_t0, d_est_t1, d_gt_t1, mask_t0, mask_t1, use_np=False):
d_est = d_est_t0 - d_est_t1
d_gt = d_gt_t0 - d_gt_t1
# sanity_mask = (d_est_t0 > 0.0) & (d_est_t1 > 0.0) # disparity must be larger than 0
if use_np:
mask = np.logical_and(mask_t0, mask_t1)
# mask = np.logical_and(mask, sanity_mask)
mask = mask.astype(bool)
abs_err = np.abs(d_est - d_gt)[mask]
relative_err = abs_err / (np.abs(d_gt[mask]) + 1e-3)
else:
mask = torch.logical_and(mask_t0, mask_t1)
# mask = torch.logical_and(mask, sanity_mask)
mask = mask.bool()
abs_err = torch.abs(d_est - d_gt)[mask]
relative_err = abs_err / (torch.abs(d_gt[mask]) + 1e-3)
return abs_err, relative_err
def thres_metric(d_est, d_gt, mask, thres, use_np=False):
assert isinstance(thres, (int, float))
d_est, d_gt = d_est[mask], d_gt[mask]
if use_np:
e = np.abs(d_gt - d_est)
else:
e = torch.abs(d_gt - d_est)
err_mask = e > thres
if use_np:
mean = np.mean(err_mask.astype("float"))
else:
mean = torch.mean(err_mask.float())
return mean
def depth2normal(depth):
zy, zx = np.gradient(depth)
# or use Sobel to get a joint Gaussian smoothing and differentation to reduce noise
# zx = cv2.Sobel(d_im, cv2.CV_64F, 1, 0, ksize=5)
# zy = cv2.Sobel(d_im, cv2.CV_64F, 0, 1, ksize=5)
normal = np.dstack((-zx, -zy, np.ones_like(depth)))
n = np.linalg.norm(normal, axis=2)
normal[:, :, 0] /= n
normal[:, :, 1] /= n
normal[:, :, 2] /= n
# offset and rescale values to be in [0, 1]
normal += 1
normal /= 2
return normal
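# Illustrative sketch: a constant depth map is a flat surface, so every normal
# points along +z, which encodes to 1.0 after the rescale to [0, 1].
def _demo_depth2normal():
    depth = np.ones((4, 4), dtype=np.float32)
    normal = depth2normal(depth)
    assert normal.shape == (4, 4, 3)
    assert np.allclose(normal[..., 2], 1.0)  # z component: (1 + 1) / 2
    return normal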
|
CODD-main
|
utils/metric.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import re
from argparse import ArgumentParser
import numpy as np
from natsort import natsorted
def write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split):
fname = os.path.join(args.output_path, args.dataset + '_' + split + '.txt')
with open(fname, 'w') as f:
for idx in range(len(left_image)):
line = ' '.join([left_image[idx], right_image[idx], disparity[idx]])
if flow is not None:
line += ' ' + flow[idx]
else:
line += ' None'
if disp_change is not None:
line += ' ' + disp_change[idx]
else:
line += ' None'
if flow_occ is not None:
line += ' ' + flow_occ[idx]
else:
line += ' None'
if disp_frame2_in_frame1 is not None:
line += ' ' + disp_frame2_in_frame1[idx]
else:
line += ' None'
if disp_occ is not None:
line += ' ' + disp_occ[idx]
else:
line += ' None'
f.write(line + '\n')
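# Each emitted line therefore has the fixed column order
#   left right disparity flow disp_change flow_occ disp_frame2_in_frame1 disp_occ
# with the literal string 'None' filling any column whose annotation is missing.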
def split_sceneflow(args, split):
# left images
left_image = []
if split == 'train' or split == 'val':
train_path = os.path.join(args.data_root, 'TRAIN')
else:
train_path = os.path.join(args.data_root, 'TEST')
# find all images
for root, dirs, files in os.walk(train_path):
if len(files) > 0 and 'left' in root:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
num_imgs = int(len(left_image) * (1 - args.val_ratio))
if split == 'train':
left_image = left_image[:num_imgs]
elif split == 'val':
left_image = left_image[num_imgs:]
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('left', 'right'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('.png', '.pfm'))
# optical flow
flow = []
for li in left_image:
fname = li.replace('/left/', '/into_future/left/')
        idx = re.search(r'\d+.png', li).group()
post = '_L.pfm'
pre = 'OpticalFlowIntoFuture_'
opt_idx = pre + idx.replace('.png', '') + post
flow.append(fname.replace(idx, opt_idx))
# disparity change
disp_change = []
for li in left_image:
fname = li.replace('/left/', '/into_future/left/')
disp_change.append(fname.replace('.png', '.pfm'))
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = None
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def split_kitti_depth(args, split):
val_split = ['2011_10_03/2011_10_03_drive_0042_sync/'] # 1 scene
test_split = ['2011_09_26/2011_09_26_drive_0002_sync', '2011_09_26/2011_09_26_drive_0005_sync/',
'2011_09_26/2011_09_26_drive_0013_sync/', '2011_09_26/2011_09_26_drive_0020_sync/',
'2011_09_26/2011_09_26_drive_0023_sync/', '2011_09_26/2011_09_26_drive_0036_sync/',
'2011_09_26/2011_09_26_drive_0079_sync/', '2011_09_26/2011_09_26_drive_0095_sync/',
'2011_09_26/2011_09_26_drive_0113_sync/', '2011_09_28/2011_09_28_drive_0037_sync/',
'2011_09_29/2011_09_29_drive_0026_sync/', '2011_09_30/2011_09_30_drive_0016_sync/',
'2011_10_03/2011_10_03_drive_0047_sync/'] # 13 scenes
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'image_02' in root:
if split == 'val':
for val_scene in val_split:
if val_scene not in root:
continue
else:
print(val_scene, root)
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
elif split == 'test':
for test_scene in test_split:
if test_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
else: # the rest are training splits
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_02', 'image_03'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('image_02', 'disp'))
# optical flow
flow = []
for li in left_image:
flow.append(li.replace('image_02', 'flow'))
# disparity change
disp_change = None
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = []
for li in left_image:
disp_frame2_in_frame1.append(li.replace('image_02', 'disp2'))
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def split_kitti_2015(args, split):
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'training/image_2' in root:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
folds = np.array_split(np.stack(left_image), 5) # 5-fold cross validation
for fold in range(5):
if split == 'train':
left_image = [x for ii, x in enumerate(folds) if ii != fold]
left_image = np.concatenate(left_image)
elif split == 'val':
left_image = folds[fold]
num_images = len(left_image)
left_image = left_image[:int(num_images * 0.5)]
elif split == 'test':
left_image = folds[fold]
num_images = len(left_image)
left_image = folds[fold][int(num_images * 0.5):]
left_image = list(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_2', 'image_3'))
# disparity
disparity = []
for li in left_image:
if '_10' in li: # only disparity of first frame is provided
disparity.append(li.replace('image_2', 'disp_occ_0'))
else:
disparity.append('None')
# optical flow
flow = []
for li in left_image:
if '_10' in li: # only flow of first frame is provided
flow.append(li.replace('image_2', 'flow_occ'))
else:
flow.append('None')
# disparity change
disp_change = None
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = []
for li in left_image:
if '_10' in li: # only disp2 of first frame is provided
disp_frame2_in_frame1.append(li.replace('image_2', 'disp_occ_1'))
else:
disp_frame2_in_frame1.append('None')
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split + str(fold))
def split_tartanair(args, split):
train_split = ['abandonedfactory', 'abandonedfactory_night', 'amusement', 'endofworld', 'gascola', 'hospital',
'japanesealley', 'neighborhood', 'ocean', 'office', 'office2', 'oldtown', 'seasidetown',
'seasonsforest_winter', 'soulcity', 'westerndesert']
test_split = ['carwelding']
val_split = ['seasonsforest']
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'image_left' in root:
if split == 'val':
for val_scene in val_split:
if val_scene not in root:
continue
else:
print(val_scene, root)
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
elif split == 'test':
for test_scene in test_split:
if test_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
else: # the rest are training splits
for train_scene in train_split:
if train_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_left', 'image_right').replace('_left.png', '_right.png'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('image_left', 'depth_left').replace('_left.png', '_left_depth.npy'))
# optical flow
flow = []
for li in left_image:
flow.append(li.replace('image_left', 'flow').replace('_left.png', '_flow.npy'))
# disparity change
disp_change = None
# flow_occ
flow_occ = []
for li in left_image:
        flow_occ.append(li.replace('image_left', 'flow').replace('_left.png', '_mask.npy'))
# disp_frame2_in_frame1
disp_frame2_in_frame1 = None
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def main():
parser = ArgumentParser('split generation')
parser.add_argument('--dataset', type=str,
choices=['SceneFlow', 'KITTI_Depth', 'KITTI_2015', 'TartanAir', 'Sintel'])
parser.add_argument('--output_path', type=str, help='path to write the split files')
parser.add_argument('--val_ratio', type=float, default=0.1)
parser.add_argument('--data_root', type=str, help="Path to data (left and right images)")
args = parser.parse_args()
splits = ['train', 'val', 'test']
if args.dataset == 'SceneFlow':
for split in splits:
split_sceneflow(args, split)
elif args.dataset == 'KITTI_Depth':
for split in splits:
split_kitti_depth(args, split)
elif args.dataset == 'KITTI_2015':
for split in splits:
split_kitti_2015(args, split)
elif args.dataset == 'TartanAir':
for split in splits:
split_tartanair(args, split)
if __name__ == "__main__":
main()
|
CODD-main
|
utils/generate_split_files.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn.functional as F
def normalize_coords(grid):
"""Normalize coordinates of image scale to [-1, 1]
Args:
grid: [B, 2, H, W]
"""
assert grid.size(1) == 2
h, w = grid.size()[2:]
grid[:, 0, :, :] = 2 * (grid[:, 0, :, :].clone() / (w - 1)) - 1 # x: [-1, 1]
grid[:, 1, :, :] = 2 * (grid[:, 1, :, :].clone() / (h - 1)) - 1 # y: [-1, 1]
grid = grid.permute((0, 2, 3, 1)) # [B, H, W, 2]
return grid
def meshgrid(img, homogeneous=False):
"""Generate meshgrid in image scale
Args:
img: [B, _, H, W]
homogeneous: whether to return homogeneous coordinates
Return:
grid: [B, 2, H, W]
"""
b, _, h, w = img.size()
x_range = torch.arange(0, w).view(1, 1, w).expand(1, h, w).type_as(img) # [1, H, W]
y_range = torch.arange(0, h).view(1, h, 1).expand(1, h, w).type_as(img)
grid = torch.cat((x_range, y_range), dim=0) # [2, H, W], grid[:, i, j] = [j, i]
grid = grid.unsqueeze(0).expand(b, 2, h, w) # [B, 2, H, W]
if homogeneous:
ones = torch.ones_like(x_range).unsqueeze(0).expand(b, 1, h, w) # [B, 1, H, W]
grid = torch.cat((grid, ones), dim=1) # [B, 3, H, W]
assert grid.size(1) == 3
return grid
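# Illustrative sketch with a tiny tensor: channel 0 of the grid holds x (column)
# indices and channel 1 holds y (row) indices.
def _demo_meshgrid():
    img = torch.zeros(1, 3, 2, 3)  # B=1, C=3, H=2, W=3
    grid = meshgrid(img)
    assert grid.shape == (1, 2, 2, 3)
    assert grid[0, 0].tolist() == [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]  # x
    assert grid[0, 1].tolist() == [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]  # y
    return grid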
def disp_warp(img, disp, padding_mode="border"):
"""Warping by disparity
Args:
img: [B, 3, H, W]
disp: [B, 1, H, W], positive
padding_mode: 'zeros' or 'border'
Returns:
warped_img: [B, 3, H, W]
valid_mask: [B, 3, H, W]
"""
grid = meshgrid(img) # [B, 2, H, W] in image scale
# Note that -disp here
offset = torch.cat((-disp, torch.zeros_like(disp)), dim=1) # [B, 2, H, W]
sample_grid = grid + offset
sample_grid = normalize_coords(sample_grid) # [B, H, W, 2] in [-1, 1]
warped_img = F.grid_sample(
img, sample_grid, mode="bilinear", padding_mode=padding_mode, align_corners=True
)
mask = torch.ones_like(img)
valid_mask = F.grid_sample(mask, sample_grid, mode="bilinear", padding_mode="zeros", align_corners=True)
valid_mask[valid_mask < 0.9999] = 0
valid_mask[valid_mask > 0] = 1
return warped_img, valid_mask.bool()
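# Illustrative sketch with a constant disparity of 1 (arbitrary values): every
# output pixel samples its left neighbour, so column j of the warped image
# equals column j - 1 of the input, with the left border replicated.
def _demo_disp_warp():
    img = torch.arange(8, dtype=torch.float32).view(1, 1, 2, 4)
    disp = torch.ones(1, 1, 2, 4)
    warped, valid = disp_warp(img, disp)
    assert torch.allclose(warped[0, 0, 0], torch.tensor([0.0, 0.0, 1.0, 2.0]), atol=1e-4)
    assert torch.allclose(warped[0, 0, 1], torch.tensor([4.0, 4.0, 5.0, 6.0]), atol=1e-4)
    return warped, valid  # valid marks the re-sampled border column as invalid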
def flow_warp(img, flow, padding_mode="border", mode="bilinear"):
"""Warping by flow
Args:
img: [B, _, H, W]
flow: [B, 2, H, W]
padding_mode: 'zeros' or 'border'
Returns:
warped_img: [B, _, H, W]
valid_mask: [B, _, H, W]
"""
assert len(img.shape) == 4 and len(flow.shape) == 4, "Input must have 4 dimension"
assert flow.shape[1] == 2, "Flow must be channel=2"
grid = meshgrid(img) # [B, 2, H, W] in image scale
    # unlike disp_warp, the flow offset is added directly (no negation needed)
    sample_grid = grid + flow
sample_grid = normalize_coords(sample_grid) # [B, H, W, 2] in [-1, 1]
warped_img = F.grid_sample(img, sample_grid, mode=mode, padding_mode=padding_mode, align_corners=True)
mask = torch.ones_like(img)
valid_mask = F.grid_sample(mask, sample_grid, mode=mode, padding_mode="zeros", align_corners=True)
valid_mask[valid_mask < 0.9999] = 0
valid_mask[valid_mask > 0] = 1
return warped_img, valid_mask.bool()
def interpolate_value_disp(x, indices, maxdisp):
"""
bilinear interpolate tensor x at sampled indices
x: [B, D, H, W] (features)
indices: [B, H, W] sampled indices (0-indexed)
"""
# B,D,H,W to B,H,W,D
x = x.permute(0, 2, 3, 1)
indices = torch.unsqueeze(indices, -1)
indices = torch.clamp(indices, 0, maxdisp - 1)
idx0 = torch.floor(indices).long()
idx1 = torch.min(idx0 + 1, (maxdisp - 1) * torch.ones_like(idx0))
idx0 = torch.max(idx1 - 1, torch.zeros_like(idx0))
y0 = torch.gather(x, -1, idx0)
y1 = torch.gather(x, -1, idx1)
lmbda = indices - idx0.float()
output = (1 - lmbda) * y0 + (lmbda) * y1
output = torch.squeeze(output, -1)
return output
def get_disp_from_offset(pred, off, maxdisp, down):
_, pred = torch.max(pred, 1)
off = interpolate_value_disp(off, pred.float(), maxdisp // down)
pred = (pred + off) * down
return pred
def interpolate_value(x, indices, maxdepth):
"""
bilinear interpolate tensor x at sampled indices
x: [B, D, H, W] (features)
val: [B, H, W] sampled indices (1-indexed)
"""
# B,D,H,W to B,H,W,D
x = x.permute(0, 2, 3, 1)
indices = torch.unsqueeze(indices - 1, -1)
indices = torch.clamp(indices, 0, maxdepth - 1)
idx0 = torch.floor(indices).long()
idx1 = torch.min(idx0 + 1, (maxdepth - 1) * torch.ones_like(idx0))
idx0 = torch.max(idx1 - 1, torch.zeros_like(idx0))
y0 = torch.gather(x, -1, idx0)
y1 = torch.gather(x, -1, idx1)
lmbda = indices - idx0.float()
output = (1 - lmbda) * y0 + (lmbda) * y1
output = torch.squeeze(output, -1)
return output
def get_depth_from_offset(pred, off, mindepth=1, scale=1):
_, pred = torch.max(pred, 1, keepdim=True)
off = torch.gather(off, 1, pred)
pred = pred + mindepth # Make 1-indexed
pred = (pred + off) * scale
return torch.squeeze(pred, 1)
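# Illustrative sketch: sampling a 4-bin volume at the fractional index 1.5
# interpolates linearly between bins 1 and 2.
def _demo_interpolate_value_disp():
    x = torch.tensor([0.0, 10.0, 20.0, 30.0]).view(1, 4, 1, 1)  # B=1, D=4, H=W=1
    indices = torch.full((1, 1, 1), 1.5)
    out = interpolate_value_disp(x, indices, maxdisp=4)
    assert torch.allclose(out, torch.tensor([[[15.0]]]))
    return out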
|
CODD-main
|
utils/warp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import re
import time
from argparse import ArgumentParser
import cv2
import numpy as np
import open3d as o3d
from natsort import natsorted
from tqdm import tqdm
class InteractivePCDVisualizer(object):
def __call__(self, pcd_list):
o3d.visualization.draw_geometries(pcd_list)
class VideoPCDVisualizer(object):
def __init__(self, save_path, frame_rate, size=(1600, 1600)):
self.vis = o3d.visualization.Visualizer()
self.frame_rate = float(frame_rate)
self.save_path = save_path
self.width, self.height = size
def __call__(self, frames_pcds):
"""
frames_pcds is a list of lists. The outer list holds the frame
pointclouds for the video. The inner list holds the pointclouds for each frame.
pointclouds must be o3d.geometry.PointCloud() objects
"""
self.vis.create_window(width=self.width, height=self.height)
rgb_list = []
for frame_index, frame_pcds in enumerate(frames_pcds):
ctr = self.vis.get_view_control()
for pcd in frame_pcds:
                reset_bounding_box = frame_index == 0
self.vis.add_geometry(pcd, reset_bounding_box=reset_bounding_box)
# if frame_index == 0:
# ctr.set_up(self.up)
# ctr.set_lookat(self.lookat)
# ctr.set_front(self.front)
# ctr.set_zoom(self.zoom)
opt = self.vis.get_render_option()
opt.point_size = point_size
opt.background_color = [0, 0, 0]
self.vis.poll_events()
self.vis.update_renderer()
for i, frame_pcd in enumerate(frame_pcds):
self.vis.remove_geometry(frame_pcd, reset_bounding_box=False)
rgb = self.vis.capture_screen_float_buffer()
rgb = np.array(rgb) * 255
rgb_list.append(rgb[:, :, ::-1].astype(np.uint8))
time.sleep(1.0 / self.frame_rate)
output_file = cv2.VideoWriter(
filename=self.save_path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=self.frame_rate,
frameSize=(rgb_list[0].shape[1], rgb_list[0].shape[0]),
isColor=True,
)
for rgb in rgb_list:
output_file.write(rgb)
output_file.release()
class PCDBuilder(object):
def __init__(self, fx, fy, cx, cy, baseline):
self.camera = o3d.camera.PinholeCameraIntrinsic()
self.camera.intrinsic_matrix = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
self.baseline = baseline
def pcd_from_rgbd(self, color, disp, disp_trunc, remove_flying):
disp[disp < disp_trunc[0]] = 0.0
disp[disp > disp_trunc[1]] = 0.0
color_raw = o3d.geometry.Image(cv2.cvtColor(color, cv2.COLOR_BGR2RGB))
depth_raw = self.camera.intrinsic_matrix[0, 0] / (disp + 1e-5) * self.baseline
depth_raw = o3d.geometry.Image(depth_raw)
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
color_raw, depth_raw, depth_trunc=3.0, convert_rgb_to_intensity=False
)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, self.camera)
if remove_flying:
pcd, _ = pcd.remove_statistical_outlier(10, 5)
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
return pcd
def __call__(self, color, depth, depth_trunc, remove_flying):
frame_pcds = []
for idx, img in enumerate(tqdm(color, desc="Creating pcds")):
single_frame_pcds = [
self.pcd_from_rgbd(img, depth[idx], depth_trunc, remove_flying)]
frame_pcds.append(single_frame_pcds)
return frame_pcds
def load_depth_path(color_path, revise_keys=[('img_left', 'Depth'), ('RGB_0_Rectified', 'Depth_sf')]):
depth_path = color_path
for p, r in revise_keys:
depth_path = re.sub(p, r, depth_path)
return depth_path
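# Illustrative sketch with a hypothetical path: each (pattern, replacement) pair
# in revise_keys is applied in turn to derive the depth path from the color path.
def _demo_load_depth_path():
    assert load_depth_path('scene01/img_left/000000.png') == 'scene01/Depth/000000.png'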
def main(args):
if args.fy is None:
args.fy = args.fx
if args.cx is None:
args.cx = args.shape[0] / 2
if args.cy is None:
args.cy = args.shape[1] / 2
color_path = args.input
depth_path = args.depth
img_fname_list = natsorted(os.listdir(color_path))
depth_fname_list = natsorted(os.listdir(depth_path))
img_list = []
for idx, fname in enumerate(img_fname_list):
if os.path.splitext(fname)[-1] != '.png':
continue
if idx < args.start_frame:
continue
img = cv2.imread(os.path.join(color_path, fname), cv2.IMREAD_COLOR)
img_list.append(img)
if not args.video:
break
if 0 < args.num_frames <= len(img_list):
break
disp_list = []
for idx, fname in enumerate(depth_fname_list):
if os.path.splitext(fname)[-1] != '.npz':
continue
if idx * 50 < args.start_frame:
continue
disp = np.load(os.path.join(depth_path, fname))['disp']
disp_list.append(disp)
if not args.video:
break
if 0 < args.num_frames and args.num_frames / 50 <= len(disp_list):
break
disp_list = np.concatenate(disp_list, axis=0)
if len(img_list) < disp_list.shape[0]:
disp_list = disp_list[:len(img_list)]
elif len(img_list) > disp_list.shape[0]:
img_list = img_list[:disp_list.shape[0]]
# crop
h, w = img_list[0].shape[:2]
border_h, border_w = int(args.shrink[1] * h), int(args.shrink[0] * w)
pcd_builder = PCDBuilder(args.fx, args.fy, args.cx - border_w, args.cy - border_h, args.baseline)
d_list = []
for idx, color in enumerate(img_list):
border_h_b, border_w_r = int(args.shrink[-1] * h), int(args.shrink[-2] * w)
color = color[border_h:-border_h_b, border_w:-border_w_r]
img_list[idx] = color
disp = disp_list[idx, border_h:-border_h_b, border_w:-border_w_r]
d_list.append(disp)
frame_pcds = pcd_builder(img_list, d_list, args.disp_trunc, args.remove_flying)
if not args.video:
InteractivePCDVisualizer()(frame_pcds[0])
else:
VideoPCDVisualizer(args.output, args.frame_rate)(frame_pcds)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--video", action='store_true', help='Save visualization to video')
parser.add_argument("--frame-rate", default=30)
parser.add_argument("--input", help="Directory to input images")
parser.add_argument("--depth", help="Directory to depth images")
parser.add_argument("--output", help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.")
parser.add_argument("--fx", default=51.2 / 36 * 1024,
type=float, help="focal length along x-axis (longer side) in pixels")
parser.add_argument("--fy", default=None,
type=float, help="focal length along y-axis (shorter side) in pixels")
parser.add_argument("--cx", default=None, type=float, help="centre of image along x-axis")
parser.add_argument("--cy", default=None, type=float, help="centre of image along y-axis")
parser.add_argument("--baseline", default=1.0, type=float, help="baseline")
parser.add_argument("--shape", type=int, nargs="+", default=[1600, 1200], help="input image size [W, H]")
parser.add_argument("--disp_trunc", type=float, nargs='+', default=[1.0, 210.0])
parser.add_argument("--shrink", nargs='+', type=float, default=[0.1] * 4, help='left top right bottom')
parser.add_argument("--point_size", type=int, default=3)
parser.add_argument("--num_frames", default=-1, type=int)
parser.add_argument("--remove_flying", action='store_true')
parser.add_argument("--start_frame", type=int, default=0)
args = parser.parse_args()
point_size = args.point_size
main(args)
|
CODD-main
|
utils/vis_point_cloud.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
_base_ = [
'models/consistent_online_depth_network.py', 'datasets/custom.py',
'default_runtime.py'
]
|
CODD-main
|
configs/inference_config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
_base_ = [
'models/codd.py', 'datasets/scene_flow.py',
'default_runtime.py', 'schedules/schedule_stereo.py'
]
|
CODD-main
|
configs/training_config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
|
CODD-main
|
configs/default_runtime.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# pseudo camera parameters; they don't really matter for inference
intrinsics = [640, 360, 1050, 1050]
calib = 210
disp_range = (1, 210)
depth_range = (calib / 210.0, calib / 1.0)
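# i.e. depth_range = (1.0, 210.0) for the pseudo calib of 210 above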
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
test=dict(
type="CustomStereoMultiFrameDataset",
test_mode=True,
img_dir=None,
r_img_dir=None,
ann_dir=None,
disp_dir=None,
img_suffix=".png",
r_img_suffix=".png",
split=None,
pipeline=pipeline,
num_samples=-1,
calib=calib,
disp_range=disp_range,
depth_range=depth_range,
num_frames=-1,
prefix_pattern=r'\d+.+.png',
intrinsics=intrinsics
),
)
|
CODD-main
|
configs/datasets/custom.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "TartanAirMultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 320 * 0.25 # from https://github.com/castacks/tartanair_tools/blob/master/data_type.md
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [320, 320, 320, 240] # https://github.com/castacks/tartanair_tools/blob/master/data_type.md
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (448, 640)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="tartanair", key="disp", is_reciprocal=True, calib=calib),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="tartanair", key="flow"),
dict(type="LoadOcclusionAnnotations", imdecode_backend="tartanair", key="flow_occ"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="tartanair", key="disp", is_reciprocal=True, calib=calib),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="tartanair", key="flow"),
dict(type="LoadOcclusionAnnotations", imdecode_backend="tartanair", key="flow_occ"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
|
CODD-main
|
configs/datasets/tartanair.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "SceneFlowMultiFrameDataset"
data_root = "PATH_TO_STEREO_IMG"
disp_root = "PATH_TO_DISPARITY"
flow_root = "PATH_TO_FLOW"
disp_change_root = "PATH_TO_DISPARITY_CHANGE"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 1050
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [1050, 1050, 480, 270]
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (384, 768)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="pfm", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp_change"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion", asym=True),
dict(type="Normalize", **img_norm_cfg),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp_change"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="pfm", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp_change"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp_change"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
|
CODD-main
|
configs/datasets/scene_flow.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "SintelMultiFrameDataset"
data_root = "PATH_TO_DATA"
flow_root = "PATH_TO_FLOW"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 688 * 0.01
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
# fx = fy = 688, cx = 512, cy = 218 (from the depth folder camera data); baseline = 10 cm (from the stereo data README)
intrinsics = [688, 688, 512, 218]
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 1024)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="sintel", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="sintel", key="flow"),
dict(type="LoadOcclusionAnnotations", key="flow_occ"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="StereoPhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="sintel", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="sintel", key="flow"),
dict(type="LoadOcclusionAnnotations", key="flow_occ"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
|
CODD-main
|
configs/datasets/sintel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "KittiDepthMultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 384.38 # from raw data calibration result
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
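# For reference: depth_range = (384.38 / 210.0, 384.38 / 1.0) ~ (1.83, 384.38).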
intrinsics = [721.54, 721.54, 621, 187.5] # image resolution 1242, 375
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 960)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
|
CODD-main
|
configs/datasets/kitti_depth.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "Kitti2015MultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 384.38 # from raw data calibration result
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [721.54, 721.54, 621, 187.5] # image resolution 1242, 375
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 960)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
|
CODD-main
|
configs/datasets/kitti_2015.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
iters = 1 # 16 for scene flow/KITTI, 1 for Sintel/TartanAir
motion_loss_weight = 1.0 # 0.5 for joint training tartan/KITTI, 1.0 for pretrain
freeze_stereo = True
freeze_motion = False
if freeze_stereo or freeze_motion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
motion=dict(
type="Motion",
iters=iters,
raft3d=dict(
type="RAFT3D",
cnet_cfg=dict(
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18_small'),
# when training from scratch, include this line to initialize the weights
type='HRNet',
norm_cfg=dict(type='SyncBN', requires_grad=False),
norm_eval=True,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,)),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36)),
stage3=dict(
num_modules=3,
num_branches=3,
block='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=2,
num_branches=4,
block='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144))
)
)
),
loss=dict(
type='MotionLoss',
loss_weight=motion_loss_weight
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
),
test_cfg=dict(mode='whole')
)
|
CODD-main
|
configs/models/stereo_motion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
iters = 16 # 16 for scene flow/KITTI, 1 for Sintel/TartanAir
motion_loss_weight = 0.5 # 0.5 for joint training tartan/KITTI, 1.0 for pretrain
fusion_loss_weight = 1.0
wr_weight = 1.0
wf_weight = 1.0
freeze_stereo = False
freeze_motion = False
freeze_fusion = False
if freeze_stereo or freeze_motion or freeze_fusion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
motion=dict(
type="Motion",
iters=iters,
raft3d=dict(
type="RAFT3D",
cnet_cfg=dict(
type='HRNet',
norm_cfg=dict(type='SyncBN', requires_grad=False),
norm_eval=True,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,)),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36)),
stage3=dict(
num_modules=3,
num_branches=3,
block='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=2,
num_branches=4,
block='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144))
)
)
),
loss=dict(
type='MotionLoss',
loss_weight=motion_loss_weight
),
),
fusion=dict(
type="Fusion",
in_channels=24,
fusion_channel=32,
corr_cfg=dict(type='px2patch', patch_size=3),
loss=dict(
type='FusionLoss',
loss_weight=fusion_loss_weight,
min_disp=1,
max_disp=320,
wr_weight=wr_weight,
wf_weight=wf_weight
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
freeze_fusion=freeze_fusion,
),
test_cfg=dict(mode='whole')
)
|
CODD-main
|
configs/models/codd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
freeze_stereo = False
freeze_motion = True
freeze_fusion = True
if freeze_stereo or freeze_motion or freeze_fusion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
freeze_fusion=freeze_fusion,
),
test_cfg=dict(mode='whole')
)
|
CODD-main
|
configs/models/stereo.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
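# For reference: with gpu_factor = 8 this gives max_iter = 12500, and the
# checkpoint/evaluation interval below becomes 5000 // 8 = 625 iterations.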
optimizer = dict(type="Adam", lr=2e-4, weight_decay=0.00001)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-4,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=5000 // gpu_factor)
evaluation = dict(interval=5000 // gpu_factor, metric="default")
|
CODD-main
|
configs/schedules/schedule_fusion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
optimizer = dict(type='Adam', lr=4e-4, betas=(0.9, 0.999))
optimizer_config = dict()
# learning policy
lr_config = dict(policy='MultiGamma', step=[225, 293, 315], gamma=[0.25, 0.4, 0.25])
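# Illustration of the schedule above (handled by MultiGammaLrUpdaterHook in
# model/lr_updater.py): starting from the base lr of 4e-4, the lr drops to
# 1e-4 after epoch 225, to 4e-5 after epoch 293, and to 1e-5 after epoch 315.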
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=340) # Following HITNet
checkpoint_config = dict(by_epoch=True, interval=20)
evaluation = dict(interval=10, metric='default')
|
CODD-main
|
configs/schedules/schedule_stereo.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 200000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-4, weight_decay=0.00001)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-4,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
|
CODD-main
|
configs/schedules/schedule_motion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
|
CODD-main
|
configs/schedules/schedule_motion_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 50000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=5000 // gpu_factor)
evaluation = dict(interval=5000 // gpu_factor, metric="default")
|
CODD-main
|
configs/schedules/schedule_fusion_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
|
CODD-main
|
configs/schedules/schedule_stereo_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os.path as osp
from abc import ABCMeta
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
from mmcv.utils import mkdir_or_exist
from mmseg.models.builder import MODELS
from utils import AverageMeter, thres_metric, t_epe_metric, collect_metric, collect_gt, compute_valid_mask, \
compute_gt_disp_change, reset_meter, flow_warp
from .builder import ESTIMATORS
from .motion.raft3d.projective_ops import induced_flow
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
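# Note: 1050 * 0.2 = 210.0, which matches the 210.0 disparity cap used in the
# dataset configs (disp_range=(1.0, 210.0)). Below it is used both as a
# baseline*focal product to convert inverse depth to disparity and as a
# sentinel value for invalid disparity change.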
@ESTIMATORS.register_module()
class ConsistentOnlineDynamicDepth(BaseModule, metaclass=ABCMeta):
"""Consistent online depth network"""
def __init__(
self,
stereo=None,
motion=None,
fusion=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
**kwargs,
):
super(ConsistentOnlineDynamicDepth, self).__init__(**kwargs)
self.fp16_enabled = False
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.build_model(stereo, motion, fusion)
def build_model(self, stereo, motion, fusion):
assert stereo is not None
self.stereo = MODELS.build(stereo)
if motion is not None:
self.motion = MODELS.build(motion)
else:
self.motion = None
if fusion is not None:
self.fusion = MODELS.build(fusion)
else:
self.fusion = None
def freeze_fusion(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_fusion", False)
):
return True
else:
return False
def freeze_motion(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_motion", False)
):
return True
else:
return False
def freeze_stereo(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_stereo", False)
):
return True
else:
return False
def consistent_online_depth_estimation(self, left_img, right_img, img_metas, state):
"""network
Args:
left_img (Tensor)
right_img (Tensor)
img_metas (Tensor): dataset metas
state (dict): states storing past information
Returns:
dict: outputs
"""
if self.freeze_stereo() or not self.training:
with torch.no_grad():
outputs = self.stereo.stereo_matching(
left_img, right_img, img_metas, state
)
else:
outputs = self.stereo.stereo_matching(left_img, right_img, img_metas, state)
if self.motion is not None:
if self.freeze_motion() or not self.training:
with torch.no_grad():
self.motion(
state,
outputs,
img_metas=img_metas,
train_mode=not self.freeze_motion() & self.training,
)
else:
self.motion(
state,
outputs,
img_metas=img_metas,
train_mode=not self.freeze_motion() & self.training,
)
if self.fusion is not None:
if self.freeze_fusion() or not self.training:
with torch.no_grad():
self.fusion.memory_query(outputs, state, img_metas=img_metas)
self.fusion.memory_update(outputs, state, img_metas=img_metas)
else:
self.fusion.memory_query(outputs, state, img_metas=img_metas)
self.fusion.memory_update(outputs, state, img_metas=img_metas)
return outputs
@auto_fp16(apply_to=("img", "r_img"))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]).
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def forward_train(
self,
l_img,
img_metas,
r_img,
gt_disp,
gt_semantic_seg=None,
gt_flow=None,
gt_disp_change=None,
gt_flow_occ=None,
gt_disp2=None,
**kwargs,
):
"""train step
Args:
l_img (Tensor): left image
img_metas (List): dataset meta
r_img (Tensor): right image
gt_disp (Tensor): Nx1xHxW
gt_semantic_seg (Tensor, optional): Nx1xHxW. Defaults to None.
gt_flow (Tensor, optional): Nx2xHxW. Defaults to None.
gt_disp_change (Tensor, optional): Nx1xHxW. Defaults to None.
gt_flow_occ (Tensor, optional): Nx1xHxW, occluded regions of flow, to be used to compute disparity change in TartanAir. Defaults to None.
gt_disp2 (Tensor, optional): disparity of next frame in current frame, to be used to compute disparity change in KITTI Depth. Defaults to None.
Returns:
dict: keys preceded with "loss_" will be summed for backpropagation
"""
state = dict(
pred_disp=[],
gt_disp=[],
mask_disp=[],
pred_disp_pyramid=[],
gt_flow=[],
gt_disp_change=[],
gt_flow_occ=[],
gt_disp2=[],
)
l_img_list = torch.unbind(l_img, dim=1)
r_img_list = torch.unbind(r_img, dim=1)
gt_disp_list = torch.unbind(gt_disp, dim=1)
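        # Clip inputs arrive as N x MF x C x H x W (MF frames per clip);
        # unbinding along dim=1 yields one tensor per frame so the clip can be
        # processed frame by frame below.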
if gt_flow is not None:
gt_flow_list = torch.unbind(gt_flow, dim=1)
else:
gt_flow_list = None
if gt_disp_change is not None:
gt_disp_change_list = torch.unbind(gt_disp_change, dim=1)
else:
gt_disp_change_list = None
if gt_flow_occ is not None:
gt_flow_occ_list = torch.unbind(gt_flow_occ, dim=1)
else:
gt_flow_occ_list = None
if gt_disp2 is not None:
gt_disp2_list = torch.unbind(gt_disp2, dim=1)
else:
gt_disp2_list = None
losses = dict()
for idx, (l_img, r_img, gt_disp) in enumerate(
zip(l_img_list, r_img_list, gt_disp_list)
):
if gt_flow_list is not None:
gt_flow = gt_flow_list[idx]
state["gt_flow"].append(gt_flow)
if gt_disp_change_list is not None:
gt_disp_change = gt_disp_change_list[idx]
state["gt_disp_change"].append(gt_disp_change)
if gt_flow_occ_list is not None:
gt_flow_occ = gt_flow_occ_list[idx] > 0
state["gt_flow_occ"].append(gt_flow_occ)
if gt_disp2_list is not None:
gt_disp2 = gt_disp2_list[idx]
state["gt_disp2"].append(gt_disp2)
# compute valid mask, save to states
mask_disp = compute_valid_mask(gt_disp, img_metas[0], gt_semantic_seg)
state["gt_disp"].append(gt_disp)
state["mask_disp"].append(mask_disp)
if torch.sum(mask_disp).item() == 0:
print("MASK_SUM", mask_disp.shape, torch.sum(mask_disp))
outputs = self.consistent_online_depth_estimation(l_img, r_img, img_metas, state)
loss = self.losses(outputs, gt_disp, mask_disp, idx, state, img_metas[0], gt_semantic_seg)
losses.update(loss)
return losses
def losses(
self, outputs, gt_disp, mask_disp, idx, state, meta, gt_semantic_seg=None
):
"""compute losses
Args:
outputs (List)
gt_disp (Tensor): Nx1xHxW
mask_disp (Tensor): Nx1xHxW, mask for disparity, True for valid
idx (int): frame index of the video sequence
state (dict): memory states of past information
meta (List): dataset meta
gt_semantic_seg (Tensor, optional): Nx1xHxW. Defaults to None.
Returns:
dict: losses
"""
pred_disp = outputs["pred_disp"]
state["pred_disp"].append(pred_disp)
loss = dict()
if not self.freeze_stereo():
self.stereo.losses(
loss, outputs, gt_disp, mask_disp, idx, gt_semantic_seg, meta
)
if idx >= 1:
if self.motion is not None and not self.freeze_motion() and self.motion.loss is not None:
self.motion.losses(loss, outputs, idx, state, meta)
if self.fusion is not None and not self.freeze_fusion() and self.fusion.loss is not None:
self.fusion.losses(loss, outputs, gt_disp, mask_disp, idx, state, meta)
return loss
def forward_test(self, img, img_metas, r_img=None, **kwargs):
"""
Args:
            img (List[Tensor]): The outer list is not used.
img_metas (List[List[dict]]): The outer list is not used.
The inner list indicates images in a batch.
"""
for var, name in [(img, "img"), (img_metas, "img_metas")]:
if not isinstance(var, list):
raise TypeError(f"{name} must be a list, but got " f"{type(var)}")
img = img[0]
r_img = r_img[0] if r_img is not None else r_img
img_meta = img_metas[0]
with torch.no_grad():
pred = self.inference(img, r_img, img_meta, **kwargs)
pred = [pred]
return pred
def inference(
self, img, r_img, img_meta, reciprocal=False, evaluate=True, **kwargs
):
"""inference
Args:
img (Tensor): left image
r_img (Tensor): right image
img_meta (List): dataset meta
            reciprocal (bool, optional): whether the prediction is depth; if True, use the "calib" key in meta to convert to disparity. Defaults to False.
            evaluate (bool, optional): if True, evaluate against GT; if False, output disparity for visualization. Defaults to True.
Returns:
Tensor: The output disp prediction (evaluate=False) or metrics (evaluate=True)
"""
self.reset_inference_state()
l_img_list = torch.unbind(img, dim=1)
r_img_list = torch.unbind(r_img, dim=1)
B, MF, _, H, W = img.shape
(
gt_disp_list,
gt_flow_list,
gt_disp_change_list,
gt_flow_occ_list,
gt_disp2_list,
gt_disp_occ_list,
) = collect_gt(kwargs)
outputs = []
img_h, img_w = img_meta[0]["img_shape"][:2] # to remove padded region for eval
for idx, (l_img, r_img) in enumerate(zip(l_img_list, r_img_list)):
if gt_disp_list is not None:
gt_disp = gt_disp_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp"].append(gt_disp)
else:
gt_disp = None
if gt_flow_list is not None:
gt_flow = gt_flow_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_flow"].append(gt_flow)
if gt_disp_change_list is not None:
gt_disp_change = gt_disp_change_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_flow_occ_list is not None:
gt_flow_occ = (
gt_flow_occ_list[idx] > 0
                )  # False for non-occluded, True for occluded
self.inference_state["gt_flow_occ"].append(
gt_flow_occ[:, :, :img_h, :img_w]
)
if gt_disp_change_list is None and idx > 0:
gt_disp_change, _ = compute_gt_disp_change(
self.inference_state["gt_flow_occ"][idx - 1],
self.inference_state["gt_disp"][idx - 1],
self.inference_state["gt_disp"][idx],
self.inference_state["gt_flow"][idx - 1],
)
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_disp2_list is not None:
gt_disp2 = gt_disp2_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp2"].append(gt_disp2)
if gt_disp_change_list is None:
gt_disp_change = gt_disp2 - gt_disp
gt_disp_change[gt_disp2 <= 0.0] = BF_DEFAULT
gt_disp_change[gt_disp <= 0.0] = BF_DEFAULT
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_disp_occ_list is not None:
# True for non-occluded to comply with semantic seg
gt_disp_occ = (gt_disp_occ_list[idx] <= 0)[:, :, :img_h, :img_w]
else:
gt_disp_occ = None
output = self.consistent_online_depth_estimation(
l_img, r_img, img_meta, self.inference_state
)
pred_disp = output["pred_disp"]
# for stereo depth model
if reciprocal:
pred_disp = img_meta[0]["calib"] / pred_disp
# save prediction (uncropped for temporal model)
self.inference_state["pred_disp"].append(pred_disp)
# crop for evaluation
pred_disp = pred_disp[:, :, :img_h, :img_w]
outputs.append(pred_disp)
# perform evaluation if needed
if evaluate:
gt_disp = self.inference_state.get('gt_disp', None)
assert gt_disp is not None, "No ground truth provided"
gt_disp = gt_disp[-1]
# import matplotlib.pyplot as plt
# plt.imshow(gt_disp.squeeze().cpu())
# plt.show()
self.calc_metric(idx, pred_disp, gt_disp, img_meta[0], img_h, img_w, gt_semantic_seg=gt_disp_occ,
Ts=output.get("Ts", None))
if evaluate: # return evaluated metrics
outputs = collect_metric(self.inference_state)
else: # otherwise, return disp map
outputs = torch.cat(outputs, dim=1)
assert len(outputs.shape) == 4, "Output shape is wrong"
return outputs
def reset_inference_state(self):
"""reset inference states when new sequence starts"""
self.inference_state = OrderedDict(
pred_disp=[],
gt_disp=[],
mask_disp=[],
gt_flow=[],
gt_disp_change=[],
gt_flow_occ=[],
gt_disp2=[],
)
# disp metric
self.inference_state["epe_meter"] = AverageMeter()
self.inference_state["th3_meter"] = AverageMeter()
# temporal metric
self.inference_state["tepe_meter"] = AverageMeter()
self.inference_state["th3_tepe_meter"] = AverageMeter()
self.inference_state["tepe_rel_meter"] = AverageMeter()
self.inference_state["th1_tepe_rel_meter"] = AverageMeter()
# magnitude of flow
self.inference_state["flow_mag_meter"] = AverageMeter()
# 3D metric
self.inference_state["count_all"] = 0.0
self.inference_state["epe2d_scene_flow_all"] = 0.0
self.inference_state["epe2d_optical_flow_all"] = 0.0
self.inference_state["1px_scene_flow_all"] = 0.0
self.inference_state["1px_optical_flow_all"] = 0.0
reset_meter(self.inference_state)
def calc_metric(
self,
idx,
pred_disp,
gt_disp,
meta,
h,
w,
gt_semantic_seg=None,
Ts=None,
):
"""evaluate reuslts
Args:
idx (int): frame idx
pred_disp (Tensor): Nx1xHxW
gt_disp (Tensor): Nx1xHxW
meta (dict): dataset meta
h (int): original image height
w (int): original image width
gt_semantic_seg (Tensor, optional): Nx2xHxW. Defaults to None.
Ts (Tensor, optional): NxHxW. Defaults to None.
"""
mask_disp = compute_valid_mask(
gt_disp, meta, gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid disp
self.inference_state["mask_disp"].append(mask_disp)
if mask_disp.any(): # only compute metrics if there are valid pixels
# compute metrics
self.inference_state["epe_meter"].update(
torch.mean(torch.abs(pred_disp[mask_disp] - gt_disp[mask_disp])).item()
)
self.inference_state["th3_meter"].update(
thres_metric(pred_disp, gt_disp, mask_disp, 3.0).item()
)
# temporal metrics
if idx > 0:
# use previous flow to warp current estimation to previous frame
flow = self.inference_state["gt_flow"][-2]
gt_disp_prev = self.inference_state["gt_disp"][-2]
pred_disp_prev = self.inference_state["pred_disp"][-2][:, :, :h, :w] # crop for evaluation
if torch.any(gt_disp > 0.0):
mask = compute_valid_mask(
gt_disp, meta, gt_flow_prev=flow, gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid flow
else: # in kitti, only disp in one frame is provided, so we input dummy gt_disps
mask = compute_valid_mask(
torch.ones_like(gt_disp, device=gt_disp.device) * BF_DEFAULT / 2.0, meta, gt_flow_prev=flow,
gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid flow
to_warp = torch.cat([gt_disp, pred_disp, mask.float()], dim=1)
to_warp, valid = flow_warp(
to_warp, flow, padding_mode="zeros", mode="nearest"
)
warped_gt_disp, warped_pred_disp, mask_warp = torch.unbind(to_warp, dim=1)
warped_gt_disp, warped_pred_disp = warped_gt_disp.unsqueeze(1), warped_pred_disp.unsqueeze(1) # N1HW
mask_curr = (valid.squeeze()[0] & mask_warp.bool() & mask) # excludes flow occ
if len(self.inference_state["gt_disp2"]) > 0: # if gt provides disp2, use provided
warped_gt_disp = self.inference_state["gt_disp2"][-2]
mask_curr &= warped_gt_disp > 0.0
mask_prev = self.inference_state["mask_disp"][-2] # prev mask only excludes invalid disp
# only compute metrics if there are valid pixels
if mask_prev.any() and mask_curr.any():
disp_tepe, disp_tepe_rel = t_epe_metric(warped_pred_disp, warped_gt_disp, pred_disp_prev, gt_disp_prev,
mask_prev, mask_curr)
self.inference_state["tepe_meter"].update(disp_tepe.mean().item())
self.inference_state["tepe_rel_meter"].update(
disp_tepe_rel.mean().item()
)
self.inference_state["th1_tepe_rel_meter"].update(
(disp_tepe_rel > 1.0).float().mean().item()
)
self.inference_state["th3_tepe_meter"].update(
(disp_tepe > 3.0).float().mean().item()
)
mag = torch.sum(flow ** 2, dim=1).sqrt().squeeze()
self.inference_state["flow_mag_meter"].update(mag.mean().item())
# motion metrics
if Ts is not None and len(self.inference_state["gt_disp_change"]) > 0:
if len(self.inference_state["gt_flow_occ"]) > 0:
# in this case, disp change computed from flow
gt_disp_change = self.inference_state["gt_disp_change"][-1]
mask = compute_valid_mask(gt_disp_prev, meta, gt_flow_prev=flow, gt_disp_change=gt_disp_change,
gt_semantic_seg=gt_semantic_seg) # excludes invalid disp change
gt_flow_occ = self.inference_state["gt_flow_occ"][-2]
mask[gt_flow_occ] = False # excludes flow occ since disp change is computed from flow
else: # otherwise, gt disp change provided
gt_disp_change = self.inference_state["gt_disp_change"][-2]
mask = compute_valid_mask(
gt_disp_prev,
meta,
gt_flow_prev=flow,
gt_disp_change=gt_disp_change,
gt_semantic_seg=gt_semantic_seg,
) # excludes invalid disp change
if mask.any(): # only compute metrics if there are valid pixels
flow = flow.permute(0, 2, 3, 1).squeeze() # HW2
# use transformation field to extract 2D and 3D flow
B = pred_disp.shape[0]
intrinsics = meta["intrinsics"]
intrinsics = torch.tensor(intrinsics).to(pred_disp.device).unsqueeze(0).expand(B, -1)
depth1 = BF_DEFAULT / pred_disp_prev
depth1 = torch.clip(depth1, max=BF_DEFAULT, min=0).squeeze(1)
flow2d_est, _, _ = induced_flow(
Ts[:, :h, :w], depth1, intrinsics
)
flow2d_est[..., -1] = (
flow2d_est[..., -1] * BF_DEFAULT
) # by default this is inverse depth, to convert to disparity it needs BF
flow2d = torch.cat(
[flow, gt_disp_change.squeeze()[..., None]], dim=-1
) # HW3
epe2d_scene_flow = torch.sum((flow2d_est - flow2d) ** 2, -1).sqrt()
epe2d_optical_flow = torch.sum(
((flow2d_est - flow2d) ** 2)[..., :2], -1
).sqrt()
# our evaluation (use all valid pixels)
epe2d_scene_flow = epe2d_scene_flow.squeeze()[mask.squeeze()].float()
epe2d_optical_flow_all = epe2d_optical_flow.squeeze()[mask.squeeze()].float()
self.inference_state["count_all"] += epe2d_scene_flow.reshape(-1).shape[0]
self.inference_state["epe2d_scene_flow_all"] += epe2d_scene_flow.sum()
self.inference_state["epe2d_optical_flow_all"] += epe2d_optical_flow_all.sum()
self.inference_state["1px_scene_flow_all"] += torch.sum(
epe2d_scene_flow < 1.0
)
self.inference_state["1px_optical_flow_all"] += torch.sum(
epe2d_optical_flow_all < 1.0
)
def show_result(
self, filename, result, show=False, out_file=None, running_stats=None, **kwargs
):
"""show result either to terminal or save output
Args:
filename (str)
result (Tensor): disparity or metrics
show (bool, optional): if show, output disparity. Defaults to False.
out_file (str, optional): output filename. Defaults to None.
running_stats (optional): running stats to accumulate results. Defaults to None.
"""
if not show:
if running_stats:
result = result[0]
if running_stats.header is None:
running_stats.header = ["filename"] + [k for k in result.keys()]
running_stats.push(filename, [result[k].cpu().item() for k in result.keys()])
else:
disp = result[0].cpu().numpy()
mkdir_or_exist(osp.dirname(out_file))
with open(out_file.replace(osp.splitext(out_file)[1], ".disp.pred.npz"), "wb") as f:
np.savez_compressed(f, disp=disp)
def train(self, mode=True):
"""overloading torch's train function to freeze different modules when necessary
Args:
mode (bool, optional): True to train, False to eval. Defaults to True.
"""
self.training = mode
for module in self.children():
module.train(mode)
if mode is False:
return
if self.freeze_stereo() and self.stereo is not None:
self.stereo.freeze()
if self.freeze_motion() and self.motion is not None:
self.motion.freeze()
if self.freeze_fusion() and self.fusion is not None:
self.fusion.freeze()
if mode:
n_parameters = sum(
p.numel() for n, p in self.named_parameters() if p.requires_grad
)
print(
"PARAM STATUS: total number of training parameters %.3fM"
% (n_parameters / 1000 ** 2)
)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
            data_batch (dict): The output of the dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
train_epe_attrs = [attr for attr in dir(self) if "train_epe" in attr]
for attr in train_epe_attrs:
log_vars.update({attr: getattr(self, attr)})
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(data_batch["img"].data),
)
return outputs
def val_step(self, data_batch, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
output = self(**data_batch, **kwargs)
return output
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, dict):
for k, v in loss_value.items():
log_vars[loss_name + "_" + k] = v
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
loss = sum(
_value
for _key, _value in log_vars.items()
if _key.startswith("loss") or (_key.startswith("decode") and "loss" in _key)
)
log_vars["loss"] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
|
CODD-main
|
model/codd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .builder import *
from .codd import ConsistentOnlineDynamicDepth
from .fusion import *
from .losses import *
from .motion import *
from .stereo import *
from .lr_updater import *
__all__ = ["build_estimator"]
|
CODD-main
|
model/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import warnings
from mmseg.models.builder import MODELS
ESTIMATORS = MODELS
def build_estimator(cfg, train_cfg=None, test_cfg=None):
"""Build estimator."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
'train_cfg and test_cfg is deprecated, '
'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return ESTIMATORS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
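# Minimal usage sketch (illustrative; assumes an mmcv config file such as
# configs/models/codd.py):
#   from mmcv import Config
#   cfg = Config.fromfile("configs/models/codd.py")
#   model = build_estimator(cfg.model)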
|
CODD-main
|
model/builder.py
|
from mmcv.runner import HOOKS, LrUpdaterHook
import mmcv
@HOOKS.register_module()
class MultiGammaLrUpdaterHook(LrUpdaterHook):
"""Step LR scheduler.
Args:
step (list[int]): Step to decay the LR. If an int value is given,
regard it as the decay interval. If a list is given, decay LR at
these steps.
gamma (list[float]): LR change ratios at certain steps.
"""
def __init__(self, step, gamma, **kwargs):
assert mmcv.is_list_of(step, int)
assert mmcv.is_list_of(gamma, float)
assert len(gamma) == len(step)
assert all([s > 0 for s in step])
self.step = step
self.gamma = gamma
super(MultiGammaLrUpdaterHook, self).__init__(**kwargs)
def get_lr(self, runner, base_lr):
progress = runner.epoch if self.by_epoch else runner.iter
# calculate exponential term
gamma = 1
for i, s in enumerate(self.step):
if progress < s:
break
gamma *= self.gamma[i]
return base_lr * gamma
|
CODD-main
|
model/lr_updater.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .fusion import Fusion
from .others import NullFusion, GTFusion, KalmanFusion
__all__ = ["NullFusion", "GTFusion", "KalmanFusion", "Fusion"]
|
CODD-main
|
model/fusion/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init, normal_init, trunc_normal_init
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from utils import disp_warp
from ..motion.raft3d.raft3d import GradientClip
class BasicBlock(nn.Module):
"""ResNet BasicBlock"""
expansion = 1
def __init__(self, c1, c2, s, p, d):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
c1, c2, kernel_size=3, stride=s, padding=d if d > 1 else p, dilation=d
),
nn.Mish(inplace=True),
)
self.conv2 = nn.Conv2d(
c2, c2, kernel_size=3, stride=1, padding=d if d > 1 else p, dilation=d
)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x
return out
@MODELS.register_module()
class Fusion(nn.Module):
def __init__(
self, in_channels, fusion_channel, loss=None, corr_cfg=dict(), ds_scale=4
):
"""fusion network
Args:
in_channels (int): stereo feature channels
fusion_channel (int): fusion feature channels
loss (dict, optional): config for loss. Defaults to None.
corr_cfg (dict, optional): config for correlation. Defaults to dict().
ds_scale (int, optional): low res scale. Defaults to 4.
"""
super(Fusion, self).__init__()
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
self.fusion_channel = fusion_channel
self.ds_scale = ds_scale
self.in_channels = in_channels
# configs
self.patch_size = corr_cfg.get("patch_size", 3)
self.unfold_op = nn.Unfold(
kernel_size=(self.patch_size, self.patch_size),
padding=self.patch_size - 1,
dilation=2,
)
self.key_layer = nn.Sequential(
nn.Conv2d(in_channels, self.fusion_channel, 1, 1, 0, 1), # 1x1
nn.ReLU(inplace=True),
BasicBlock(self.fusion_channel, self.fusion_channel, s=1, p=1, d=1), # 3x3
nn.ReLU(inplace=True),
nn.Conv2d(self.fusion_channel, self.fusion_channel, 1, 1, 0, 1), # 1x1
)
cross_attn_channels = self.patch_size ** 2
stereo_cost_channels = 3 * 2
self_attn_channels = (self.patch_size ** 2 - 1) * 2
flow_channels = 6
binary_channels = 1
feature_channels = self.fusion_channel
# define network
self.conv_corr = nn.Sequential( # for feat and disp corr
nn.Conv2d(
self_attn_channels + cross_attn_channels + stereo_cost_channels,
self.fusion_channel * 2, 1, padding=0, bias=True
), # 1x1
nn.ReLU(inplace=True),
nn.Conv2d(
self.fusion_channel * 2, self.fusion_channel, 1, padding=0, bias=True
),
nn.ReLU(inplace=True),
)
self.conv_disp = nn.Sequential( # for disparity
nn.Conv2d(2, self.fusion_channel, 7, padding=3), # 7x7
nn.ReLU(inplace=True),
nn.Conv2d(
self.fusion_channel, self.fusion_channel, 3, padding=1, bias=True
), # 3x3
nn.ReLU(inplace=True),
)
self.motion_conv = nn.Sequential(
nn.Conv2d(
self.fusion_channel * 2, self.fusion_channel - 2, 7, padding=3, bias=True
),
nn.ReLU(inplace=True),
)
self.weight_head = nn.Sequential(
nn.Conv2d(
self.fusion_channel, self.fusion_channel, 3, padding=1, bias=True
),
nn.Conv2d(self.fusion_channel, 1, 1, padding=0, bias=True),
GradientClip(),
nn.Sigmoid(),
)
self.forget_head = nn.Sequential(
nn.Conv2d(
flow_channels + self_attn_channels + cross_attn_channels + binary_channels,
16, 1, padding=0, bias=True
),
nn.Conv2d(16, 8, 3, padding=1, bias=True),
nn.Conv2d(8, 1, 1, padding=0, bias=True),
GradientClip(),
nn.Sigmoid(),
)
self.residual_conv = nn.Sequential(
nn.Conv2d(
self.fusion_channel + feature_channels, self.fusion_channel, 3, padding=1, bias=True
),
nn.ReLU(inplace=True),
)
self.init_weights()
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in fusion network"
% (n_parameters / 1000 ** 2)
)
def init_weights(self):
"""weight initialization"""
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m.weight, std=0.02)
if m.bias is not None:
if "ffn" in n:
normal_init(m.bias, std=1e-6)
else:
constant_init(m.bias, 0)
elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
kaiming_init(m.weight, mode="fan_in")
if m.bias is not None:
constant_init(m.bias, 0)
elif isinstance(
m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm, nn.InstanceNorm2d)
):
constant_init(m.bias, 0)
constant_init(m.weight, 1.0)
def _px2patch_corr(self, k, memory_k, B, C, H, W, self_corr=False):
"""pixel-patch correlation
Args:
k (Tensor): keys
memory_k (Tensor): memory keys
B (int): batch size
C (int): channel size
H (int): height
W (int): width
self_corr (bool, optional): Defaults to False.
Returns:
Tensor: correlation value
"""
q = k.reshape(B, C, H * W).unsqueeze(2) # B,C,1,HW
memory_k = self.unfold_feat(memory_k, self.patch_size) # B,C,pq,HW
if C == 1:
kk = q - memory_k # B, pq, W
else:
kk = (q * memory_k).sum(1) # B, pq, HW
kk = kk.view(B, self.patch_size ** 2, H, W) # B, pq, H, W
if self_corr:
mask = (
torch.ones(self.patch_size ** 2).bool().to(k.device)
) # drop self-self
mask[(self.patch_size ** 2) // 2] = False
kk = kk[:, mask]
# normalize
kk = kk / math.sqrt(C)
return kk
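        # For the default patch_size=3 this returns 9 channels per pixel
        # (cross-correlation), or 8 when self_corr=True drops the center match;
        # compute_input_cues later concatenates the curr/warp self-correlations,
        # giving the 16 channels assumed by self_attn_channels in __init__.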
def disparity_confidence(self, pred_curr, pred_warp, fea_l, fea_r):
"""approximate disparity confidence
Args:
pred_curr (Tensor): Nx1xHxW
pred_warp (Tensor): Nx1xHxW
fea_l (Tensor): left feature
fea_r (Tensor): right feature
Returns:
Tensor: disparity confidence
"""
pred_curr, pred_warp = (
pred_curr[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
pred_warp[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
)
local_cv_warp = []
local_cv_pred = []
for k in range(-1, 2, 1):
local_warp = pred_warp / self.ds_scale + k
local_pred = pred_curr / self.ds_scale + k
warp_fea_r, _ = disp_warp(fea_r, local_warp, padding_mode="zeros")
pred_fea_r, _ = disp_warp(fea_r, local_pred, padding_mode="zeros")
cv_warp = torch.norm(fea_l - warp_fea_r, 1, 1, keepdim=True) / (self.in_channels / 24.0)
cv_pred = torch.norm(fea_l - pred_fea_r, 1, 1, keepdim=True) / (self.in_channels / 24.0)
local_cv_warp.append(cv_warp)
local_cv_pred.append(cv_pred)
# local cost volume for all the disp hypothesis[B, 3, H/scale, W/scale]
local_cv_warp = torch.cat(local_cv_warp, 1)
local_cv_pred = torch.cat(local_cv_pred, 1)
return local_cv_pred, local_cv_warp
def compute_input_cues(
self,
pred_curr,
pred_warp,
feat_curr,
feat_warp,
flow_warp,
confidence_warp,
fea_l,
fea_r,
):
"""compute input cues to regress weights
Args:
            pred_curr (Tensor): Nx1xHxW
            pred_warp (Tensor): Nx1xHxW
            feat_curr (Tensor): NxCxHxW
            feat_warp (Tensor): NxCxHxW
            flow_warp (Tensor): Nx3xHxW
            confidence_warp (Tensor): Nx3xHxW
            fea_l (Tensor): NxCxHxW
            fea_r (Tensor): NxCxHxW
Returns:
Tensor, Tensor: input cues at two resolutions
"""
B = feat_curr.shape[0]
H, W = feat_curr.shape[-2:]
# get hypothesis cost from stereo
cost_curr, cost_warp = self.disparity_confidence(
pred_curr, pred_warp, fea_l, fea_r
)
# get attention features
feat_cat = torch.cat([feat_curr, feat_warp], dim=0)
disp_cat_fr = torch.cat([pred_curr, pred_warp], dim=0)
feat_cross_attn = self._px2patch_corr(
feat_curr, feat_warp, B, self.fusion_channel, H, W
)
feat_self_attn = self._px2patch_corr(
feat_cat, feat_cat, 2 * B, self.fusion_channel, H, W, self_corr=True
)
disp_cross_attn = self._px2patch_corr(
pred_curr, pred_warp, B, 1, pred_curr.shape[-2], pred_curr.shape[-1]
)
disp_self_attn_fr = self._px2patch_corr(
disp_cat_fr,
disp_cat_fr,
2 * B,
1,
disp_cat_fr.shape[-2],
disp_cat_fr.shape[-1],
self_corr=True,
)
feat_self_attn = torch.cat(torch.chunk(feat_self_attn, 2, dim=0), dim=1)
disp_self_attn_fr = torch.cat(torch.chunk(disp_self_attn_fr, 2, dim=0), dim=1)
disp_cross_attn = disp_cross_attn.abs()
disp_self_attn_fr = disp_self_attn_fr.abs()
# concat
corr_feat = [feat_cross_attn, feat_self_attn, cost_curr, cost_warp]
corr_feat_fr = [
disp_cross_attn,
disp_self_attn_fr,
flow_warp,
(pred_warp > 0).float(),
confidence_warp,
]
corr_feat = torch.cat(corr_feat, dim=1)
corr_feat_fr = torch.cat(corr_feat_fr, dim=1)
return corr_feat, corr_feat_fr
def fuse(self, corr_feat, pred_curr, pred_warp, feat_curr):
"""estimate fusion weights
Args:
corr_feat (Tensor): NxCorrxHxW
pred_curr (Tensor): Nx1xHxW
pred_warp (Tensor): Nx1xHxW
feat_curr (Tensor): NxCxHxW
Returns:
Tensor: fusion weights
"""
pred_curr, pred_warp = (
pred_curr[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
pred_warp[
...,
self.ds_scale // 2 - 1:: self.ds_scale,
self.ds_scale // 2 - 1:: self.ds_scale,
],
)
# compute features
corr = self.conv_corr(corr_feat)
disp = self.conv_disp(torch.cat([pred_curr, pred_warp], dim=1))
mo = self.motion_conv(torch.cat([corr, disp], dim=1))
inp = torch.cat([feat_curr, mo, pred_curr, pred_warp], dim=1)
net = self.residual_conv(inp) + corr # long skip connection
# output fusion weight
fusion_weights = self.weight_head(net)
fusion_weights = F.interpolate(fusion_weights, scale_factor=self.ds_scale)
return fusion_weights
def memory_query(self, outputs, state, *args, **kwargs):
"""query memory state and produce fused disparity"""
left_feat, pred_curr = outputs["left_feat"], outputs["pred_disp"]
feat_curr = self.key_layer(left_feat)
if "memory" not in state:
outputs["left_feat"] = feat_curr # update left feat after projection
else:
# compute input cues
left_img_prev, feat_warp, confidence_warp, pred_warp, flow_warp = state[
"memory"
]
fea_l, fea_r = outputs["left_feat"], outputs["right_feat"]
corr_feat, corr_feat_fr = self.compute_input_cues(
pred_curr,
pred_warp,
feat_curr,
feat_warp,
flow_warp,
confidence_warp,
fea_l,
fea_r,
)
# fuse
fusion_weights = self.fuse(corr_feat, pred_curr, pred_warp, feat_curr)
fusion_weights = (
fusion_weights * (pred_warp > 0.0).float()
) # To prevent gradient shortcut
reset_weights = self.forget_head(corr_feat_fr)
reset_weights = (
reset_weights * (pred_warp > 0.0).float()
) # To prevent gradient shortcut
disp_fused = (
pred_curr * (1 - fusion_weights * reset_weights)
+ pred_warp * fusion_weights * reset_weights
)
outputs["pred_disp"] = disp_fused
outputs["fusion_weights"] = fusion_weights
outputs["reset_weights"] = reset_weights
outputs["pred_curr"] = pred_curr
outputs["pred_warp"] = pred_warp
outputs["left_feat"] = feat_curr
def memory_update(self, outputs, state, *args, **kwargs):
"""update memory state"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
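        # Note: memory_query above unpacks five entries (previous image, warped
        # feature, warped confidence, warped disparity, warped flow); the extra
        # entries are presumably produced by the motion module when it warps
        # this memory to the next frame, outside this file.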
def unfold_feat(self, feat, p=3):
"""unfolding feature for pix-patch correlation
Args:
feat (NxCxHxW): feature to be unfolded
p (int, optional): patch size. Defaults to 3.
Returns:
Tensor: unfolded tensor
"""
B, C, H, W = feat.shape
feat = self.unfold_op(feat) # B,C*p^2,HW
feat = feat.view(B, C, p ** 2, H * W)
return feat
def losses(self, loss, outputs, gt_disp, mask, idx, state, meta):
disp_fused = outputs["pred_disp"]
fusion_weights = outputs["fusion_weights"]
reset_weights = outputs["reset_weights"]
pred_curr = outputs["pred_curr"]
pred_warp = outputs["pred_warp"]
self.loss(
disp_fused,
gt_disp,
fusion_weights,
reset_weights,
pred_curr,
pred_warp,
idx,
loss,
)
def freeze(self):
self.eval()
self.loss.eval()
for param in self.parameters():
param.requires_grad = False
|
CODD-main
|
model/fusion/fusion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from mmseg.models.builder import MODELS
@MODELS.register_module()
class NullFusion(nn.Module):
"""Implements a NULL memory module that does not do anything"""
def __init__(
self,
**kwargs,
):
super(NullFusion, self).__init__()
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
def memory_query(self, outputs, state, *args, **kwargs):
"""This function should update pred disp"""
pass
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
@MODELS.register_module()
class GTFusion(nn.Module):
def __init__(
self,
**kwargs,
):
super(GTFusion, self).__init__()
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
def memory_query(self, outputs, state, *args, **kwargs):
if "memory" in state:
"""This function should update pred disp"""
gt_disp = state["gt_disp"][-1]
pred_disp = outputs["pred_disp"]
_, _, _, pred_disp_warp, _ = state["memory"]
# pad gt size so dimension matches
h, w = pred_disp.shape[-2:]
h_pad, w_pad = h - gt_disp.shape[-2], w - gt_disp.shape[-1]
gt_disp = torch.nn.functional.pad(gt_disp, (0, w_pad, 0, h_pad))
err_curr = (pred_disp.squeeze() - gt_disp).abs()
err_warp = (pred_disp_warp.squeeze() - gt_disp).abs()
pred_disp_fused = torch.empty_like(pred_disp)
# select curr better
mask = (err_curr - err_warp) < -1
pred_disp_fused[mask] = pred_disp[mask]
# select warp better
mask = (err_curr - err_warp) > 1
pred_disp_fused[mask] = pred_disp_warp[mask]
# average two
mask = ((err_curr - err_warp) <= 1) & ((err_curr - err_warp) >= -1)
pred_disp_fused[mask] = (pred_disp[mask] + pred_disp_warp[mask]) / 2
# skip invalid
mask = pred_disp_warp <= 0.0
pred_disp_fused[mask] = pred_disp[mask]
valid_mask = gt_disp > 0.0
pred_disp_fused[~valid_mask] = pred_disp[~valid_mask]
outputs["pred_disp"] = pred_disp_fused
# dummy outputs so we keep everything else consistent
outputs["fusion_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["fusion_weights"].requires_grad = True
outputs["reset_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["pred_curr"] = pred_disp
outputs["pred_warp"] = pred_disp_warp
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
@MODELS.register_module()
class KalmanFusion(nn.Module):
def __init__(
self,
R=1e-5,
Q=1e-5,
**kwargs,
):
"""
        R: measurement variance, decrease to upweight the current estimate
        Q: process variance, decrease to downweight the current estimate
"""
super(KalmanFusion, self).__init__()
self.R = R
self.Q = Q
self.loss = None
def init_weights(self, pretrained=None):
pass
def forward(self, x):
"""This function should never be called"""
pass
def memory_query(self, outputs, state, *args, **kwargs):
if "memory" in state:
"""This function should update pred disp"""
_, _, _, pred_disp_warp, _ = state["memory"]
pred_disp = outputs["pred_disp"]
if self.P is None: # P has not been initialized:
self.P = torch.zeros_like(pred_disp).to(
pred_disp.device
) # we process each pixel individually
Pminus = self.P + self.Q
# measurement update
K = Pminus / (Pminus + self.R)
pred_disp_fused = pred_disp_warp + K * (
pred_disp - pred_disp_warp
) # weighted sum
outliers = (pred_disp_warp - pred_disp).abs() > 1
pred_disp_fused[pred_disp_warp <= 0.0] = pred_disp[
pred_disp_warp <= 0.0
] # skip invalid
pred_disp_fused[outliers] = pred_disp[outliers] # skip assumed outliers
outputs["pred_disp"] = pred_disp_fused
# dummy outputs so we keep everything else consistent
outputs["fusion_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["fusion_weights"].requires_grad = True
outputs["reset_weights"] = torch.zeros_like(pred_disp).to(pred_disp.device)
outputs["pred_curr"] = pred_disp
outputs["pred_warp"] = pred_disp_warp
else:
self.P = None
def memory_update(self, outputs, state, *args, **kwargs):
"""This function should update memory"""
state["memory"] = [
outputs["left_img"],
outputs["left_feat"],
outputs["pred_disp"].squeeze(1),
]
|
CODD-main
|
model/fusion/others.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import LOSSES
@LOSSES.register_module()
class FusionLoss(nn.Module):
def __init__(
self, min_disp=1, max_disp=192, loss_weight=(1.0), wr_weight=1.0, wf_weight=1.0
):
"""fusion loss
Args:
min_disp (int, optional): minimum valid disparity. Defaults to 1.
max_disp (int, optional): maximum valid disparity. Defaults to 192.
            loss_weight (float or tuple, optional): weight of the loss. Defaults to 1.0.
wr_weight (float, optional): weight of reset weight loss. Defaults to 1.0.
wf_weight (float, optional): weight of fusion weight loss. Defaults to 1.0.
"""
super(FusionLoss, self).__init__()
self.min_disp = min_disp
self.max_disp = max_disp
self.loss_weight = loss_weight
self.wr_weight = wr_weight
self.wf_weight = wf_weight
self.C1 = 1.0
self.C2 = 5.0
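        # C1 / C2 are disparity-error margins (in pixels): when the current and
        # warped errors differ by less than the margin, the two estimates are
        # treated as equally good in fusion_weight_loss / reset_weight_loss.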
def fusion_weight_loss(self, disp_curr, disp_warp, gt_disp, weight_warp, mask):
weight_curr = 1 - weight_warp
err_curr = torch.abs(disp_curr - gt_disp)
err_warp = torch.abs(disp_warp - gt_disp)
curr_closer_mask = (err_curr - err_warp) < -self.C1
curr_further_mask = (err_curr - err_warp) > self.C1
curr_same_mask = (err_curr - err_warp).abs() <= self.C1
wf_loss_closer = torch.mean(
weight_warp[curr_closer_mask & mask]
) # curr closer, impose loss on warp
wf_loss_further = torch.mean(
weight_curr[curr_further_mask & mask]
) # curr further, impose loss on curr
wf_loss_same = torch.mean(
torch.abs(weight_curr[curr_same_mask & mask] - 0.5)
) # constrain the weights in roughly even region
return wf_loss_closer, wf_loss_further, wf_loss_same
def reset_weight_loss(self, disp_curr, disp_warp, gt_disp, weight_warp, mask):
weight_curr = 1 - weight_warp
err_curr = torch.abs(disp_curr - gt_disp)
err_warp = torch.abs(disp_warp - gt_disp)
curr_closer_mask = (err_curr - err_warp) < -self.C2
curr_further_mask = (err_curr - err_warp) > self.C2
wr_loss_closer = torch.mean(
weight_warp[curr_closer_mask & mask]
) # curr closer, impose loss on warp
wr_loss_further = torch.mean(
weight_curr[curr_further_mask & mask]
) # curr further, impose loss on curr
return wr_loss_closer, wr_loss_further
def forward(
self,
pred_disp,
gt_disp,
fusion_weight,
reset_weight,
disp_curr,
disp_warp,
idx,
loss,
**kwargs,
):
if torch.any(torch.tensor(self.loss_weight) > 0.0):
mask = (gt_disp >= self.min_disp) & (gt_disp <= self.max_disp)
disp_loss = F.smooth_l1_loss(
pred_disp[mask], gt_disp[mask], reduction="mean"
)
mask = mask & (disp_warp > 0) # impose loss on valid projection only
wf_loss_closer, wf_loss_further, wf_loss_same = self.fusion_weight_loss(
disp_curr, disp_warp, gt_disp, fusion_weight, mask
)
wr_loss_closer, wr_loss_further = self.reset_weight_loss(
disp_curr, disp_warp, gt_disp, reset_weight, mask
)
total_loss = (
disp_loss
+ (wf_loss_closer + wf_loss_further + wf_loss_same * 0.2)
* self.wf_weight
+ (wr_loss_closer + wr_loss_further) * self.wr_weight
)
loss["loss_temporal{}".format(idx)] = total_loss * self.loss_weight
else:
loss["loss_temporal{}".format(idx)] = torch.tensor(
[0.0], requires_grad=True, device=gt_disp.device
)
return
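# Illustrative sketch (not part of the original CODD code): fusion_weight_loss and
# reset_weight_loss partition pixels by how much closer the current prediction is to
# the ground truth than the warped one, using the margins C1 (1 px) and C2 (5 px).
# The hypothetical snippet below reproduces that partition on toy per-pixel errors.
def _weight_loss_masks_sketch():
    import torch

    err_curr = torch.tensor([0.2, 3.0, 0.5, 6.0])
    err_warp = torch.tensor([2.0, 0.5, 0.8, 0.5])
    C1 = 1.0
    diff = err_curr - err_warp
    curr_closer = diff < -C1          # the warp weight is penalized here
    curr_further = diff > C1          # the current weight is penalized here
    roughly_same = diff.abs() <= C1   # both weights are pushed towards 0.5 here
    return curr_closer, curr_further, roughly_same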
@LOSSES.register_module()
class MotionLoss(nn.Module):
def __init__(self, loss_weight=(1.0)):
super(MotionLoss, self).__init__()
self.loss_weight = loss_weight
self.RV_WEIGHT = 0.2
self.DZ_WEIGHT = 210.0
self.gamma = 0.9
def forward(self, flow2d_est, flow2d_rev, flow_gt, mask, idx, loss, **kwargs):
valid_mask = mask.permute(0, 2, 3, 1)
N = len(flow2d_est)
loss_total = 0.0
for i in range(N):
w = self.gamma ** (N - i - 1)
fl_rev = flow2d_rev[i]
fl_est, dz_est = flow2d_est[i].split([2, 1], dim=-1)
fl_gt, dz_gt = flow_gt.split([2, 1], dim=-1)
loss_total += w * (valid_mask * (fl_est - fl_gt).abs()).mean()
loss_total += (
w * self.DZ_WEIGHT * (valid_mask * (dz_est - dz_gt).abs()).mean()
)
loss_total += (
w * self.RV_WEIGHT * (valid_mask * (fl_rev - fl_gt).abs()).mean()
)
loss["loss_warp{}".format(idx)] = loss_total * self.loss_weight
with torch.no_grad():
epe_2d = (fl_est - fl_gt).norm(dim=-1)
epe_2d = epe_2d.view(-1)[valid_mask.view(-1)]
epe_dz = (dz_est - dz_gt).norm(dim=-1)
epe_dz = epe_dz.view(-1)[valid_mask.view(-1)]
metrics = {
"epe2d_warp{}".format(idx): epe_2d.mean(),
"epedz_warp{}".format(idx): epe_dz.mean(),
"1px_warp{}".format(idx): (epe_2d < 1).float().mean(),
"3px_warp{}".format(idx): (epe_2d < 3).float().mean(),
"5px_warp{}".format(idx): (epe_2d < 5).float().mean(),
}
loss.update(metrics)
return
|
CODD-main
|
model/losses/temporal.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import LOSSES
def subpix_cost(cost: torch.Tensor, disp: torch.Tensor, maxdisp: int):
"""
phi, e.g. eqt(9) in HITNet paper
:param cost:
:param disp:
:return:
"""
# pdb.set_trace()
disp[disp >= maxdisp - 1] = maxdisp - 2
disp[disp < 0] = 0
disp_floor = disp.floor()
sub_cost = (disp - disp_floor) * torch.gather(cost, 1, disp_floor.long() + 1) + (
disp_floor + 1 - disp
) * torch.gather(cost, 1, disp_floor.long())
# pdb.set_trace()
return sub_cost
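# Illustrative sketch (not part of the original CODD code): subpix_cost linearly
# interpolates the cost volume between the two integer disparities around `disp`,
# i.e. phi(d) = (d - floor(d)) * cost[floor(d) + 1] + (floor(d) + 1 - d) * cost[floor(d)].
# A hypothetical check on a tiny one-pixel cost volume where cost[d] = d:
def _subpix_cost_sketch():
    import torch

    cost = torch.arange(8, dtype=torch.float32).view(1, 8, 1, 1)
    disp = torch.full((1, 1, 1, 1), 2.3)
    # expected: 0.3 * cost[3] + 0.7 * cost[2] = 0.3 * 3 + 0.7 * 2 = 2.3
    return subpix_cost(cost, disp.clone(), maxdisp=8)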
def get_non_match_disp(pred_init_cost: torch.Tensor, d_gt: torch.Tensor):
"""
HITNet paper, eqt (11)
:param pred_init_cost: B, D, H, W
:param d_gt: B, 1, H, W
:return: LongTensor: min_non_match_disp: B, 1, H, W
"""
B, D, H, W = pred_init_cost.size()
disp_cand = (
torch.arange(0, D, step=1, device=d_gt.device).view(1, -1, 1, 1).repeat(B, 1, H, W).float()
)
match_disp_lower_bound = d_gt - 1.5
match_disp_upper_bound = d_gt + 1.5
tmp_cost = torch.where(
(disp_cand < match_disp_lower_bound) | (disp_cand > match_disp_upper_bound),
pred_init_cost,
torch.tensor(float("inf"), device=d_gt.device),
)
# pdb.set_trace()
__, min_non_match_disp = torch.min(tmp_cost, dim=1, keepdim=True)
# pdb.set_trace()
return min_non_match_disp
def echo_loss(x, alpha, c):
"""
https://arxiv.org/pdf/1701.03077.pdf
An amazing loss function presented in paper: A General and Adaptive Robust Loss Function (CVPR 2019).
The name prefix 'echo' is the name of a hero in Overwatch who can become any other hero during her ultimate
:param x: torch.Tensor
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: loss
"""
loss = (abs(alpha - 2) / alpha) * ((((x / c) ** 2) / abs(alpha - 2) + 1) ** (alpha / 2) - 1)
return loss
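# Illustrative sketch (not part of the original CODD code): for alpha close to 2 the
# general robust loss behaves like 0.5 * (x / c) ** 2, while smaller alpha values damp
# the penalty on large residuals. The hypothetical comparison below shows this.
def _echo_loss_sketch():
    import torch

    x = torch.tensor([0.05, 0.5, 5.0])
    near_quadratic = echo_loss(x, alpha=2.0 + 1e-6, c=0.1)  # ~ 0.5 * (x / c) ** 2
    robust = echo_loss(x, alpha=0.9, c=0.1)                 # grows much more slowly for large x
    return near_quadratic, robust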
@LOSSES.register_module()
class HITLoss(nn.Module):
"""
https://arxiv.org/pdf/2007.12140.pdf
"""
def __init__(
self, max_disp=320, lambda_init=1, lambda_prop=1, lambda_slant=1, lambda_w=1, alpha=0.9, c=0.1
):
super(HITLoss, self).__init__()
self.maxdisp = max_disp
self.lambda_init = lambda_init
self.lambda_prop = lambda_prop
self.lambda_slant = lambda_slant
self.lambda_w = lambda_w
self.alpha = alpha
self.c = c
# edge_kx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
# edge_ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
# edge_k = np.stack((edge_kx, edge_ky))
# edge_k = torch.from_numpy(edge_k).float().view(2, 1, 3, 3)
# self.register_buffer("edge_k", edge_k)
A = torch.zeros(81, 3)
for i in range(81):
A[i, 0] = i // 9 - 4
A[i, 1] = i % 9 - 4
A[i, 2] = 1
A_T = A.t()
A_inverse = (A_T.mm(A)).inverse()
# B = (A_t*A)^-1*A_t
B = A_inverse.mm(A_T)
convy_weight = torch.unsqueeze(torch.unsqueeze(B[0, :].view(9, 9), dim=0), dim=0)
convx_weight = torch.unsqueeze(torch.unsqueeze(B[1, :].view(9, 9), dim=0), dim=0)
self.convy = nn.Conv2d(1, 1, 9, stride=1, padding=4, bias=False)
self.convy.weight = nn.Parameter(convy_weight)
self.convx = nn.Conv2d(1, 1, 9, stride=1, padding=4, bias=False)
self.convx.weight = nn.Parameter(convx_weight)
def img_grad(self, img):
img_grad = F.conv2d(img, self.edge_k, padding=1)
img_dx = img_grad[:, 0, :, :].contiguous().view_as(img) # h direction
img_dy = img_grad[:, 1, :, :].contiguous().view_as(img) # w direction
return img_dx, img_dy
def init_loss(self, pred_init_cost: torch.Tensor, d_gt: torch.Tensor, maxdisp, beta=1):
"""
Initialization loss, HITNet paper eqt(10
:param pred_init_cost:
:param d_gt:
:param beta:
:return: init loss [B*1*H*W]
"""
cost_gt = subpix_cost(pred_init_cost, d_gt, maxdisp)
cost_nm = torch.gather(pred_init_cost, 1, get_non_match_disp(pred_init_cost, d_gt))
loss = cost_gt + F.relu(beta - cost_nm)
return loss
def prop_loss(self, d_diff, A=1, alpha=1, c=0.1):
"""
        Loss from HITNet Eq. (12)
:param d_diff: |d^gt - d^|
:param A: The truncation value
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: L^prop [B*1*H*W]
"""
loss = echo_loss(torch.clamp(d_diff, max=A), alpha, c)
# pdb.set_trace()
return loss
def slant_loss(self, dx, dy, dx_gt, dy_gt, d_diff, mask, B=1):
closer_mask = d_diff < B
mask = mask * closer_mask # mask and
slant_diff = torch.cat([dx_gt - dx, dy_gt - dy], dim=1)
loss = torch.norm(slant_diff, p=1, dim=1, keepdim=True)[mask]
return loss # 1-dim vector
def w_loss(self, conf, diff, mask, C1=1, C2=1.5):
"""
:param conf: aka omega
:param diff: |d^gt - d^|
:param C1:
:param C2:
:return: torch.Tensor: loss
"""
closer_mask = diff < C1
further_mask = diff > C2
mask = mask * (closer_mask + further_mask) # mask and
closer_item = F.relu(1 - conf)
further_item = F.relu(conf)
# pdb.set_trace()
loss = closer_item * closer_mask.float() + further_item * further_mask.float()
return loss[mask] # 1-dim vector
def forward(
self, init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt, seg_gt=None
):
"""
:param init_cv_cost_pyramid:
:param prop_disp_pyramid:
:param slant_pyramid:
:param w_pyramid:
:param d_gt:
:param maxdisp:
:param loss_init:
:param loss_prop:
:param loss_slant:
:param loss_w:
:param lambda_init:
:param lambda_prop:
:param lambda_slant:
:param lambda_w:
:return:
"""
# dx_gt, dy_gt = self.img_grad(d_gt)
if seg_gt is not None:
d_gt[seg_gt == 0] = 0
dx_gt = self.convx(d_gt)
dy_gt = self.convy(d_gt)
d_gt_pyramid = []
for i in range(len(init_cv_cost_pyramid)):
scale = 4 * (2 ** i) # 4,8,16,32,64
d_gt_pyramid.append(torch.nn.MaxPool2d(scale, scale)(d_gt) / (scale / 4))
d_gt_pyramid.reverse() # disp ground truth generation. From small to large.
init_loss_pyramid = []
for i, cv in enumerate(init_cv_cost_pyramid):
# pdb.set_trace()
mask = (d_gt_pyramid[i] > 0) & (
d_gt_pyramid[i] < self.maxdisp / (2 ** (len(init_cv_cost_pyramid) - 1 - i))
)
init_loss_pyramid.append(
self.lambda_init
* self.init_loss(
cv, d_gt_pyramid[i], self.maxdisp / (2 ** (len(init_cv_cost_pyramid) - 1 - i))
)[mask]
)
# pdb.set_trace()
init_loss_vec = torch.cat(init_loss_pyramid, dim=0) # 1-dim vector
# pdb.set_trace()
prop_loss_pyramid = [] # masked
prop_diff_pyramid = [] # not masked
mask = (d_gt > 0) & (d_gt < self.maxdisp)
prop_loss_weights = [
1 / 64,
1 / 32,
1 / 32,
1 / 16,
1 / 16,
1 / 8,
1 / 8,
1 / 4,
1 / 4,
1 / 4,
1 / 2,
1,
]
A = [1] * 9 + [10000] * 3
for i, disp in enumerate(prop_disp_pyramid):
prop_diff_pyramid.append(torch.abs(d_gt - disp))
prop_loss_pyramid.append(
self.lambda_prop
* prop_loss_weights[i]
* self.prop_loss(prop_diff_pyramid[-1], A=A[i], alpha=self.alpha, c=self.c)[mask]
)
# pdb.set_trace()
prop_loss_vec = torch.cat(prop_loss_pyramid, dim=0)
# pdb.set_trace()
slant_loss_pyramid = []
slant_loss_weights = [
1 / 64,
1 / 32,
1 / 32,
1 / 16,
1 / 16,
1 / 8,
1 / 8,
1 / 4,
1 / 4,
1 / 4,
1 / 2,
1,
]
for i in range(len(dx_pyramid)):
slant_loss_pyramid.append(
self.lambda_slant
* slant_loss_weights[i]
* self.slant_loss(
dx_pyramid[i], dy_pyramid[i], dx_gt, dy_gt, prop_diff_pyramid[i], mask
)
)
slant_loss_vec = torch.cat(slant_loss_pyramid, dim=0)
# pdb.set_trace()
w_loss_pyramid = []
w_loss_weights = [1 / 32, 1 / 32, 1 / 16, 1 / 16, 1 / 8, 1 / 8, 1 / 4, 1 / 4]
for i, w in enumerate(w_pyramid):
w_loss_pyramid.append(
self.lambda_w
* w_loss_weights[i]
* self.w_loss(
w, prop_diff_pyramid[i + 1], mask
) # index for prop_diff_pyramid plus 1 since there is no confidence at 1st level
)
w_loss_vec = torch.cat(w_loss_pyramid, dim=0)
# pdb.set_trace()
total_loss_vec = torch.cat(
[init_loss_vec, prop_loss_vec, slant_loss_vec, w_loss_vec], dim=0
)
# pdb.set_trace()
losses = {
"init_loss": torch.mean(init_loss_vec),
"prop_loss": torch.mean(prop_loss_vec),
"slant_loss": torch.mean(slant_loss_vec),
"w_loss": torch.mean(w_loss_vec),
}
return torch.mean(total_loss_vec), losses
@LOSSES.register_module()
class HITLossWithDepth(HITLoss):
"""
https://arxiv.org/pdf/2007.12140.pdf
"""
def __init__(
self, lambda_depth=1, lambda_depth_grad=1, lambda_depth_normal=1, eps=1e-8, **kwargs
):
super(HITLossWithDepth, self).__init__(**kwargs)
self.lambda_depth = lambda_depth
self.lambda_depth_grad = lambda_depth_grad
self.lambda_depth_normal = lambda_depth_normal
self.calib = 1
self.eps = eps
self.cos = nn.CosineSimilarity(dim=1, eps=self.eps)
def edgenorm_loss(self, depth, target, mask):
depth = depth * mask.float()
target = target * mask.float()
target_grad_dx, target_grad_dy = self.img_grad(target)
depth_grad_dx, depth_grad_dy = self.img_grad(depth)
ones = torch.ones_like(depth)[mask]
depth_normal = torch.stack((-depth_grad_dx[mask], -depth_grad_dy[mask], ones), dim=1)
target_normal = torch.stack((-target_grad_dx[mask], -target_grad_dy[mask], ones), dim=1)
loss_dx = self.comp_err(depth_grad_dx[mask], target_grad_dx[mask]).mean()
loss_dy = self.comp_err(depth_grad_dy[mask], target_grad_dy[mask]).mean()
loss_normal = torch.abs(1 - self.cos(target_normal, depth_normal)).mean()
return loss_dx + loss_dy, loss_normal
def comp_err(self, depth, target):
return torch.log(torch.abs(depth - target).add(1.0))
def forward(
self, init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt
):
hitnet_loss, loss_dict = super(HITLossWithDepth, self).forward(
init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid, d_gt
)
loss_dict['hitnet_loss'] = hitnet_loss
pred_disp = prop_disp_pyramid[-1]
pred_depth = self.calib / (pred_disp + self.eps)
target_depth = self.calib / (d_gt + self.eps)
mask = (d_gt > 0) & (d_gt < self.maxdisp)
depth_loss = self.lambda_depth * self.comp_err(pred_depth[mask], target_depth[mask]).mean()
loss_dict["depth_loss"] = depth_loss
total_loss = hitnet_loss + depth_loss
if (self.lambda_depth_grad > 0) or (self.lambda_depth_normal > 0):
grad_loss, normal_loss = self.edgenorm_loss(pred_depth, target_depth, mask)
depth_grad_loss = self.lambda_depth_grad * grad_loss
depth_normal_loss = self.lambda_depth_normal * normal_loss
total_loss = total_loss + depth_grad_loss + depth_normal_loss
loss_dict.update({"depth_grad_loss": depth_grad_loss, "depth_normal_loss": depth_normal_loss})
return total_loss, loss_dict
|
CODD-main
|
model/losses/hitnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .hitnet import *
from .temporal import *
|
CODD-main
|
model/losses/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .hitnet import HITNetMF
|
CODD-main
|
model/stereo/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from mmseg.models.builder import BACKBONES
def conv_down(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv_up(inp, oup):
return nn.Sequential(
nn.ConvTranspose2d(inp, oup, 2, stride=2, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv_merge(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(oup, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def conv(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
@BACKBONES.register_module()
class HITUNet(nn.Module):
def __init__(self):
super().__init__()
# 16,16,24,24,32
self.conv1 = conv(3, 16)
self.down1 = conv_down(16, 16)
self.down2 = conv_down(16, 24)
self.down3 = conv_down(24, 24)
self.down4 = nn.Sequential(
conv_down(24, 32),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.up4 = conv_up(32, 24)
self.up3 = conv_up(24, 24)
self.up2 = conv_up(24, 16)
self.up1 = conv_up(16, 16)
self.merge4 = conv_merge(24 + 24, 24)
self.merge3 = conv_merge(24 + 24, 24)
self.merge2 = conv_merge(16 + 16, 16)
self.merge1 = conv_merge(16 + 16, 16)
def forward(self, x):
        x_down = self.conv1(x)  # 16 x H x W
        x_down1 = self.down1(x_down)  # 16 x H/2 x W/2
        x_down2 = self.down2(x_down1)  # 24 x H/4 x W/4
        x_down3 = self.down3(x_down2)  # 24 x H/8 x W/8
        x_down4 = self.down4(x_down3)  # 32 x H/16 x W/16
        x_up4 = self.up4(x_down4)
        x_up4 = self.merge4(torch.cat((x_down3, x_up4), dim=1))  # 24 x H/8 x W/8
        x_up3 = self.up3(x_up4)
        x_up3 = self.merge3(torch.cat((x_down2, x_up3), dim=1))  # 24 x H/4 x W/4
        x_up2 = self.up2(x_up3)
        x_up2 = self.merge2(torch.cat((x_down1, x_up2), dim=1))  # 16 x H/2 x W/2
        x_up1 = self.up1(x_up2)
        x_up1 = self.merge1(torch.cat((x_down, x_up1), dim=1))  # 16 x H x W
return [x_down4, x_up4, x_up3, x_up2, x_up1]
|
CODD-main
|
model/stereo/hitnet/backbone.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from utils import thres_metric
from ...builder import ESTIMATORS
@ESTIMATORS.register_module()
class HITNetMF(nn.Module):
"""Implementation of HITNet
    <https://arxiv.org/abs/2007.12140>.
"""
def __init__(
self,
backbone,
initialization,
propagation,
loss=None,
):
super(HITNetMF, self).__init__()
self.backbone = builder_oss.build_backbone(backbone)
self.tile_init = MODELS.build(initialization)
self.tile_update = MODELS.build(propagation)
self.freezed = False
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in stereo network"
% (n_parameters / 1000 ** 2)
)
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
return x
def losses(self, loss, outputs, gt_disp, mask_disp, idx, gt_semantic_seg, meta):
init_cv_pyramid = outputs["init_cv_pyramid"]
prop_disp_pyramid = outputs["prop_disp_pyramid"]
dx_pyramid = outputs["dx_pyramid"]
dy_pyramid = outputs["dy_pyramid"]
w_pyramid = outputs["w_pyramid"]
loss["loss_disp{}".format(idx)], loss_dict = self.loss(
init_cv_pyramid,
prop_disp_pyramid,
dx_pyramid,
dy_pyramid,
w_pyramid,
gt_disp,
gt_semantic_seg,
)
for k, v in loss_dict.items():
loss[k + "{}".format(idx)] = v
with torch.no_grad():
pred_disp = outputs["pred_disp"]
loss["epe{}".format(idx)] = F.l1_loss(
gt_disp[mask_disp], pred_disp[mask_disp], reduction="mean"
)
loss["thres3"] = thres_metric(pred_disp, gt_disp, mask_disp, 3.0)
def stereo_matching(self, left_img, right_img, img_metas=None, state=None):
left_fea_pyramid = self.extract_feat(left_img)
right_fea_pyramid = self.extract_feat(right_img)
init_cv_pyramid, init_tile_pyramid = self.tile_init(
left_fea_pyramid, right_fea_pyramid
)
outputs = self.tile_update(
left_fea_pyramid, right_fea_pyramid, init_tile_pyramid
)
if self.training and not self.freezed:
outputs["init_cv_pyramid"] = init_cv_pyramid
outputs["pred_disp"] = outputs["prop_disp_pyramid"][-1]
outputs["left_feat"] = left_fea_pyramid[2]
outputs["right_feat"] = right_fea_pyramid[2]
else:
outputs = dict(
pred_disp=outputs,
left_feat=left_fea_pyramid[2],
right_feat=right_fea_pyramid[2],
)
outputs["left_img"] = left_img
if len(outputs["pred_disp"].shape) == 3:
outputs["pred_disp"] = outputs["pred_disp"].unsqueeze(1)
return outputs
def freeze(self):
self.tile_update.eval()
for param in self.tile_update.parameters():
param.requires_grad = False
self.tile_init.eval()
for param in self.tile_init.parameters():
param.requires_grad = False
feature_extractor = (
self.backbone if self.backbone is not None else self.feature_extractor
)
feature_extractor.eval()
for param in feature_extractor.parameters():
param.requires_grad = False
self.loss.eval()
for param in self.loss.parameters():
param.requires_grad = False
self.freezed = True
|
CODD-main
|
model/stereo/hitnet/hitnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .backbone import HITUNet
from .initialization import TileInitialization
from .propagation import TilePropagation
from .hitnet import HITNetMF
|
CODD-main
|
model/stereo/hitnet/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models.builder import MODELS
def make_grid(h, w, device):
gridh = torch.arange(h, device=device).float()
gridw = torch.arange(w, device=device).float()
gridh, gridw = torch.meshgrid(gridh, gridw)
grid = torch.stack((gridw, gridh), dim=-1)
return grid
def calc_init_disp(feature_left, feature_right, max_disp):
# n,c,h,w = feature_left.size()
grid = make_grid(feature_left.size(2), feature_right.size(3), feature_right.device)
a = torch.zeros((grid.shape[0], grid.shape[1], 1), device=grid.device)
grid = torch.cat((grid, a), dim=-1)
grid = grid.unsqueeze(0).unsqueeze(0).float()
# grid[:, :, :, :, 0] = grid[:,:,:,:,0] -1
grid[:, :, :, :, 0] = grid[:, :, :, :, 0] / (feature_right.size(3) - 1) * 2 - 1
grid[:, :, :, :, 1] = grid[:, :, :, :, 1] / (feature_right.size(2) - 1) * 2 - 1
grid = grid[:, ::4, :, ::4, :]
grid = grid.repeat(feature_right.size(0), max_disp, 1, 1, 1)
max_disp = torch.arange(max_disp, dtype=grid.dtype, device=grid.device)
max_disp = max_disp / (feature_right.size(3) - 1) * 2
max_disp = max_disp.view(1, -1, 1, 1)
grid[:, :, :, :, 0] = grid[:, :, :, :, 0] - max_disp
feature_right = feature_right.unsqueeze(2)
# size = n,c,d,h,w
feature_right = F.grid_sample(
feature_right, grid, mode="nearest", align_corners=True, padding_mode="zeros"
)
cv = feature_left.unsqueeze(2) - feature_right
cv = torch.norm(cv, p=1, dim=1)
return cv
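# Illustrative sketch (not part of the original CODD code): calc_init_disp builds an L1
# matching cost volume, cost[d](x) = || feat_l(x) - feat_r(x - d) ||_1, sampled with
# grid_sample on the tile grid (the left/right features used here have asymmetric strides).
# The hypothetical reference below computes the same quantity for same-resolution
# features with explicit shifting instead of grid_sample.
def _naive_cost_volume(feat_l, feat_r, max_disp):
    import torch
    import torch.nn.functional as F

    costs = []
    for d in range(max_disp):
        # shift the right features d pixels to the right, zero-padding on the left
        shifted = F.pad(feat_r, (d, 0))[..., : feat_r.shape[-1]]
        costs.append((feat_l - shifted).abs().sum(dim=1))  # L1 over channels
    return torch.stack(costs, dim=1)  # B x D x H x W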
@MODELS.register_module()
class TileInitialization(nn.Module):
"""
Tile hypothesis initialization
input: dual feature pyramid
output: initial tile hypothesis pyramid
"""
def __init__(self, max_disp, fea_c=[16, 16, 24, 24, 32]):
super().__init__()
self.maxdisp = max_disp
fea_c1x, fea_c2x, fea_c4x, fea_c8x, fea_c16x = fea_c
self.pad = nn.ZeroPad2d((0, 3, 0, 0))
self.tile_conv1x = nn.Sequential(
nn.Conv2d(fea_c1x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv2x = nn.Sequential(
nn.Conv2d(fea_c2x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv4x = nn.Sequential(
nn.Conv2d(fea_c4x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv8x = nn.Sequential(
nn.Conv2d(fea_c8x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.tile_conv16x = nn.Sequential(
nn.Conv2d(fea_c16x, 16, 4, 4, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(16, 16, 1, 1, 0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
# D in Eq. (4)
self.tile_fea_dscrpt16x = nn.Sequential(
nn.Conv2d(17, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt8x = nn.Sequential(
nn.Conv2d(17, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt4x = nn.Sequential(
nn.Conv2d(33, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt2x = nn.Sequential(
nn.Conv2d(25, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
self.tile_fea_dscrpt1x = nn.Sequential(
nn.Conv2d(25, 13, 1),
nn.LeakyReLU(negative_slope=0.2, inplace=True)
)
def tile_features(self, fea_l, fea_r):
tile_fea_l1x = self.tile_conv1x(fea_l[-1])
padded_fea_r1x = self.pad(fea_r[-1])
self.tile_conv1x[0].stride = (4, 1)
tile_fea_r1x = self.tile_conv1x(padded_fea_r1x)
self.tile_conv1x[0].stride = (4, 4)
tile_fea_l2x = self.tile_conv2x(fea_l[-2])
padded_fea_r2x = self.pad(fea_r[-2])
self.tile_conv2x[0].stride = (4, 1)
tile_fea_r2x = self.tile_conv2x(padded_fea_r2x)
self.tile_conv2x[0].stride = (4, 4)
tile_fea_l4x = self.tile_conv4x(fea_l[-3])
padded_fea_r4x = self.pad(fea_r[-3])
self.tile_conv4x[0].stride = (4, 1)
tile_fea_r4x = self.tile_conv4x(padded_fea_r4x)
self.tile_conv4x[0].stride = (4, 4)
tile_fea_l8x = self.tile_conv8x(fea_l[-4])
padded_fea_r8x = self.pad(fea_r[-4])
self.tile_conv8x[0].stride = (4, 1)
tile_fea_r8x = self.tile_conv8x(padded_fea_r8x)
self.tile_conv8x[0].stride = (4, 4)
tile_fea_l16x = self.tile_conv16x(fea_l[-5])
padded_fea_r16x = self.pad(fea_r[-5])
self.tile_conv16x[0].stride = (4, 1)
tile_fea_r16x = self.tile_conv16x(padded_fea_r16x)
self.tile_conv16x[0].stride = (4, 4)
return [
[tile_fea_l16x, tile_fea_r16x],
[tile_fea_l8x, tile_fea_r8x],
[tile_fea_l4x, tile_fea_r4x],
[tile_fea_l2x, tile_fea_r2x],
[tile_fea_l1x, tile_fea_r1x],
]
def tile_hypothesis_pyramid(self, tile_feature_pyramid, fea_l_pyramid):
# Eq. (2)
init_tile_cost16x = calc_init_disp(tile_feature_pyramid[0][0], tile_feature_pyramid[0][1], self.maxdisp // 16)
init_tile_cost8x = calc_init_disp(tile_feature_pyramid[1][0], tile_feature_pyramid[1][1], self.maxdisp // 8)
init_tile_cost4x = calc_init_disp(tile_feature_pyramid[2][0], tile_feature_pyramid[2][1], self.maxdisp // 4)
init_tile_cost2x = calc_init_disp(tile_feature_pyramid[3][0], tile_feature_pyramid[3][1], self.maxdisp // 2)
init_tile_cost1x = calc_init_disp(tile_feature_pyramid[4][0], tile_feature_pyramid[4][1], self.maxdisp)
# Eq. (3)
min_tile_cost16x, min_tile_disp16x = torch.min(init_tile_cost16x, 1)
min_tile_cost8x, min_tile_disp8x = torch.min(init_tile_cost8x, 1)
min_tile_cost4x, min_tile_disp4x = torch.min(init_tile_cost4x, 1)
min_tile_cost2x, min_tile_disp2x = torch.min(init_tile_cost2x, 1)
min_tile_cost1x, min_tile_disp1x = torch.min(init_tile_cost1x, 1)
min_tile_cost16x = torch.unsqueeze(min_tile_cost16x, 1)
min_tile_cost8x = torch.unsqueeze(min_tile_cost8x, 1)
min_tile_cost4x = torch.unsqueeze(min_tile_cost4x, 1)
min_tile_cost2x = torch.unsqueeze(min_tile_cost2x, 1)
min_tile_cost1x = torch.unsqueeze(min_tile_cost1x, 1)
min_tile_disp16x = min_tile_disp16x.float().unsqueeze(1)
min_tile_disp8x = min_tile_disp8x.float().unsqueeze(1)
min_tile_disp4x = min_tile_disp4x.float().unsqueeze(1)
min_tile_disp2x = min_tile_disp2x.float().unsqueeze(1)
min_tile_disp1x = min_tile_disp1x.float().unsqueeze(1)
# Eq. (4)
tile_dscrpt16x = self.tile_fea_dscrpt16x(torch.cat([min_tile_cost16x, tile_feature_pyramid[0][0]], 1))
tile_dscrpt8x = self.tile_fea_dscrpt8x(torch.cat([min_tile_cost8x, tile_feature_pyramid[1][0]], 1))
tile_dscrpt4x = self.tile_fea_dscrpt4x(torch.cat([min_tile_cost4x, fea_l_pyramid[0]], 1))
tile_dscrpt2x = self.tile_fea_dscrpt2x(torch.cat([min_tile_cost2x, fea_l_pyramid[1]], 1))
tile_dscrpt1x = self.tile_fea_dscrpt1x(torch.cat([min_tile_cost1x, fea_l_pyramid[2]], 1))
tile_dx16x = torch.zeros_like(min_tile_disp16x)
tile_dx8x = torch.zeros_like(min_tile_disp8x)
tile_dx4x = torch.zeros_like(min_tile_disp4x)
tile_dx2x = torch.zeros_like(min_tile_disp2x)
tile_dx1x = torch.zeros_like(min_tile_disp1x)
tile_dy16x = torch.zeros_like(min_tile_disp16x)
tile_dy8x = torch.zeros_like(min_tile_disp8x)
tile_dy4x = torch.zeros_like(min_tile_disp4x)
tile_dy2x = torch.zeros_like(min_tile_disp2x)
tile_dy1x = torch.zeros_like(min_tile_disp1x)
tile_hyp16x = torch.cat([min_tile_disp16x, tile_dx16x, tile_dy16x, tile_dscrpt16x], 1)
tile_hyp8x = torch.cat([min_tile_disp8x, tile_dx8x, tile_dy8x, tile_dscrpt8x], 1)
tile_hyp4x = torch.cat([min_tile_disp4x, tile_dx4x, tile_dy4x, tile_dscrpt4x], 1)
tile_hyp2x = torch.cat([min_tile_disp2x, tile_dx2x, tile_dy2x, tile_dscrpt2x], 1)
tile_hyp1x = torch.cat([min_tile_disp1x, tile_dx1x, tile_dy1x, tile_dscrpt1x], 1)
return [
[
init_tile_cost16x,
init_tile_cost8x,
init_tile_cost4x,
init_tile_cost2x,
init_tile_cost1x,
],
[
tile_hyp16x,
tile_hyp8x,
tile_hyp4x,
tile_hyp2x,
tile_hyp1x,
]
]
def forward(self, fea_l_pyramid, fea_r_pyramid):
tile_feature_duo_pyramid = self.tile_features(fea_l_pyramid, fea_r_pyramid)
init_cv_pyramid, init_hypo_pyramid = self.tile_hypothesis_pyramid(tile_feature_duo_pyramid, fea_l_pyramid)
return [init_cv_pyramid, init_hypo_pyramid]
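# Illustrative sketch (not part of the original CODD code): each tile hypothesis built
# above is a 16-channel tensor [d, dx, dy, 13-dim descriptor]. A hypothetical accessor:
def _split_tile_hypothesis(hyp):
    """hyp: B x 16 x H x W -> (disp, dx, dy, descriptor)."""
    return hyp[:, 0:1], hyp[:, 1:2], hyp[:, 2:3], hyp[:, 3:]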
|
CODD-main
|
model/stereo/hitnet/initialization.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models.builder import MODELS
def to_plane(d, dx, dy, size=4):
c = torch.linspace(-(size - 1) / 2, (size - 1) / 2, size, device=d.device)
a = c.view([1, 1, size])
a = torch.unsqueeze(a.repeat(1, d.shape[2] * size, d.shape[3]), dim=1)
b = c.view([1, size, 1])
b = torch.unsqueeze(b.repeat(1, d.shape[2], d.shape[3] * size), dim=1)
d_4 = F.interpolate(d, scale_factor=size, mode="nearest")
dx_4 = F.interpolate(dx, scale_factor=size, mode="nearest")
dy_4 = F.interpolate(dy, scale_factor=size, mode="nearest")
d_plane = d_4 + a * dx_4 + b * dy_4
return d_plane
def upsample(h, scale=2, size=2):
d = h[:, 0:1, :, :]
dx = h[:, 1:2, :, :]
dy = h[:, 2:3, :, :]
d = to_plane(d, dx, dy, size=size) * scale
h_up = torch.cat((d, F.interpolate(h[:, 1:, :, :], scale_factor=size, mode="nearest")), dim=1)
return h_up
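# Illustrative sketch (not part of the original CODD code): to_plane expands each tile
# hypothesis (d, dx, dy) into a size x size patch of the plane d + u * dx + v * dy,
# where (u, v) are offsets from the tile centre in [-(size - 1) / 2, (size - 1) / 2].
# A hypothetical single-tile check:
def _to_plane_sketch():
    import torch

    d = torch.full((1, 1, 1, 1), 10.0)
    dx = torch.full((1, 1, 1, 1), 1.0)  # disparity slope along x
    dy = torch.zeros(1, 1, 1, 1)
    plane = to_plane(d, dx, dy, size=4)
    # each row should read 10 + [-1.5, -0.5, 0.5, 1.5]
    return plane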
def warp(x, disp):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W, device=x.device).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H, device=x.device).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
vgrid = torch.cat((xx, yy), 1).float()
# vgrid = Variable(grid)
vgrid[:, :1, :, :] = vgrid[:, :1, :, :] - disp
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = F.grid_sample(x, vgrid, align_corners=True)
return output
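# Illustrative sketch (not part of the original CODD code): a hypothetical sanity check
# for warp(): with a constant disparity of 1, every output column should contain the
# input column one pixel to its left (up to border handling at column 0).
def _warp_sanity_check():
    import torch

    x = torch.arange(8, dtype=torch.float32).view(1, 1, 1, 8).repeat(1, 1, 4, 1)
    disp = torch.ones(1, 1, 4, 8)
    out = warp(x, disp)
    return torch.allclose(out[..., 1:], x[..., :-1], atol=1e-5)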
class TileWarping(nn.Module):
def __init__(self):
super(TileWarping, self).__init__()
self.unshuffle = torch.nn.PixelUnshuffle(4)
def forward(self, tile_plane: torch.Tensor, fea_l: torch.Tensor, fea_r: torch.Tensor):
"""
local cost volume
:param tile_plane: d, dx, dy
:param fea_l:
:param fea_r:
:return: local cost volume
"""
tile_d = tile_plane[:, :1, :, :]
tile_dx = tile_plane[:, 1:2, :, :]
tile_dy = tile_plane[:, 2:3, :, :]
local_cv = []
for k in range(-1, 2):
local_d = to_plane(tile_d + k, tile_dx, tile_dy, size=4) # Eq. (5)
warped_fea_r = warp(fea_r, local_d)
cv = torch.norm(fea_l - warped_fea_r, 1, 1, keepdim=True)
cv = self.unshuffle(cv)
local_cv.append(cv)
# local cost volume for all the disp hypothesis[B, 48, H/4, W/4]
local_cv = torch.cat(local_cv, 1)
return local_cv
def convbn(in_channel, out_channel, kernel_size, stride, pad, dilation):
# no bn
return nn.Sequential(
nn.Conv2d(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
)
)
class BasicBlock(nn.Module):
"""ResNet BasicBlock"""
expansion = 1
def __init__(self, c1, c2, s, downsample, p, d):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
convbn(c1, c2, 3, s, p, d),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv2 = convbn(c2, c2, 3, 1, p, d)
self.stride = s
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x
return out
class TileUpdate0(nn.Module):
"""
Tile Update for a single resolution
forward input: fea duo from current resolution, tile hypothesis from current resolution
forward output: refined tile hypothesis and confidence (if available)
"""
def __init__(self, in_c, out_c, hid_c):
super(TileUpdate0, self).__init__()
self.tile_warping = TileWarping()
self.decrease = nn.Sequential(
nn.Conv2d(64, 16, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv0 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock0 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock1 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.lastconv = nn.Conv2d(hid_c, out_c, 3, 1, 1)
self.unshuffle = torch.nn.PixelUnshuffle(4)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, fea_r, current_hypothesis):
fea = self.unshuffle(torch.norm(fea_l, 1, 1, keepdim=True))
# local cost volume in Eq. (7)
current_tile_local_cv = self.tile_warping(current_hypothesis[:, :3, :, :], fea_l, fea_r)
current_tile_local_cv = self.decrease(torch.cat([fea, current_tile_local_cv], 1))
# a_l in Eq. (7)
aug_current_tile_hypothesis = torch.cat([current_hypothesis, current_tile_local_cv], 1)
# U_l
tile_hypothesis_update = self.conv0(aug_current_tile_hypothesis)
tile_hypothesis_update = self.resblock0(tile_hypothesis_update)
tile_hypothesis_update = self.resblock1(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = current_hypothesis + tile_hypothesis_update
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone())
return [refined_hypothesis]
class TileUpdate(nn.Module):
"""
Tile Update for a single resolution
forward input: fea duo from current resolution, tile hypothesis from current and previous resolution
forward output: refined tile hypothesis and confidence (if available)
"""
def __init__(self):
super(TileUpdate, self).__init__()
self.tile_warping = TileWarping()
self.decrease = nn.Sequential(
nn.Conv2d(64, 16, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.conv0 = nn.Sequential(
nn.Conv2d(64, 32, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock0 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.resblock1 = nn.Sequential(
BasicBlock(32, 32, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.lastconv = nn.Conv2d(32, 34, 3, 1, 1)
self.unshuffle = torch.nn.PixelUnshuffle(4)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, fea_r, current_hypothesis, prev_hypothesis):
fea = self.unshuffle(torch.norm(fea_l, 1, 1, keepdim=True))
current_tile_local_cv = self.tile_warping(current_hypothesis[:, :3, :, :], fea_l, fea_r)
current_tile_local_cv = self.decrease(torch.cat([fea, current_tile_local_cv], 1))
up_prev_hypothesis = upsample(prev_hypothesis, 2)
up_prev_tile_local_cv = self.tile_warping(up_prev_hypothesis[:, :3, :, :], fea_l, fea_r)
up_prev_tile_local_cv = self.decrease(torch.cat([fea, up_prev_tile_local_cv], 1))
aug_hypothesis_set = torch.cat(
(current_hypothesis, current_tile_local_cv, up_prev_hypothesis, up_prev_tile_local_cv),
1,
)
tile_hypothesis_update = self.conv0(aug_hypothesis_set)
tile_hypothesis_update = self.resblock0(tile_hypothesis_update)
tile_hypothesis_update = self.resblock1(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
conf = tile_hypothesis_update[:, :2, :, :] # [:, 0, :, :] is for pre
prev_delta_hypothesis = tile_hypothesis_update[:, 2:18, :, :]
current_delta_hypothesis = tile_hypothesis_update[:, 18:34, :, :]
_, hypothesis_select_mask = torch.max(conf, dim=1, keepdim=True)
hypothesis_select_mask = hypothesis_select_mask.float()
update_current_hypothesis = current_hypothesis + current_delta_hypothesis
update_current_hypothesis[:, :1, :, :] = F.relu(
update_current_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
update_prev_hypothesis = up_prev_hypothesis + prev_delta_hypothesis
update_prev_hypothesis[:, :1, :, :] = F.relu(
update_prev_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
refined_hypothesis = hypothesis_select_mask * update_current_hypothesis + (
1 - hypothesis_select_mask) * update_prev_hypothesis
update_current_hypothesis_and_conf = torch.cat((update_current_hypothesis, conf[:, 1:2, :, :]), 1)
update_prev_hypothesis_and_conf = torch.cat((update_prev_hypothesis, conf[:, :1, :, :]), 1)
return [
refined_hypothesis,
update_current_hypothesis_and_conf, update_prev_hypothesis_and_conf,
]
class PostTileUpdate(nn.Module):
"""
    Post Tile Update for a single resolution: decrease tile size, i.e. refine an upsampled tile hypothesis
    forward input: left feature map at the current resolution, tile hypothesis from the previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num):
super(PostTileUpdate, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(hid_c, hid_c, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
resblks = nn.ModuleList()
for i in range(resblk_num):
dilation = 3 if i == 1 else 1
resblks.append(
nn.Sequential(
BasicBlock(hid_c, hid_c, s=1, p=1, downsample=None, d=dilation),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
)
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, kernel_size=3, padding=1)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, prev_hypothesis):
guided_prev_tile_hypothesis = torch.cat([fea_l, prev_hypothesis], 1)
tile_hypothesis_update = self.conv1(guided_prev_tile_hypothesis)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = prev_hypothesis + tile_hypothesis_update
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
return refined_hypothesis
class FinalTileUpdate(nn.Module):
"""
Final Tile Update: only predicts disp
    forward input: left feature map at the current resolution, tile hypothesis from the previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num):
super(FinalTileUpdate, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_c, hid_c, 1, stride=1, padding=0),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(hid_c, hid_c, 3, stride=1, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
resblks = nn.ModuleList()
for _ in range(resblk_num):
resblks.append(
nn.Sequential(
BasicBlock(hid_c, hid_c, s=1, p=1, downsample=None, d=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
)
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, kernel_size=3, padding=1)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, prev_hypothesis):
guided_prev_tile_hypothesis = torch.cat([fea_l, prev_hypothesis], 1)
tile_hypothesis_update = self.conv1(guided_prev_tile_hypothesis)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = prev_hypothesis[:, 0, :, :].unsqueeze(1) + tile_hypothesis_update
refined_hypothesis = F.relu(refined_hypothesis.clone()) # Force disp to be positive
return refined_hypothesis
@MODELS.register_module()
class TilePropagation(nn.Module):
"""
    Tile hypothesis propagation and refinement
    input: dual feature pyramid and initial tile hypothesis pyramid
    output: final disparity (plus disparity/slant/confidence pyramids during training)
"""
def __init__(self):
super(TilePropagation, self).__init__()
self.tile_update0 = TileUpdate0(32, 16, 32) # 1/16 tile refine
self.tile_update1 = TileUpdate() # 1/8 tile refine
self.tile_update2 = TileUpdate() # 1/4 tile refine
self.tile_update3 = TileUpdate() # 1/2 tile refine
self.tile_update4 = TileUpdate() # 1/1 tile refine
self.tile_update4_1 = PostTileUpdate(40, 16, 32, 4) # 1/1 tile refine
self.tile_update5 = PostTileUpdate(32, 16, 32, 4) # 2/1 tile refine tile_size=2
self.tile_update6 = FinalTileUpdate(32, 3, 16, 2) # 2/1 tile refine tile_size=1
# For final disparity and each supervision signal to be positive
# self.relu = nn.ReLU(inplace=True)
def forward(self, left_fea_pyramid, right_fea_pyramid, init_tile_pyramid):
refined_tile16x = self.tile_update0(left_fea_pyramid[0], right_fea_pyramid[0], init_tile_pyramid[0])
tile_update8x = self.tile_update1(left_fea_pyramid[1], right_fea_pyramid[1], init_tile_pyramid[1],
refined_tile16x[0])
tile_update4x = self.tile_update2(left_fea_pyramid[2], right_fea_pyramid[2], init_tile_pyramid[2],
tile_update8x[0])
tile_update2x = self.tile_update3(left_fea_pyramid[3], right_fea_pyramid[3], init_tile_pyramid[3],
tile_update4x[0])
tile_update1x = self.tile_update4(left_fea_pyramid[4], right_fea_pyramid[4], init_tile_pyramid[4],
tile_update2x[0])
refined_tile1x = self.tile_update4_1(left_fea_pyramid[2], tile_update1x[0])
refined_tile05x = self.tile_update5(left_fea_pyramid[3], upsample(refined_tile1x, 1))
refined_tile025x = self.tile_update6(left_fea_pyramid[4], upsample(refined_tile05x, 1))
final_disp = refined_tile025x[:, 0:1, :, :]
if self.training:
# For training phase, we need to upsample disps using slant equation
# For training phase, we need to upsample dx and dy using nearest interpolation
up_plane_16x = upsample(refined_tile16x[0], 16, 64)
up_plane_8x_cur = upsample(tile_update8x[1], 8, 32)
up_plane_8x_pre = upsample(tile_update8x[2], 8, 32)
up_plane_4x_cur = upsample(tile_update4x[1], 4, 16)
up_plane_4x_pre = upsample(tile_update4x[2], 4, 16)
up_plane_2x_cur = upsample(tile_update2x[1], 2, 8)
up_plane_2x_pre = upsample(tile_update2x[2], 2, 8)
up_plane_1x_cur = upsample(tile_update1x[1], 1, 4)
up_plane_1x_pre = upsample(tile_update1x[2], 1, 4)
up_plane_1x = upsample(refined_tile1x, 1, 4)
up_plane_05x = upsample(refined_tile05x, 1, 2)
prop_disp_pyramid = [
up_plane_16x[:, :1, :, :],
up_plane_8x_cur[:, :1, :, :],
up_plane_8x_pre[:, :1, :, :],
up_plane_4x_cur[:, :1, :, :],
up_plane_4x_pre[:, :1, :, :],
up_plane_2x_cur[:, :1, :, :],
up_plane_2x_pre[:, :1, :, :],
up_plane_1x_cur[:, :1, :, :],
up_plane_1x_pre[:, :1, :, :],
up_plane_1x[:, :1, :, :],
up_plane_05x[:, :1, :, :],
refined_tile025x[:, :1, :, :]
]
# WARNING: EACH PYRAMID MUST ALIGN ACCORDING TO PRE-CUR ORDER AND RESOLUTION ORDER SINCE SUPERVISION WOULDN'T SEE THE ORDER
dx_pyramid = [
up_plane_16x[:, 1:2, :, :],
up_plane_8x_cur[:, 1:2, :, :],
up_plane_8x_pre[:, 1:2, :, :],
up_plane_4x_cur[:, 1:2, :, :],
up_plane_4x_pre[:, 1:2, :, :],
up_plane_2x_cur[:, 1:2, :, :],
up_plane_2x_pre[:, 1:2, :, :],
up_plane_1x_cur[:, 1:2, :, :],
up_plane_1x_pre[:, 1:2, :, :],
up_plane_1x[:, 1:2, :, :],
up_plane_05x[:, 1:2, :, :],
refined_tile025x[:, 1:2, :, :]
]
dy_pyramid = [
up_plane_16x[:, 2:3, :, :],
up_plane_8x_cur[:, 2:3, :, :],
up_plane_8x_pre[:, 2:3, :, :],
up_plane_4x_cur[:, 2:3, :, :],
up_plane_4x_pre[:, 2:3, :, :],
up_plane_2x_cur[:, 2:3, :, :],
up_plane_2x_pre[:, 2:3, :, :],
up_plane_1x_cur[:, 2:3, :, :],
up_plane_1x_pre[:, 2:3, :, :],
up_plane_1x[:, 2:3, :, :],
up_plane_05x[:, 2:3, :, :],
refined_tile025x[:, 2:3, :, :],
]
w_pyramid = [
up_plane_8x_cur[:, 3:4, :, :],
up_plane_8x_pre[:, 3:4, :, :],
up_plane_4x_cur[:, 3:4, :, :],
up_plane_4x_pre[:, 3:4, :, :],
up_plane_2x_cur[:, 3:4, :, :],
up_plane_2x_pre[:, 3:4, :, :],
up_plane_1x_cur[:, 3:4, :, :],
up_plane_1x_pre[:, 3:4, :, :],
]
outputs = {
"prop_disp_pyramid": prop_disp_pyramid,
"dx_pyramid": dx_pyramid,
"dy_pyramid": dy_pyramid,
"w_pyramid": w_pyramid,
}
return outputs
else:
return final_disp
|
CODD-main
|
model/stereo/hitnet/propagation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .motion import Motion
from .others import GTMotion
__all__ = ["Motion", "GTMotion"]
|
CODD-main
|
model/motion/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from pytorch3d.renderer import (
PerspectiveCameras,
PointsRasterizationSettings,
PointsRenderer,
PointsRasterizer,
AlphaCompositor,
)
# Data structures and functions for rendering
from pytorch3d.structures import Pointclouds
from utils import compute_valid_mask, compute_gt_disp_change
from .raft3d.projective_ops import inv_project
class PointsRendererWithDepth(PointsRenderer):
"""Augment PointsRenderer to output depth"""
def __init__(self, rasterizer, compositor) -> None:
super(PointsRendererWithDepth, self).__init__(rasterizer, compositor)
def forward(self, point_clouds, **kwargs) -> torch.Tensor:
fragments = self.rasterizer(point_clouds, **kwargs)
# Construct weights based on the distance of a point to the true point.
# However, this could be done differently: e.g. predicted as opposed
# to a function of the weights.
r = self.rasterizer.raster_settings.radius
dists2 = fragments.dists.permute(0, 3, 1, 2)
weights = 1 - dists2 / (r * r)
images = self.compositor(fragments.idx.long().permute(0, 3, 1, 2), weights,
point_clouds.features_packed().permute(1, 0), **kwargs)
zbuf = fragments.zbuf.permute(0, 3, 1, 2)
return images, F.relu(zbuf)
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
@MODELS.register_module()
class Motion(nn.Module):
def __init__(self, raft3d=None, ds_scale=4, iters=16, loss=None):
"""motion network
Args:
raft3d (dict, optional): config for raft3d. Defaults to None.
ds_scale (int, optional): low res scale. Defaults to 4.
iters (int, optional): optimization iterations. Defaults to 16.
loss (dict, optional): config for losses. Defaults to None.
"""
super(Motion, self).__init__()
self.renderer = PointsRendererWithDepth(
rasterizer=PointsRasterizer(),
compositor=AlphaCompositor(),
)
self.ds_scale = ds_scale
self.iters = iters
# scene flow estimator
self.raft3d = MODELS.build(raft3d)
if loss is not None:
self.loss = builder_oss.build_loss(loss)
else:
self.loss = None
n_parameters = sum(p.numel() for n, p in self.named_parameters())
print(
"PARAM STATUS: total number of parameters %.3fM in motion network"
% (n_parameters / 1000 ** 2)
)
def transform_and_project(self, Ts, depth, feat, intrinsics, radius):
"""transform the previous state and project into the current frame
Args:
Ts (Tensor): NxHxW, transformation
depth (Tensor): NxHxW
feat (Tensor): NxCxHxW, feature
            intrinsics (Tensor): Nx4, fx, fy, cx, cy
radius (float): rendering radius, increase to aggregate more points
Returns:
Tensor, Tensor: aligned feature and depth
"""
# transform
X1 = inv_project(depth, intrinsics)
X2_est = Ts * X1
# create pc
B = intrinsics.shape[0]
verts = X2_est.reshape(B, -1, 3).contiguous()
C = feat.shape[1]
feat = feat.permute(0, 2, 3, 1).reshape(B, -1, C)
verts[..., 0] = verts[..., 0] * -1
verts[..., 1] = verts[..., 1] * -1
point_cloud = Pointclouds(points=verts, features=feat)
# project
h, w = depth.shape[-2:]
cameras = PerspectiveCameras(
device=depth.device,
principal_point=intrinsics[:, -2:].float(),
focal_length=intrinsics[:, :2].float(),
image_size=((h, w),),
in_ndc=False,
)
# The radius (in NDC units) of the disk to be rasterized.
raster_settings = PointsRasterizationSettings(image_size=(h, w), radius=1.0 / h * radius, points_per_pixel=8)
self.renderer.rasterizer.cameras = cameras
self.renderer.rasterizer.raster_settings = raster_settings
feat_warp, zbuf = self.renderer(
point_cloud,
gamma=(1e-4,),
bg_col=torch.tensor([0.0] * 9, dtype=torch.float32, device=depth.device),
eps=1e-5,
)
return feat_warp, zbuf[:, 0].unsqueeze(1)
def forward(self, state, outputs, img_metas, train_mode=False, **kwargs):
"""
Args:
state (dict): memory states
outputs (dict): outputs
img_metas (dict): dataset metas
train_mode (bool, optional): if True, return auxiliary outputs from raft3d. Defaults to False.
"""
img_curr = outputs["left_img"]
if "memory" not in state:
self.raft3d(
img_curr, None, None, None, state, outputs, train_mode=train_mode
)
return
else:
B = outputs["pred_disp"].shape[0]
intrinsics = img_metas[0]["intrinsics"]
intrinsics = (
torch.tensor(intrinsics).to(outputs["pred_disp"].device).unsqueeze(0).expand(B, -1)
)
depth_scale = BF_DEFAULT / intrinsics[0, 0]
img_prev, feat_prev, disp_prev = state["memory"]
disp_curr = outputs["pred_disp"]
depth_prev = (
depth_scale * intrinsics[0, 0] / (disp_prev + 1e-5)
) # convert to depth (up to scale)
depth_prev = torch.clip(depth_prev, max=BF_DEFAULT, min=0).squeeze(1) # avoid inf in depth
depth_curr = depth_scale * intrinsics[0, 0] / (disp_curr + 1e-5)
depth_curr = torch.clip(depth_curr, max=BF_DEFAULT, min=0).squeeze(1)
# Raft3D takes depth as NxHxW, not Nx1xHxW
self.raft3d(
img_curr,
depth_prev,
depth_curr,
intrinsics,
state,
outputs,
iters=self.iters,
train_mode=train_mode,
)
Ts = outputs["Ts"]
# full res depth warping
w = depth_curr.shape[-1]
flow2d_est = outputs["flow2d_est_induced"].permute(0, 3, 1, 2) # N3HW
confidence = outputs["weight"] # N3HW
to_proj = torch.cat([img_prev, flow2d_est, confidence], dim=1)
warped, depth_warp = self.transform_and_project(
Ts, depth_prev, to_proj, intrinsics, radius=2.0
)
img_warp, flow_warp, confidence_warp = (warped[:, :3], warped[:, 3:6], warped[:, 6:])
disp_warp = (
depth_scale * intrinsics[0, 0] / (depth_warp + 1e-5)
) # convert back to disp
disp_warp[disp_warp > w] = 0.0
# low res feature warping
Ts = Ts[:, self.ds_scale // 2 - 1:: self.ds_scale, self.ds_scale // 2 - 1:: self.ds_scale]
depth_prev = depth_prev[:, self.ds_scale // 2 - 1:: self.ds_scale, self.ds_scale // 2 - 1:: self.ds_scale]
intrinsics = (
intrinsics.float() / self.ds_scale
) # adjust focal length here so points can be projected, otherwise focal too long
feat_warp, _ = self.transform_and_project(Ts, depth_prev, feat_prev, intrinsics, radius=4.0)
if len(disp_warp.shape) == 3:
disp_warp = disp_warp.unsqueeze(1)
state["memory"] = [img_warp, feat_warp, confidence_warp, disp_warp, flow_warp]
return
def losses(self, loss, outputs, idx, state, meta):
gt_disp_prev = state["gt_disp"][-2]
gt_disp_curr = state["gt_disp"][-1]
gt_flow = state["gt_flow"][-2]
if len(state["gt_disp_change"]) != 0:
gt_disp_change = state["gt_disp_change"][-2]
elif len(state["gt_flow_occ"]) != 0: # no gt disp change
gt_flow_occ_prev = state["gt_flow_occ"][-2] # True for occluded
gt_disp_change, gt_disp_curr_warp = compute_gt_disp_change(gt_flow_occ_prev, gt_disp_prev,
gt_disp_curr, gt_flow)
elif len(state["gt_disp2"]) != 0:
gt_disp2 = state["gt_disp2"][-2] # this is in previous frame
gt_disp_change = gt_disp2 - gt_disp_prev
gt_disp_change[gt_disp2 <= 0.0] = BF_DEFAULT
gt_disp_change[gt_disp_prev <= 0.0] = BF_DEFAULT
else:
raise Exception("No disp change provided to train.")
gt_flow = state["gt_flow"][-2] # Nx2xHxW
flowz = (
gt_disp_change / BF_DEFAULT
        )  # the network estimates inverse depth, so the GT disparity change is divided by BF_DEFAULT for supervision
mask = state["mask_disp"][-2]
mask &= compute_valid_mask(
gt_disp_prev, meta, gt_flow_prev=gt_flow, gt_disp_change=gt_disp_change
)
flowxyz = torch.cat([gt_flow, flowz], dim=1).permute(0, 2, 3, 1) # NxHxWx3
flow2d_est = outputs["flow2d_est"]
flow2d_rev = outputs["flow2d_rev"]
self.loss(flow2d_est, flow2d_rev, flowxyz, mask, idx, loss)
def freeze(self):
self.eval()
self.loss.eval()
for param in self.parameters():
param.requires_grad = False
|
CODD-main
|
model/motion/motion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn as nn
from lietorch import SE3
from mmseg.models.builder import MODELS
from utils import flow_warp
@MODELS.register_module()
class GTMotion(nn.Module):
def __init__(self):
super(GTMotion, self).__init__()
self.loss = None
def forward(self, state, outputs, img_metas, train_mode=False, **kwargs):
if "memory" not in state:
return
else:
img_prev, feat_prev, disp_prev = state["memory"]
gt_disp_change = state["gt_disp_change"][
-1
            ]  # read latest disp change as Into_Past
            gt_flow = state["gt_flow"][-1]  # read latest flow as Into_Past
            gt_flow_occ = state["gt_flow_occ"][-1]  # read latest flow occlusion as Into_Past
# pad gt size so dimension matches
batch_size = disp_prev.shape[0]
h, w = disp_prev.shape[-2:]
h_pad, w_pad = h - gt_flow.shape[-2], w - gt_flow.shape[-1]
gt_flow = torch.nn.functional.pad(gt_flow, (0, w_pad, 0, h_pad))
gt_disp_change = torch.nn.functional.pad(
gt_disp_change, (0, w_pad, 0, h_pad)
)
gt_flow_occ = torch.nn.functional.pad(gt_flow_occ, (0, w_pad, 0, h_pad))
to_warp = torch.cat([img_prev, disp_prev.unsqueeze(1)], dim=1)
to_warp, valid = flow_warp(
to_warp, gt_flow, padding_mode="zeros", mode="nearest"
)
to_warp[~valid] = 0.0
to_warp[gt_flow_occ.expand_as(to_warp) > 0] = 0.0
img_warp = to_warp[:, :3]
disp_warp = to_warp[:, -1]
disp_warp = disp_warp - gt_disp_change
disp_warp[~valid[:, 0].unsqueeze(1)] = 0.0 # mask out invalid region
disp_warp[gt_flow_occ > 0] = 0.0
feat_warp, valid = flow_warp(
feat_prev,
gt_flow[:, :, 2::4, 2::4],
padding_mode="zeros",
mode="nearest",
)
feat_warp[~valid] = 0.0
gt_flow = torch.cat([gt_flow, gt_disp_change], dim=1)
gt_confidence = torch.ones_like(gt_flow, device=gt_flow.device)
state["memory"] = [img_warp, feat_warp, gt_confidence, disp_warp, gt_flow]
# dummy outputs
outputs["Ts"] = SE3.Identity(batch_size, h, w, device=gt_flow.device)
return
|
CODD-main
|
model/motion/others.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import lietorch_extras
import torch
import torch.nn.functional as F
from lietorch import SE3
from . import projective_ops as pops
class SE3BuilderInplace(torch.autograd.Function):
@staticmethod
def forward(ctx, se3, ae, pts, target, weight, intrinsics, radius=32):
""" Build linear system Hx = b """
ctx.radius = radius
ctx.save_for_backward(se3, ae, pts, target, weight, intrinsics)
H, b = lietorch_extras.se3_build_inplace(
se3, ae, pts, target, weight, intrinsics, radius)
return H, b
@staticmethod
def backward(ctx, grad_H, grad_b):
se3, ae, pts, target, weight, intrinsics = ctx.saved_tensors
ae_grad, target_grad, weight_grad = lietorch_extras.se3_build_inplace_backward(
se3, ae, pts, target, weight, intrinsics, grad_H, grad_b, ctx.radius)
return None, ae_grad, None, target_grad, weight_grad, None
class SE3Builder(torch.autograd.Function):
@staticmethod
def forward(ctx, attn, se3, pts, target, weight, intrinsics, radius=32):
""" Build linear system Hx = b """
ctx.radius = radius
ctx.save_for_backward(attn, se3, pts, target, weight, intrinsics)
H, b = lietorch_extras.se3_build(
attn, se3, pts, target, weight, intrinsics, radius)
return H, b
@staticmethod
def backward(ctx, grad_H, grad_b):
attn, se3, pts, target, weight, intrinsics = ctx.saved_tensors
grad_H = grad_H.contiguous()
grad_b = grad_b.contiguous()
attn_grad, target_grad, weight_grad = lietorch_extras.se3_build_backward(
attn, se3, pts, target, weight, intrinsics, grad_H, grad_b, ctx.radius)
return attn_grad, None, None, target_grad, weight_grad, None
class SE3Solver(torch.autograd.Function):
@staticmethod
def forward(ctx, H, b):
ctx.save_for_backward(H, b)
x, = lietorch_extras.cholesky6x6_forward(H, b)
return x
@staticmethod
def backward(ctx, grad_x):
H, b = ctx.saved_tensors
grad_x = grad_x.contiguous()
grad_H, grad_b = lietorch_extras.cholesky6x6_backward(H, b, grad_x)
return grad_H, grad_b
class CholeskySolver(torch.autograd.Function):
@staticmethod
def forward(ctx, H, b):
# don't crash training if cholesky decomp fails
try:
U = torch.cholesky(H)
xs = torch.cholesky_solve(b, U)
ctx.save_for_backward(U, xs)
ctx.failed = False
except Exception as e:
print(e)
ctx.failed = True
xs = torch.zeros_like(b)
return xs
@staticmethod
def backward(ctx, grad_x):
if ctx.failed:
return None, None
U, xs = ctx.saved_tensors
dz = torch.cholesky_solve(grad_x, U)
dH = -torch.matmul(xs, dz.transpose(-1, -2))
return dH, dz
def block_solve(H, b, ep=0.1, lm=0.0001):
""" solve normal equations """
B, N, _, D, _ = H.shape
I = torch.eye(D).to(H.device)
H = H + (ep + lm * H) * I
H = H.permute(0, 1, 3, 2, 4)
H = H.reshape(B, N * D, N * D)
b = b.reshape(B, N * D, 1)
x = CholeskySolver.apply(H, b)
return x.reshape(B, N, D)
def attention_matrix(X):
""" compute similiarity matrix between all pairs of embeddings """
batch, ch, ht, wd = X.shape
X = X.view(batch, ch, ht * wd) / 8.0
dist = -torch.sum(X ** 2, dim=1).view(batch, 1, ht * wd) + \
-torch.sum(X ** 2, dim=1).view(batch, ht * wd, 1) + \
2 * torch.matmul(X.transpose(1, 2), X)
A = torch.sigmoid(dist)
return A.view(batch, ht, wd, ht, wd)
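# Illustrative sketch (not part of the original CODD code): the expansion above equals
# -||x_i - x_j||^2 for the embeddings scaled by 1/8, so every attention entry is
# sigmoid(-||x_i / 8 - x_j / 8||^2). A hypothetical check on random embeddings:
def _attention_matrix_check():
    import torch

    X = torch.randn(2, 16, 5, 7)
    A = attention_matrix(X).view(2, 35, 35)
    flat = (X / 8.0).view(2, 16, -1).transpose(1, 2)  # B x (ht*wd) x ch
    sq_dist = torch.cdist(flat, flat) ** 2            # pairwise squared distances
    return torch.allclose(A, torch.sigmoid(-sq_dist), atol=1e-4)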
def step(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0, 3, 1, 2).contiguous()
attn = attention_matrix(ae)
se3 = Ts.matrix().permute(0, 3, 4, 1, 2).contiguous()
# build the linear system
H, b = SE3Builder.apply(attn, se3, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[..., None, None]
H = H + (lm * H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0, 3, 4, 1, 2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update with computing similiarity matrix """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0, 3, 1, 2).contiguous()
# tensor representation of SE3
se3 = Ts.data.permute(0, 3, 1, 2).contiguous()
ae = ae / 8.0
# build the linear system
H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[..., None, None]
H = H + (lm * H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0, 3, 4, 1, 2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts
def cvx_upsample(data, mask):
""" convex combination upsampling (see RAFT) """
batch, ht, wd, dim = data.shape
data = data.permute(0, 3, 1, 2)
mask = mask.view(batch, 1, 9, 8, 8, ht, wd)
mask = torch.softmax(mask, dim=2)
up_data = F.unfold(data, [3, 3], padding=1)
up_data = up_data.view(batch, dim, 9, 1, 1, ht, wd)
up_data = torch.sum(mask * up_data, dim=2)
up_data = up_data.permute(0, 4, 2, 5, 3, 1)
up_data = up_data.reshape(batch, 8 * ht, 8 * wd, dim)
return up_data
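# Illustrative sketch (not part of the original CODD code): convex upsampling predicts,
# for every output pixel in an 8x8 block, softmax weights over the 3x3 low-resolution
# neighbourhood and takes the corresponding convex combination. With uniform mask
# logits this degenerates to a 3x3 box filter followed by nearest 8x upsampling, which
# the hypothetical check below exercises.
def _cvx_upsample_uniform_check():
    import torch

    data = torch.randn(1, 4, 6, 2)           # batch, ht, wd, dim
    mask = torch.zeros(1, 9 * 8 * 8, 4, 6)   # uniform logits -> weights of 1/9 each
    up = cvx_upsample(data, mask)
    return up.shape  # torch.Size([1, 32, 48, 2])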
def upsample_se3(Ts, mask):
""" upsample a se3 field """
tau_phi = Ts.log()
return SE3.exp(cvx_upsample(tau_phi, mask))
def upsample_flow(flow, mask):
""" upsample a flow field """
flow = flow * torch.as_tensor([8.0, 8.0, 1.0]).to(flow.device)
return cvx_upsample(flow, mask)
|
CODD-main
|
model/motion/raft3d/se3_field.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
import torch.nn.functional as F
# lietorch for tangent space backpropagation
from lietorch import SE3
from mmseg.models import builder as builder_oss
from mmseg.models.builder import MODELS
from . import projective_ops as pops
from . import se3_field
from .blocks.corr import CorrBlock
from .blocks.extractor import BasicEncoder
from .blocks.gru import ConvGRU
from .sampler_ops import depth_sampler
GRAD_CLIP = 0.01
class GradClip(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
o = torch.zeros_like(grad_x)
grad_x = torch.where(grad_x.abs() > GRAD_CLIP, o, grad_x)
grad_x = torch.where(torch.isnan(grad_x), o, grad_x)
return grad_x
class GradientClip(nn.Module):
def __init__(self):
super(GradientClip, self).__init__()
def forward(self, x):
return GradClip.apply(x)
class BasicUpdateBlock(nn.Module):
def __init__(self, hidden_dim=128, input_dim=128):
super(BasicUpdateBlock, self).__init__()
self.gru = ConvGRU(hidden_dim)
self.corr_enc = nn.Sequential(
nn.Conv2d(196, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3 * 128, 1, padding=0),
)
self.flow_enc = nn.Sequential(
nn.Conv2d(9, 128, 7, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(128, 3 * 128, 1, padding=0),
)
self.ae = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 32, 1, padding=0),
GradientClip(),
)
self.delta = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3, 1, padding=0),
GradientClip(),
)
self.weight = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 3, 1, padding=0),
nn.Sigmoid(),
GradientClip(),
)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64 * 9, 1, padding=0),
GradientClip(),
)
def forward(self, net, inp, corr, flow, twist, dz, upsample=True):
motion_info = torch.cat([flow, 10 * dz, 10 * twist], dim=-1)
motion_info = motion_info.clamp(-50.0, 50.0).permute(0, 3, 1, 2)
mot = self.flow_enc(motion_info)
cor = self.corr_enc(corr)
net = self.gru(net, inp, cor, mot)
ae = self.ae(net)
mask = self.mask(net)
delta = self.delta(net)
weight = self.weight(net)
return net, mask, ae, delta, weight
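# The update block returns the new hidden state, a 576-channel mask for convex
# upsampling, 32-channel attention embeddings (ae), a 3-channel revision (delta) to the
# reprojected (x, y, inverse-depth) targets, and a 3-channel confidence weight in (0, 1).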
class ResizeConcatConv(nn.Module):
"""
Resize + concat + 1 layer conv
"""
def __init__(self, in_channels, out_channels=32):
super(ResizeConcatConv, self).__init__()
assert isinstance(in_channels, (list, tuple))
self.in_channels = in_channels
self.out_channels = out_channels
self.convs = nn.Sequential(
nn.Conv2d(sum(in_channels), self.out_channels, kernel_size=1, padding=0, stride=1, bias=False),
nn.ReLU(inplace=True),
)
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
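# note: all feature maps are resized to the spatial size of inputs[1] before concatenation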
upsampled_inputs = [
F.interpolate(
input=x, size=inputs[1].shape[2:], mode="bilinear", align_corners=True
)
for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
outputs = self.convs(inputs)
return outputs
@MODELS.register_module()
class RAFT3D(nn.Module):
def __init__(self, cnet_cfg=None):
super(RAFT3D, self).__init__()
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
self.corr_levels = 4
self.corr_radius = 3
# feature network, context network, and update block
self.fnet = BasicEncoder(output_dim=128, norm_fn="instance")
if cnet_cfg is None:
# NOTE: FPN is not imported in this file; this default branch assumes the FPN encoder
# from the original RAFT-3D code is available in scope.
self.cnet = FPN(output_dim=hdim + 3 * hdim)
else:
self.cnet = nn.Sequential(
builder_oss.build_backbone(cnet_cfg),
ResizeConcatConv(cnet_cfg["extra"]["stage4"]["num_channels"], 128 * 4),
)
if cnet_cfg.get('init_cfg', None) is not None:
self.cnet[0].init_weights()
self.update_block = BasicUpdateBlock(hidden_dim=hdim)
def initializer(self, image1):
"""Initialize coords and transformation maps"""
batch_size, ch, ht, wd = image1.shape
device = image1.device
y0, x0 = torch.meshgrid(torch.arange(ht // 8), torch.arange(wd // 8))
coords0 = torch.stack([x0, y0], dim=-1).float()
coords0 = coords0[None].repeat(batch_size, 1, 1, 1).to(device)
Ts = SE3.Identity(batch_size, ht // 8, wd // 8, device=device)
return Ts, coords0
def features_and_correlation(self, net_inp, fmap_prev, image_curr):
# extract features and build correlation volume
fmap_curr = self.fnet(image_curr)
corr_fn = CorrBlock(fmap_prev, fmap_curr, radius=self.corr_radius)
# split the precomputed context features into the GRU hidden state and input
net, inp = net_inp.split([128, 128 * 3], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
return corr_fn, net, inp, fmap_curr
def forward(
self,
image_curr,
depth_prev,
depth_curr,
intrinsics,
state,
outputs,
iters=12,
train_mode=False,
):
"""Estimate optical flow between pair of frames"""
if "memory" not in state:
state["raft_feat"] = self.fnet(image_curr)
state["raft_netinp"] = self.cnet(image_curr)
return
else:
fmap_prev = state["raft_feat"]
net_inp = state["raft_netinp"]
Ts, coords0 = self.initializer(image_curr)
corr_fn, net, inp, fmap_curr = self.features_and_correlation(
net_inp, fmap_prev, image_curr
)
# intrinsics and depth at 1/8 resolution
intrinsics_r8 = intrinsics / 8.0
depth1_r8 = depth_prev[:, 3::8, 3::8]
depth2_r8 = depth_curr[:, 3::8, 3::8]
flow_est_list = []
flow_rev_list = []
for _ in range(iters):
Ts = Ts.detach()
coords1_xyz, _ = pops.projective_transform(Ts, depth1_r8, intrinsics_r8)
coords1, zinv_proj = coords1_xyz.split([2, 1], dim=-1)
zinv, _ = depth_sampler(1.0 / depth2_r8, coords1)
corr = corr_fn(coords1.permute(0, 3, 1, 2).contiguous())
flow = coords1 - coords0
dz = zinv.unsqueeze(-1) - zinv_proj
twist = Ts.log()
net, mask, ae, delta, weight = self.update_block(
net, inp, corr, flow, dz, twist
)
target = coords1_xyz.permute(0, 3, 1, 2) + delta
target = target.contiguous()
# Gauss-Newton step
# Ts = se3_field.step(Ts, ae, target, weight, depth1_r8, intrinsics_r8)
Ts = se3_field.step_inplace(Ts, ae, target, weight, depth1_r8, intrinsics_r8)
if train_mode:
flow2d_rev = target.permute(0, 2, 3, 1)[..., :2] - coords0
flow2d_rev = se3_field.cvx_upsample(8 * flow2d_rev, mask)
Ts_up = se3_field.upsample_se3(Ts, mask)
flow2d_est, flow3d_est, valid = pops.induced_flow(
Ts_up, depth_prev, intrinsics
)
flow_est_list.append(flow2d_est)
flow_rev_list.append(flow2d_rev)
if train_mode:
outputs["flow2d_est"] = flow_est_list
outputs["flow2d_rev"] = flow_rev_list
Ts_up = se3_field.upsample_se3(Ts, mask)
outputs["Ts"] = Ts_up
flow2d_est, _, _ = pops.induced_flow(Ts_up, depth_prev, intrinsics)
outputs["flow2d_est_induced"] = flow2d_est
weight = se3_field.cvx_upsample(weight.permute(0, 2, 3, 1), mask).permute(
0, 3, 1, 2
)
outputs["weight"] = weight
# update state
state["raft_feat"] = fmap_curr
state["raft_netinp"] = self.cnet(image_curr)
return
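# Typical calling protocol (a minimal sketch under assumed shapes; not part of the
# original code): on the first frame ("memory" not yet in `state`) the module only
# caches feature/context maps, on later frames it runs `iters` GRU + Gauss-Newton
# iterations and fills `outputs` with the upsampled SE3 field ("Ts"), the induced 2D
# flow, and per-pixel confidence weights.
#   model = RAFT3D(cnet_cfg=some_backbone_cfg)   # hypothetical config; needs the compiled lietorch extensions
#   state, outputs = {}, {}
#   model(img0, None, None, intrinsics, state, outputs)       # frame 0: cache features only
#   state["memory"] = ...   # presumably set elsewhere before the second call; only its presence is checked here
#   model(img1, depth0, depth1, intrinsics, state, outputs)   # motion from frame 0 to frame 1
# where img* are (B, 3, H, W) with H, W divisible by 8, depth* are (B, H, W), and
# intrinsics is (B, 4) ordered (fx, fy, cx, cy).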
|
CODD-main
|
model/motion/raft3d/raft3d.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .raft3d import RAFT3D
|
CODD-main
|
model/motion/raft3d/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn.functional as F
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
""" Wrapper for grid_sample, uses pixel coordinates """
H, W = img.shape[-2:]
xgrid, ygrid = coords.split([1, 1], dim=-1)
xgrid = 2 * xgrid / (W - 1) - 1
ygrid = 2 * ygrid / (H - 1) - 1
grid = torch.cat([xgrid, ygrid], dim=-1)
img = F.grid_sample(img, grid, align_corners=True)
if mask:
mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
return img, mask.float()
return img
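# bilinear_sampler rescales pixel coordinates to the [-1, 1] range expected by
# F.grid_sample; when mask=True it also returns a float mask marking samples that fall
# strictly inside the image.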
def depth_sampler(depths, coords):
depths_proj, valid = bilinear_sampler(depths[:, None], coords, mask=True)
return depths_proj.squeeze(dim=1), valid
|
CODD-main
|
model/motion/raft3d/sampler_ops.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
from .sampler_ops import *
MIN_DEPTH = 0.05
EPS = 1e-5
def project(Xs, intrinsics):
""" Pinhole camera projection """
X, Y, Z = Xs.unbind(dim=-1)
Z = Z + EPS
fx, fy, cx, cy = intrinsics[:, None, None].unbind(dim=-1)
x = fx * (X / Z) + cx
y = fy * (Y / Z) + cy
d = 1.0 / Z
coords = torch.stack([x, y, d], dim=-1)
return coords
def inv_project(depths, intrinsics):
""" Pinhole camera inverse-projection """
ht, wd = depths.shape[-2:]
fx, fy, cx, cy = \
intrinsics[:, None, None].unbind(dim=-1)
y, x = torch.meshgrid(
torch.arange(ht).to(depths.device).float(),
torch.arange(wd).to(depths.device).float())
X = depths * ((x - cx) / fx)
Y = depths * ((y - cy) / fy)
Z = depths
return torch.stack([X, Y, Z], dim=-1)
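# inv_project implements the standard pinhole back-projection
#   X = Z * (x - cx) / fx,  Y = Z * (y - cy) / fy,  Z = depth,
# returning a (..., ht, wd, 3) point map in camera coordinates.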
def projective_transform(Ts, depth, intrinsics):
""" Project points from I1 to I2 """
X0 = inv_project(depth, intrinsics)
X1 = Ts * X0
x1 = project(X1, intrinsics)
valid = (X0[..., -1] > MIN_DEPTH) & (X1[..., -1] > MIN_DEPTH)
return x1, valid.float()
def induced_flow(Ts, depth, intrinsics):
""" Compute 2d and 3d flow fields """
X0 = inv_project(depth, intrinsics)
X1 = Ts * X0
x0 = project(X0, intrinsics)
x1 = project(X1, intrinsics)
flow2d = x1 - x0
flow3d = X1 - X0
valid = (X0[..., -1] > MIN_DEPTH) & (X1[..., -1] > MIN_DEPTH)
return flow2d, flow3d, valid.float()
def backproject_flow3d(flow2d, depth0, depth1, intrinsics):
""" compute 3D flow from 2D flow + depth change """
ht, wd = flow2d.shape[0:2]
fx, fy, cx, cy = \
intrinsics[None].unbind(dim=-1)
y0, x0 = torch.meshgrid(
torch.arange(ht).to(depth0.device).float(),
torch.arange(wd).to(depth0.device).float())
x1 = x0 + flow2d[..., 0]
y1 = y0 + flow2d[..., 1]
X0 = depth0 * ((x0 - cx) / fx)
Y0 = depth0 * ((y0 - cy) / fy)
Z0 = depth0
X1 = depth1 * ((x1 - cx) / fx)
Y1 = depth1 * ((y1 - cy) / fy)
Z1 = depth1
flow3d = torch.stack([X1 - X0, Y1 - Y0, Z1 - Z0], dim=-1)
return flow3d
def backproject_flow2d_to_pts(flow2d, z0, intrinsics):
"""
flow2d: NHW3
z0: NHW
intrinsics: N4
"""
ht, wd = flow2d.shape[1:3]
fx, fy, cx, cy = intrinsics.unbind(dim=-1)
y0, x0 = torch.meshgrid( # HW
torch.arange(ht).to(z0.device).float(),
torch.arange(wd).to(z0.device).float())
y0 = y0[None].expand_as(z0)
x0 = x0[None].expand_as(z0)
x1 = x0 + flow2d[..., 0]
y1 = y0 + flow2d[..., 1]
z1 = z0 + flow2d[..., 2]
depth1 = fx.unsqueeze(-1).unsqueeze(-1) / (z1 + EPS)
X1 = depth1 * ((x1 - cx.unsqueeze(-1).unsqueeze(-1)) / fx.unsqueeze(-1).unsqueeze(-1))
Y1 = depth1 * ((y1 - cy.unsqueeze(-1).unsqueeze(-1)) / fy.unsqueeze(-1).unsqueeze(-1))
Z1 = depth1
return torch.stack([X1, Y1, Z1], dim=0)
|
CODD-main
|
model/motion/raft3d/projective_ops.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import lietorch_extras
import torch
import torch.nn.functional as F
class CorrSampler(torch.autograd.Function):
""" Index from correlation pyramid """
@staticmethod
def forward(ctx, volume, coords, radius):
ctx.save_for_backward(volume, coords)
ctx.radius = radius
corr, = lietorch_extras.corr_index_forward(volume, coords, radius)
return corr
@staticmethod
def backward(ctx, grad_output):
volume, coords = ctx.saved_tensors
grad_output = grad_output.contiguous()
grad_volume, = lietorch_extras.corr_index_backward(volume, coords, grad_output, ctx.radius)
return grad_volume, None, None
class CorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid = []
# all pairs correlation
corr = CorrBlock.corr(fmap1, fmap2)
batch, h1, w1, h2, w2 = corr.shape
corr = corr.reshape(batch * h1 * w1, 1, h2, w2)
for i in range(self.num_levels):
self.corr_pyramid.append(
corr.view(batch, h1, w1, h2 // 2 ** i, w2 // 2 ** i))
corr = F.avg_pool2d(corr, 2, stride=2)
return
def __call__(self, coords):
out_pyramid = []
bz, _, ht, wd = coords.shape
for i in range(self.num_levels):
corr = CorrSampler.apply(self.corr_pyramid[i], coords / 2 ** i, self.radius)
out_pyramid.append(corr.view(bz, -1, ht, wd))
return torch.cat(out_pyramid, dim=1)
@staticmethod
def corr(fmap1, fmap2):
batch, dim, ht, wd = fmap1.shape
fmap1 = fmap1.view(batch, dim, ht * wd) / 4.0
fmap2 = fmap2.view(batch, dim, ht * wd) / 4.0
corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
return corr.view(batch, ht, wd, ht, wd)
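# The correlation volume is a scaled dot product between all pairs of feature vectors
# (each map is divided by 4, so each entry is <f1, f2> / 16). The pyramid keeps this
# volume average-pooled over the second image at num_levels scales, and __call__
# samples a (2 * radius + 1)^2 window around the current correspondence estimate at
# every level, concatenating the results along the channel dimension.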
|
CODD-main
|
model/motion/raft3d/blocks/corr.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import time
import numpy as np
import scipy.sparse
import torch
import torch.nn.functional as F
from sksparse import cholmod
class GridCholeskySolver(torch.autograd.Function):
@staticmethod
def forward(ctx, chols, J, w, b):
""" Solve linear system """
B, H, W, M, N = J.shape
D = b.shape[-1]
bs = b.detach().reshape(B, -1, D).cpu().numpy()
xs = []
for i in range(len(chols)):
xs += [chols[i](bs[i])]
xs = np.stack(xs).astype(np.float32)
xs = torch.from_numpy(xs).to(J.device)
xs = xs.view(B, H, W, N // 4, D)
ctx.chols = chols
ctx.save_for_backward(xs, J, w, b)
return xs
@staticmethod
def backward(ctx, grad_output):
xs, J, w, b = ctx.saved_tensors
B, H, W, M, N = J.shape
D = b.shape[-1]
gs = grad_output.reshape(B, -1, D).cpu().numpy()
chols = ctx.chols
dz = []
for i in range(len(chols)):
dz += [chols[i](gs[i])]
dz = np.stack(dz, axis=0).astype(np.float32)
dz = torch.from_numpy(dz).to(J.device).view(*xs.shape)
J = GridFactor(A=J, w=w)
grad_J = torch.matmul(-w[..., None] * J.A(dz), J._unfold(xs).transpose(-1, -2)) + \
torch.matmul(-w[..., None] * J.A(xs), J._unfold(dz).transpose(-1, -2))
grad_w = -torch.sum(J.A(xs) * J.A(dz), -1)
return None, grad_J, grad_w, dz
sym_factor = None
sym_shape = None
class GridFactor:
""" Generalized grid factors """
def __init__(self, A=None, w=None):
self.factors = []
self.weights = []
self.residuals = []
self.chols = None
self.Af = A
self.wf = w
def _build_factors(self):
self.Af = torch.cat(self.factors, dim=3)
self.wf = torch.cat(self.weights, dim=3)
def add_factor(self, Js, ws=None, rs=None, ftype='u'):
""" Add factor to graph """
B, H, W, M, N = Js[0].shape
device = Js[0].device
A = torch.zeros([B, H, W, M, N, 2, 2]).to(device)
w = torch.zeros([B, H, W, M]).to(device)
# unary factor
if ftype == 'u':
A[..., 0, 0] = Js[0]
w[:] = ws[:]
# horizontal pairwise factor
elif ftype == 'h':
A[..., 0, 0] = Js[0]
A[..., 0, 1] = Js[1]
w[:, :, :-1, :] = ws[:, :, :-1, :]
# vertical pairwise factor
elif ftype == 'v':
A[..., 0, 0] = Js[0]
A[..., 1, 0] = Js[1]
w[:, :-1, :, :] = ws[:, :-1, :, :]
A = A.view(B, H, W, M, 2 * 2 * N)
self.factors.append(A)
self.weights.append(w)
if rs is not None:
self.residuals.append(rs)
def _fold(self, x):
""" Transposed fold operator """
B, H, W, M, D = x.shape
x = x.transpose(-1, -2)
x = x.reshape(B, H, W, M * D)
x = F.pad(x, [0, 0, 1, 0, 1, 0])
x = x.reshape(B, (H + 1) * (W + 1), M * D).permute(0, 2, 1)
x = F.fold(x, [H, W], [2, 2], padding=1)
x = x.permute(0, 2, 3, 1).reshape(B, H, W, D, M // 4)
return x.transpose(-1, -2)
def _unfold(self, x):
""" Transposed unfold operator """
B, H, W, N, D = x.shape
x = x.transpose(-1, -2)
x = F.pad(x.view(B, H, W, N * D), [0, 0, 0, 1, 0, 1])
x = x.permute(0, 3, 1, 2)
x = F.unfold(x, [2, 2], padding=0)
x = x.permute(0, 2, 1).reshape(B, H, W, D, 4 * N)
return x.transpose(-1, -2)
def A(self, x, w=False):
""" Linear operator """
return torch.matmul(self.Af, self._unfold(x))
def At(self, y):
""" Adjoint operator """
w = self.wf.unsqueeze(dim=-1)
At = self.Af.transpose(-1, -2)
return self._fold(torch.matmul(At, w * y))
def to_csc(self):
""" Convert linear operator into scipy csc matrix"""
if self.Af is None:
self._build_factors()
with torch.no_grad():
B, H, W, N, M = self.Af.shape
dims = [torch.arange(d).cuda() for d in (H, W, N, M // 4)]
i0, j0, k0, h0 = \
[x.reshape(-1) for x in torch.meshgrid(*dims)]
# repeats are ok because edge weights get zeroed
s = [W * (M // 4), M // 4, 1]
i1 = i0 + 1
j1 = j0 + 1
i1[i1 >= H] = H - 1
j1[j1 >= W] = W - 1
col_idx = torch.stack([
s[0] * i0 + s[1] * j0 + s[2] * h0,
s[0] * i0 + s[1] * j1 + s[2] * h0,
s[0] * i1 + s[1] * j0 + s[2] * h0,
s[0] * i1 + s[1] * j1 + s[2] * h0
], dim=-1).view(-1)
dense_shape = [H * W * N, H * W * (M // 4)]
col_idx = col_idx.cpu().numpy()
row_idx = M * np.arange(0, H * W * N + 1)
A = self.Af.detach().view(B, H * W * N, M)
wsqrt = self.wf.detach().sqrt().view(B, H * W * N, 1)
vals = (wsqrt * A).cpu().numpy()
sparse_matrices = []
for batch_ix in range(B):
data = (vals[batch_ix].reshape(-1), col_idx, row_idx)
mat = scipy.sparse.csr_matrix(data, shape=dense_shape)
mat.sum_duplicates()
sparse_matrices.append(mat.T)
return sparse_matrices
def factorAAt(self):
""" Peform sparse cholesky factorization """
global sym_factor, sym_shape
with torch.no_grad():
self.chols = []
start = time.time()
As = self.to_csc()
if sym_factor is None or As[0].shape != sym_shape:
sym_factor = cholmod.analyze_AAt(As[0], ordering_method='best')
sym_shape = As[0].shape
for A in As:
chol = sym_factor.cholesky_AAt(A)
self.chols.append(chol)
return self.chols
def solveAAt(self, b=None):
if self.chols is None:
self.factorAAt()
if b is None:
r = torch.cat(self.residuals, -2)
b = self.At(r)
x = GridCholeskySolver.apply(self.chols, self.Af, self.wf, b)
return x.reshape(*b.shape)
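# GridFactor assembles per-pixel unary and pairwise factors into a sparse
# least-squares system: A() and At() apply the stacked Jacobian and its adjoint via
# fold/unfold, to_csc() exports the weighted Jacobian as scipy sparse matrices, and
# factorAAt()/solveAAt() solve the normal equations A^T W A x = A^T W r with CHOLMOD,
# reusing the cached symbolic factorization while the matrix shape is unchanged.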
|
CODD-main
|
model/motion/raft3d/blocks/grid.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
|
CODD-main
|
model/motion/raft3d/blocks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not stride == 1:
self.norm3 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x + y)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(BottleneckBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes // 4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(planes // 4, planes // 4, kernel_size=3, padding=1, stride=stride)
self.conv3 = nn.Conv2d(planes // 4, planes, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes // 4)
self.norm2 = nn.BatchNorm2d(planes // 4)
self.norm3 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm4 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes // 4)
self.norm2 = nn.InstanceNorm2d(planes // 4)
self.norm3 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm4 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
if not stride == 1:
self.norm4 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
y = self.relu(self.norm3(self.conv3(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x + y)
class BasicEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, depth_input=False):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
if depth_input:
self.conv1a = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=2)
self.layer3 = self._make_layer(128, stride=2)
# output convolution
self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x, y=None):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
if y is not None:
y = torch.cat(y, dim=0)
x = self.conv1(x)
if y is not None:
x = x + self.conv1a(y)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
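# BasicEncoder downsamples by 8 overall (a stride-2 stem followed by two stride-2
# residual stages), matching the 1/8-resolution grid used by the correlation volume
# and the convex upsampling in RAFT-3D.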
class SmallEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(SmallEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(32)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(32)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 32
self.layer1 = self._make_layer(32, stride=1)
self.layer2 = self._make_layer(64, stride=2)
self.layer3 = self._make_layer(96, stride=2)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
|
CODD-main
|
model/motion/raft3d/blocks/extractor.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Adapted from RAFT3D repository: https://github.com/princeton-vl/RAFT-3D
import torch
import torch.nn as nn
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128, dilation=4):
super(ConvGRU, self).__init__()
self.hidden_dim = hidden_dim
self.convz1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convz2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convr1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convr2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convq1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convq2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
def forward(self, h, *inputs):
iz, ir, iq = 0, 0, 0
for inp in inputs:
inp = inp.split([self.hidden_dim] * 3, dim=1)
iz = iz + inp[0]
ir = ir + inp[1]
iq = iq + inp[2]
z = torch.sigmoid(self.convz1(h) + self.convz2(h) + iz)
r = torch.sigmoid(self.convr1(h) + self.convr2(h) + ir)
q = torch.tanh(self.convq1(r * h) + self.convq2(r * h) + iq)
h = (1 - z) * h + z * q
return h
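# ConvGRU uses paired 3x3 convolutions (one dense, one dilated) per gate; every extra
# input tensor must carry 3 * hidden_dim channels, split into additive contributions
# to the update, reset, and candidate gates:
#   z = sigmoid(convz1(h) + convz2(h) + iz)
#   r = sigmoid(convr1(h) + convr2(h) + ir)
#   q = tanh(convq1(r * h) + convq2(r * h) + iq)
#   h <- (1 - z) * h + z * q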
|
CODD-main
|
model/motion/raft3d/blocks/gru.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import pylab
import torch
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.cluster import KMeans
from utils import mic_acc_cal, shot_acc
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='')
parser.add_argument('--type', type=str, default='test')
args = parser.parse_args()
# ----------------------------------------------------------------------------------
root = args.root
train_file = 'trainfeat_all.pkl'
test_file = '{}feat_all.pkl'.format(args.type)
# load data
with open(os.path.join(root, train_file), 'rb') as f:
trainset = pickle.load(f)
if args.type == 'train':
testset = trainset
else:
with open(os.path.join(root, test_file), 'rb') as f:
testset = pickle.load(f)
testsize = len(testset['feats'])
batch_size = 512
# Calculate centroids
centroids = []
c_labels = []
for i in np.unique(trainset['labels']):
c_labels.append(i)
centroids.append(np.mean(trainset['feats'][trainset['labels']==i], axis=0))
centroids = torch.Tensor(np.stack(centroids))
c_labels = np.array(c_labels)
# ----------------------------------------------------------------------------------
# load weight
x = torch.load(os.path.join(root, 'final_model_checkpoint.pth'), map_location=torch.device('cpu'))
weights = x['state_dict_best']['classifier']['module.fc.weight'].cpu()
bias = x['state_dict_best']['classifier']['module.fc.bias'].cpu()
def cos_similarity(A, B):
feat_dim = A.size(1)
normB = torch.norm(B, 2, 1, keepdim=True)
B = B / normB
AB = torch.mm(A, B.t())
return AB
def linear_classifier(inputs, weights, bias):
return torch.addmm(bias, inputs, weights.t())
def logits2preds(logits, labels):
_, nns = logits.max(dim=1)
preds = np.array([labels[i] for i in nns])
return preds
def preds2accs(preds, testset, trainset):
top1_all = mic_acc_cal(preds, testset['labels'])
many, median, low, cls_accs = shot_acc(preds, testset['labels'], trainset['labels'], acc_per_cls=True)
top1_all = np.mean(cls_accs)
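# printed columns: many-shot, median-shot, low-shot, and class-averaged overall accuracy (in %)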
print("{:.2f} \t {:.2f} \t {:.2f} \t {:.2f}".format(
many * 100, median*100, low*100, top1_all*100))
def dotproduct_similarity(A, B):
feat_dim = A.size(1)
AB = torch.mm(A, B.t())
return AB
def forward(weights):
total_logits = []
for i in range(testsize // batch_size + 1):
# if i%10 == 0:
# print('{}/{}'.format(i, testsize // batch_size + 1))
feat = testset['feats'][batch_size*i:min(batch_size*(i+1), testsize)]
feat = torch.Tensor(feat)
logits = dotproduct_similarity(feat, weights)
total_logits.append(logits)
total_logits = torch.cat(total_logits)
return total_logits
def pnorm(weights, p):
normB = torch.norm(weights, 2, 1)
ws = weights.clone()
for i in range(weights.size(0)):
ws[i] = ws[i] / torch.pow(normB[i], p)
return ws
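# tau-normalization: rescale each classifier weight vector by its L2 norm raised to
# the power p, i.e. w_i <- w_i / ||w_i||^p. The sweep below evaluates p over [0, 2]
# (p = 0 keeps the original classifier, p = 1 fully normalizes the weight norms).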
for p in np.linspace(0, 2, 21):
ws = pnorm(weights, p)
logits = forward(ws)
preds = logits2preds(logits, c_labels)
preds2accs(preds, testset, trainset)
|
classifier-balancing-main
|
tau_norm.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import yaml
import csv
import h5py
class Logger(object):
def __init__(self, logdir):
self.logdir = logdir
if not os.path.isdir(logdir):
os.makedirs(logdir)
self.cfg_file = os.path.join(self.logdir, 'cfg.yaml')
self.acc_file = os.path.join(self.logdir, 'acc.csv')
self.loss_file = os.path.join(self.logdir, 'loss.csv')
self.ws_file = os.path.join(self.logdir, 'ws.h5')
self.acc_keys = None
self.loss_keys = None
self.logging_ws = False
def log_cfg(self, cfg):
print('===> Saving cfg parameters to: ', self.cfg_file)
with open(self.cfg_file, 'w') as f:
yaml.dump(cfg, f)
def log_acc(self, accs):
if self.acc_keys is None:
self.acc_keys = [k for k in accs.keys()]
with open(self.acc_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.acc_keys)
writer.writeheader()
writer.writerow(accs)
else:
with open(self.acc_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.acc_keys)
writer.writerow(accs)
def log_loss(self, losses):
# valid_losses = {k: v for k, v in losses.items() if v is not None}
valid_losses = losses
if self.loss_keys is None:
self.loss_keys = [k for k in valid_losses.keys()]
with open(self.loss_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.loss_keys)
writer.writeheader()
writer.writerow(valid_losses)
else:
with open(self.loss_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.loss_keys)
writer.writerow(valid_losses)
def log_ws(self, e, ws):
mode = 'a' if self.logging_ws else 'w'
self.logging_ws = True
key = 'Epoch{:02d}'.format(e)
with h5py.File(self.ws_file, mode) as f:
g = f.create_group(key)
for k, v in ws.items():
g.create_dataset(k, data=v)
|
classifier-balancing-main
|
logger.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
import importlib
import pdb
def source_import(file_path):
"""This function imports python module directly from source code using importlib"""
spec = importlib.util.spec_from_file_location('', file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def batch_show(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(20,20))
plt.imshow(inp)
if title is not None:
plt.title(title)
def print_write(print_str, log_file):
print(*print_str)
if log_file is None:
return
with open(log_file, 'a') as f:
print(*print_str, file=f)
def init_weights(model, weights_path, caffe=False, classifier=False):
"""Initialize weights"""
print('Pretrained %s weights path: %s' % ('classifier' if classifier else 'feature model',
weights_path))
weights = torch.load(weights_path)
if not classifier:
if caffe:
weights = {k: weights[k] if k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['feat_model']
weights = {k: weights['module.' + k] if 'module.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
else:
weights = weights['state_dict_best']['classifier']
weights = {k: weights['module.fc.' + k] if 'module.fc.' + k in weights else model.state_dict()[k]
for k in model.state_dict()}
model.load_state_dict(weights)
return model
def shot_acc (preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):
if isinstance(train_data, np.ndarray):
training_labels = np.array(train_data).astype(int)
else:
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
if len(many_shot) == 0:
many_shot.append(0)
if len(median_shot) == 0:
median_shot.append(0)
if len(low_shot) == 0:
low_shot.append(0)
if acc_per_cls:
class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)]
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot), class_accs
else:
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
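# shot_acc buckets classes by training frequency (many-shot: more than many_shot_thr
# training samples, low-shot: fewer than low_shot_thr, median-shot: in between) and
# reports the mean per-class accuracy of each bucket; empty buckets are reported as 0.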
def weighted_shot_acc (preds, labels, ws, train_data, many_shot_thr=100, low_shot_thr=20):
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(ws[labels==l].sum())
class_correct.append(((preds[labels==l] == labels[labels==l]) * ws[labels==l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def F_measure(preds, labels, openset=False, theta=None):
if openset:
# f1 score for openset evaluation
true_pos = 0.
false_pos = 0.
false_neg = 0.
for i in range(len(labels)):
true_pos += 1 if preds[i] == labels[i] and labels[i] != -1 else 0
false_pos += 1 if preds[i] != labels[i] and labels[i] != -1 and preds[i] != -1 else 0
false_neg += 1 if preds[i] != labels[i] and labels[i] == -1 else 0
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
return 2 * ((precision * recall) / (precision + recall + 1e-12))
else:
# Regular f1 score
return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro')
def mic_acc_cal(preds, labels):
if isinstance(labels, tuple):
assert len(labels) == 3
targets_a, targets_b, lam = labels
acc_mic_top1 = (lam * preds.eq(targets_a.data).cpu().sum().float() \
+ (1 - lam) * preds.eq(targets_b.data).cpu().sum().float()) / len(preds)
else:
acc_mic_top1 = (preds == labels).sum().item() / len(labels)
return acc_mic_top1
def weighted_mic_acc_cal(preds, labels, ws):
acc_mic_top1 = ws[preds == labels].sum() / ws.sum()
return acc_mic_top1
def class_count (data):
labels = np.array(data.dataset.labels)
class_data_num = []
for l in np.unique(labels):
class_data_num.append(len(labels[labels == l]))
return class_data_num
# def dataset_dist (in_loader):
# """Example, dataset_dist(data['train'][0])"""
# label_list = np.array([x[1] for x in in_loader.dataset.samples])
# total_num = len(data_list)
# distribution = []
# for l in np.unique(label_list):
# distribution.append((l, len(label_list[label_list == l])/total_num))
# return distribution
# New Added
def torch2numpy(x):
if isinstance(x, torch.Tensor):
return x.detach().cpu().numpy()
elif isinstance(x, (list, tuple)):
return tuple([torch2numpy(xi) for xi in x])
else:
return x
def logits2score(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy()
return score
def logits2entropy(logits):
scores = F.softmax(logits, dim=1)
scores = scores.cpu().numpy() + 1e-30
ent = -scores * np.log(scores)
ent = np.sum(ent, 1)
return ent
def logits2CE(logits, labels):
scores = F.softmax(logits, dim=1)
score = scores.gather(1, labels.view(-1, 1))
score = score.squeeze().cpu().numpy() + 1e-30
ce = -np.log(score)
return ce
def get_priority(ptype, logits, labels):
if ptype == 'score':
ws = 1 - logits2score(logits, labels)
elif ptype == 'entropy':
ws = logits2entropy(logits)
elif ptype == 'CE':
ws = logits2CE(logits, labels)
return ws
def get_value(oldv, newv):
if newv is not None:
return newv
else:
return oldv
|
classifier-balancing-main
|
utils.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import os
import copy
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from utils import *
from logger import Logger
import time
import numpy as np
import warnings
import pdb
class model ():
def __init__(self, config, data, test=False):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.config = config
self.training_opt = self.config['training_opt']
self.memory = self.config['memory']
self.data = data
self.test_mode = test
self.num_gpus = torch.cuda.device_count()
self.do_shuffle = config['shuffle'] if 'shuffle' in config else False
# Setup logger
self.logger = Logger(self.training_opt['log_dir'])
# Initialize model
self.init_models()
# Load pre-trained model parameters
if 'model_dir' in self.config and self.config['model_dir'] is not None:
self.load_model(self.config['model_dir'])
# Under training mode, initialize training steps, optimizers, schedulers, criterions, and centroids
if not self.test_mode:
# If using steps for training, we need to calculate training steps
# for each epoch based on actual number of training data instead of
# oversampled data number
print('Using steps for training.')
self.training_data_num = len(self.data['train'].dataset)
self.epoch_steps = int(self.training_data_num \
/ self.training_opt['batch_size'])
# Initialize model optimizer and scheduler
print('Initializing model optimizer.')
self.scheduler_params = self.training_opt['scheduler_params']
self.model_optimizer, \
self.model_optimizer_scheduler = self.init_optimizers(self.model_optim_params_list)
self.init_criterions()
if self.memory['init_centroids']:
self.criterions['FeatureLoss'].centroids.data = \
self.centroids_cal(self.data['train_plain'])
# Set up log file
self.log_file = os.path.join(self.training_opt['log_dir'], 'log.txt')
if os.path.isfile(self.log_file):
os.remove(self.log_file)
self.logger.log_cfg(self.config)
else:
if 'KNNClassifier' in self.config['networks']['classifier']['def_file']:
self.load_model()
if not self.networks['classifier'].initialized:
cfeats = self.get_knncentroids()
print('===> Saving features to %s' %
os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'))
with open(os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'), 'wb') as f:
pickle.dump(cfeats, f)
self.networks['classifier'].update(cfeats)
self.log_file = None
def init_models(self, optimizer=True):
networks_defs = self.config['networks']
self.networks = {}
self.model_optim_params_list = []
print("Using", torch.cuda.device_count(), "GPUs.")
for key, val in networks_defs.items():
# Networks
def_file = val['def_file']
# model_args = list(val['params'].values())
# model_args.append(self.test_mode)
model_args = val['params']
model_args.update({'test': self.test_mode})
self.networks[key] = source_import(def_file).create_model(**model_args)
if 'KNNClassifier' in type(self.networks[key]).__name__:
# Put the KNN classifier on one single GPU
self.networks[key] = self.networks[key].cuda()
else:
self.networks[key] = nn.DataParallel(self.networks[key]).cuda()
if 'fix' in val and val['fix']:
print('Freezing feature weights except for self attention weights (if exist).')
for param_name, param in self.networks[key].named_parameters():
# Freeze all parameters except self attention parameters
if 'selfatt' not in param_name and 'fc' not in param_name:
param.requires_grad = False
# print(' | ', param_name, param.requires_grad)
# Optimizer list
optim_params = val['optim_params']
self.model_optim_params_list.append({'params': self.networks[key].parameters(),
'lr': optim_params['lr'],
'momentum': optim_params['momentum'],
'weight_decay': optim_params['weight_decay']})
def init_criterions(self):
criterion_defs = self.config['criterions']
self.criterions = {}
self.criterion_weights = {}
for key, val in criterion_defs.items():
def_file = val['def_file']
loss_args = list(val['loss_params'].values())
self.criterions[key] = source_import(def_file).create_loss(*loss_args).cuda()
self.criterion_weights[key] = val['weight']
if val['optim_params']:
print('Initializing criterion optimizer.')
optim_params = val['optim_params']
optim_params = [{'params': self.criterions[key].parameters(),
'lr': optim_params['lr'],
'momentum': optim_params['momentum'],
'weight_decay': optim_params['weight_decay']}]
# Initialize criterion optimizer and scheduler
self.criterion_optimizer, \
self.criterion_optimizer_scheduler = self.init_optimizers(optim_params)
else:
self.criterion_optimizer = None
def init_optimizers(self, optim_params):
optimizer = optim.SGD(optim_params)
if self.config['coslr']:
print("===> Using coslr eta_min={}".format(self.config['endlr']))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, self.training_opt['num_epochs'], eta_min=self.config['endlr'])
else:
scheduler = optim.lr_scheduler.StepLR(optimizer,
step_size=self.scheduler_params['step_size'],
gamma=self.scheduler_params['gamma'])
return optimizer, scheduler
def batch_forward (self, inputs, labels=None, centroids=False, feature_ext=False, phase='train'):
'''
This is a general single batch running function.
'''
# Calculate Features
self.features, self.feature_maps = self.networks['feat_model'](inputs)
# If not just extracting features, calculate logits
if not feature_ext:
# During training, fetch the centroids from the feature criterion if needed
if phase != 'test':
if centroids and 'FeatureLoss' in self.criterions.keys():
self.centroids = self.criterions['FeatureLoss'].centroids.data
else:
self.centroids = None
if self.centroids is not None:
centroids_ = torch.cat([self.centroids] * self.num_gpus)
else:
centroids_ = self.centroids
# Calculate logits with classifier
self.logits, self.direct_memory_feature = self.networks['classifier'](self.features, centroids_)
def batch_backward(self):
# Zero out optimizer gradients
self.model_optimizer.zero_grad()
if self.criterion_optimizer:
self.criterion_optimizer.zero_grad()
# Back-propagation from loss outputs
self.loss.backward()
# Step optimizers
self.model_optimizer.step()
if self.criterion_optimizer:
self.criterion_optimizer.step()
def batch_loss(self, labels):
self.loss = 0
# First, apply performance loss
if 'PerformanceLoss' in self.criterions.keys():
self.loss_perf = self.criterions['PerformanceLoss'](self.logits, labels)
self.loss_perf *= self.criterion_weights['PerformanceLoss']
self.loss += self.loss_perf
# Apply loss on features if set up
if 'FeatureLoss' in self.criterions.keys():
self.loss_feat = self.criterions['FeatureLoss'](self.features, labels)
self.loss_feat = self.loss_feat * self.criterion_weights['FeatureLoss']
# Add feature loss to total loss
self.loss += self.loss_feat
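# The total loss is the weighted classification ('PerformanceLoss') term plus an
# optional weighted feature/centroid ('FeatureLoss') term when memory centroids are used.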
def shuffle_batch(self, x, y):
index = torch.randperm(x.size(0))
x = x[index]
y = y[index]
return x, y
def train(self):
# When training the network
print_str = ['Phase: train']
print_write(print_str, self.log_file)
time.sleep(0.25)
print_write(['Do shuffle??? --- ', self.do_shuffle], self.log_file)
# Initialize best model
best_model_weights = {}
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
best_acc = 0.0
best_epoch = 0
# best_centroids = self.centroids
end_epoch = self.training_opt['num_epochs']
# Loop over epochs
for epoch in range(1, end_epoch + 1):
for model in self.networks.values():
model.train()
torch.cuda.empty_cache()
# Set model modes and set scheduler
# In training, step optimizer scheduler and set model to train()
self.model_optimizer_scheduler.step()
if self.criterion_optimizer:
self.criterion_optimizer_scheduler.step()
# Iterate over dataset
total_preds = []
total_labels = []
for step, (inputs, labels, indexes) in enumerate(self.data['train']):
# Break when step equal to epoch step
if step == self.epoch_steps:
break
if self.do_shuffle:
inputs, labels = self.shuffle_batch(inputs, labels)
inputs, labels = inputs.cuda(), labels.cuda()
# If on training phase, enable gradients
with torch.set_grad_enabled(True):
# If training, forward with loss, and no top 5 accuracy calculation
self.batch_forward(inputs, labels,
centroids=self.memory['centroids'],
phase='train')
self.batch_loss(labels)
self.batch_backward()
# Tracking predictions
_, preds = torch.max(self.logits, 1)
total_preds.append(torch2numpy(preds))
total_labels.append(torch2numpy(labels))
# Output minibatch training results
if step % self.training_opt['display_step'] == 0:
minibatch_loss_feat = self.loss_feat.item() \
if 'FeatureLoss' in self.criterions.keys() else None
minibatch_loss_perf = self.loss_perf.item() \
if 'PerformanceLoss' in self.criterions else None
minibatch_loss_total = self.loss.item()
minibatch_acc = mic_acc_cal(preds, labels)
print_str = ['Epoch: [%d/%d]'
% (epoch, self.training_opt['num_epochs']),
'Step: %5d'
% (step),
'Minibatch_loss_feature: %.3f'
% (minibatch_loss_feat) if minibatch_loss_feat else '',
'Minibatch_loss_performance: %.3f'
% (minibatch_loss_perf) if minibatch_loss_perf else '',
'Minibatch_accuracy_micro: %.3f'
% (minibatch_acc)]
print_write(print_str, self.log_file)
loss_info = {
'Epoch': epoch,
'Step': step,
'Total': minibatch_loss_total,
'CE': minibatch_loss_perf,
'feat': minibatch_loss_feat
}
self.logger.log_loss(loss_info)
# Update priority weights if using PrioritizedSampler
# if self.training_opt['sampler'] and \
# self.training_opt['sampler']['type'] == 'PrioritizedSampler':
if hasattr(self.data['train'].sampler, 'update_weights'):
if hasattr(self.data['train'].sampler, 'ptype'):
ptype = self.data['train'].sampler.ptype
else:
ptype = 'score'
ws = get_priority(ptype, self.logits.detach(), labels)
# ws = logits2score(self.logits.detach(), labels)
inlist = [indexes.cpu().numpy(), ws]
if self.training_opt['sampler']['type'] == 'ClassPrioritySampler':
inlist.append(labels.cpu().numpy())
self.data['train'].sampler.update_weights(*inlist)
# self.data['train'].sampler.update_weights(indexes.cpu().numpy(), ws)
if hasattr(self.data['train'].sampler, 'get_weights'):
self.logger.log_ws(epoch, self.data['train'].sampler.get_weights())
if hasattr(self.data['train'].sampler, 'reset_weights'):
self.data['train'].sampler.reset_weights(epoch)
# After every epoch, validation
rsls = {'epoch': epoch}
rsls_train = self.eval_with_preds(total_preds, total_labels)
rsls_eval = self.eval(phase='val')
rsls.update(rsls_train)
rsls.update(rsls_eval)
# Reset class weights for sampling if pri_mode is valid
if hasattr(self.data['train'].sampler, 'reset_priority'):
ws = get_priority(self.data['train'].sampler.ptype,
self.total_logits.detach(),
self.total_labels)
self.data['train'].sampler.reset_priority(ws, self.total_labels.cpu().numpy())
# Log results
self.logger.log_acc(rsls)
# Under validation, the best model needs to be updated
if self.eval_acc_mic_top1 > best_acc:
best_epoch = epoch
best_acc = self.eval_acc_mic_top1
best_centroids = self.centroids
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
print('===> Saving checkpoint')
self.save_latest(epoch)
print()
print('Training Complete.')
print_str = ['Best validation accuracy is %.3f at epoch %d' % (best_acc, best_epoch)]
print_write(print_str, self.log_file)
# Save the best model and best centroids if calculated
self.save_model(epoch, best_epoch, best_model_weights, best_acc, centroids=best_centroids)
# Test on the test set
self.reset_model(best_model_weights)
self.eval('test' if 'test' in self.data else 'val')
print('Done')
def eval_with_preds(self, preds, labels):
# Count the number of examples
n_total = sum([len(p) for p in preds])
# Split the examples into normal and mixup
normal_preds, normal_labels = [], []
mixup_preds, mixup_labels1, mixup_labels2, mixup_ws = [], [], [], []
for p, l in zip(preds, labels):
if isinstance(l, tuple):
mixup_preds.append(p)
mixup_labels1.append(l[0])
mixup_labels2.append(l[1])
mixup_ws.append(l[2] * np.ones_like(l[0]))
else:
normal_preds.append(p)
normal_labels.append(l)
# Calculate normal prediction accuracy
rsl = {'train_all':0., 'train_many':0., 'train_median':0., 'train_low': 0.}
if len(normal_preds) > 0:
normal_preds, normal_labels = list(map(np.concatenate, [normal_preds, normal_labels]))
n_top1 = mic_acc_cal(normal_preds, normal_labels)
n_top1_many, \
n_top1_median, \
n_top1_low, = shot_acc(normal_preds, normal_labels, self.data['train'])
rsl['train_all'] += len(normal_preds) / n_total * n_top1
rsl['train_many'] += len(normal_preds) / n_total * n_top1_many
rsl['train_median'] += len(normal_preds) / n_total * n_top1_median
rsl['train_low'] += len(normal_preds) / n_total * n_top1_low
# Calculate mixup prediction accuracy
if len(mixup_preds) > 0:
mixup_preds, mixup_labels, mixup_ws = \
list(map(np.concatenate, [mixup_preds*2, mixup_labels1+mixup_labels2, mixup_ws]))
mixup_ws = np.concatenate([mixup_ws, 1-mixup_ws])
n_top1 = weighted_mic_acc_cal(mixup_preds, mixup_labels, mixup_ws)
n_top1_many, \
n_top1_median, \
n_top1_low, = weighted_shot_acc(mixup_preds, mixup_labels, mixup_ws, self.data['train'])
rsl['train_all'] += len(mixup_preds) / 2 / n_total * n_top1
rsl['train_many'] += len(mixup_preds) / 2 / n_total * n_top1_many
rsl['train_median'] += len(mixup_preds) / 2 / n_total * n_top1_median
rsl['train_low'] += len(mixup_preds) / 2 / n_total * n_top1_low
# Top-1 accuracy and additional string
print_str = ['\n Training acc Top1: %.3f \n' % (rsl['train_all']),
'Many_top1: %.3f' % (rsl['train_many']),
'Median_top1: %.3f' % (rsl['train_median']),
'Low_top1: %.3f' % (rsl['train_low']),
'\n']
print_write(print_str, self.log_file)
return rsl
def eval(self, phase='val', openset=False, save_feat=False):
print_str = ['Phase: %s' % (phase)]
print_write(print_str, self.log_file)
time.sleep(0.25)
if openset:
print('Under openset test mode. Open threshold is %.1f'
% self.training_opt['open_threshold'])
torch.cuda.empty_cache()
# In validation or testing mode, set model to eval() and initialize running loss/correct
for model in self.networks.values():
model.eval()
self.total_logits = torch.empty((0, self.training_opt['num_classes'])).cuda()
self.total_labels = torch.empty(0, dtype=torch.long).cuda()
self.total_paths = np.empty(0)
get_feat_only = save_feat
feats_all, labels_all, idxs_all, logits_all = [], [], [], []
featmaps_all = []
# Iterate over dataset
for inputs, labels, paths in tqdm(self.data[phase]):
inputs, labels = inputs.cuda(), labels.cuda()
# If on training phase, enable gradients
with torch.set_grad_enabled(False):
# In validation or testing
self.batch_forward(inputs, labels,
centroids=self.memory['centroids'],
phase=phase)
if not get_feat_only:
self.total_logits = torch.cat((self.total_logits, self.logits))
self.total_labels = torch.cat((self.total_labels, labels))
self.total_paths = np.concatenate((self.total_paths, paths))
if get_feat_only:
logits_all.append(self.logits.cpu().numpy())
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(paths.numpy())
if get_feat_only:
typ = 'feat'
if phase == 'train_plain':
name = 'train{}_all.pkl'.format(typ)
elif phase == 'test':
name = 'test{}_all.pkl'.format(typ)
elif phase == 'val':
name = 'val{}_all.pkl'.format(typ)
fname = os.path.join(self.training_opt['log_dir'], name)
print('===> Saving feats to ' + fname)
with open(fname, 'wb') as f:
pickle.dump({
'feats': np.concatenate(feats_all),
'labels': np.concatenate(labels_all),
'idxs': np.concatenate(idxs_all),
},
f, protocol=4)
return
probs, preds = F.softmax(self.total_logits.detach(), dim=1).max(dim=1)
if openset:
preds[probs < self.training_opt['open_threshold']] = -1
self.openset_acc = mic_acc_cal(preds[self.total_labels == -1],
self.total_labels[self.total_labels == -1])
print('\n\nOpenset Accuracy: %.3f' % self.openset_acc)
# Calculate the overall accuracy and F measurement
self.eval_acc_mic_top1= mic_acc_cal(preds[self.total_labels != -1],
self.total_labels[self.total_labels != -1])
self.eval_f_measure = F_measure(preds, self.total_labels, openset=openset,
theta=self.training_opt['open_threshold'])
self.many_acc_top1, \
self.median_acc_top1, \
self.low_acc_top1, \
self.cls_accs = shot_acc(preds[self.total_labels != -1],
self.total_labels[self.total_labels != -1],
self.data['train'],
acc_per_cls=True)
# Top-1 accuracy and additional string
print_str = ['\n\n',
'Phase: %s'
% (phase),
'\n\n',
'Evaluation_accuracy_micro_top1: %.3f'
% (self.eval_acc_mic_top1),
'\n',
'Averaged F-measure: %.3f'
% (self.eval_f_measure),
'\n',
'Many_shot_accuracy_top1: %.3f'
% (self.many_acc_top1),
'Median_shot_accuracy_top1: %.3f'
% (self.median_acc_top1),
'Low_shot_accuracy_top1: %.3f'
% (self.low_acc_top1),
'\n']
rsl = {phase + '_all': self.eval_acc_mic_top1,
phase + '_many': self.many_acc_top1,
phase + '_median': self.median_acc_top1,
phase + '_low': self.low_acc_top1,
phase + '_fscore': self.eval_f_measure}
if phase == 'val':
print_write(print_str, self.log_file)
else:
acc_str = ["{:.1f} \t {:.1f} \t {:.1f} \t {:.1f}".format(
self.many_acc_top1 * 100,
self.median_acc_top1 * 100,
self.low_acc_top1 * 100,
self.eval_acc_mic_top1 * 100)]
if self.log_file is not None and os.path.exists(self.log_file):
print_write(print_str, self.log_file)
print_write(acc_str, self.log_file)
else:
print(*print_str)
print(*acc_str)
if phase == 'test':
with open(os.path.join(self.training_opt['log_dir'], 'cls_accs.pkl'), 'wb') as f:
pickle.dump(self.cls_accs, f)
return rsl
def centroids_cal(self, data, save_all=False):
centroids = torch.zeros(self.training_opt['num_classes'],
self.training_opt['feature_dim']).cuda()
print('Calculating centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
feats_all, labels_all, idxs_all = [], [], []
# Calculate initial centroids only on training data.
with torch.set_grad_enabled(False):
for inputs, labels, idxs in tqdm(data):
inputs, labels = inputs.cuda(), labels.cuda()
# Calculate Features of each training data
self.batch_forward(inputs, feature_ext=True)
# Add all calculated features to center tensor
for i in range(len(labels)):
label = labels[i]
centroids[label] += self.features[i]
# Save features if required
if save_all:
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(idxs.numpy())
if save_all:
fname = os.path.join(self.training_opt['log_dir'], 'feats_all.pkl')
with open(fname, 'wb') as f:
pickle.dump({'feats': np.concatenate(feats_all),
'labels': np.concatenate(labels_all),
'idxs': np.concatenate(idxs_all)},
f)
# Average summed features with class count
centroids /= torch.tensor(class_count(data)).float().unsqueeze(1).cuda()
return centroids
def get_knncentroids(self):
datakey = 'train_plain'
assert datakey in self.data
print('===> Calculating KNN centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
feats_all, labels_all = [], []
# Calculate initial centroids only on training data.
with torch.set_grad_enabled(False):
for inputs, labels, idxs in tqdm(self.data[datakey]):
inputs, labels = inputs.cuda(), labels.cuda()
                # Calculate features of each training sample
self.batch_forward(inputs, feature_ext=True)
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
feats = np.concatenate(feats_all)
labels = np.concatenate(labels_all)
featmean = feats.mean(axis=0)
def get_centroids(feats_, labels_):
centroids = []
for i in np.unique(labels_):
centroids.append(np.mean(feats_[labels_==i], axis=0))
return np.stack(centroids)
        # Get unnormalized centroids
un_centers = get_centroids(feats, labels)
        # Get l2n centroids
l2n_feats = torch.Tensor(feats.copy())
norm_l2n = torch.norm(l2n_feats, 2, 1, keepdim=True)
l2n_feats = l2n_feats / norm_l2n
l2n_centers = get_centroids(l2n_feats.numpy(), labels)
        # Get cl2n centroids
cl2n_feats = torch.Tensor(feats.copy())
cl2n_feats = cl2n_feats - torch.Tensor(featmean)
norm_cl2n = torch.norm(cl2n_feats, 2, 1, keepdim=True)
cl2n_feats = cl2n_feats / norm_cl2n
cl2n_centers = get_centroids(cl2n_feats.numpy(), labels)
return {'mean': featmean,
'uncs': un_centers,
'l2ncs': l2n_centers,
'cl2ncs': cl2n_centers}
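    # Editor-added note (illustrative): the dict returned above matches the format that
    # models/KNNClassifier.KNNClassifier.update() expects -- a 'mean' entry plus one
    # '<feat_type>cs' entry per feature transform ('uncs', 'l2ncs', 'cl2ncs').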
def reset_model(self, model_state):
for key, model in self.networks.items():
weights = model_state[key]
weights = {k: weights[k] for k in weights if k in model.state_dict()}
model.load_state_dict(weights)
def load_model(self, model_dir=None):
model_dir = self.training_opt['log_dir'] if model_dir is None else model_dir
if not model_dir.endswith('.pth'):
model_dir = os.path.join(model_dir, 'final_model_checkpoint.pth')
print('Validation on the best model.')
print('Loading model from %s' % (model_dir))
checkpoint = torch.load(model_dir)
model_state = checkpoint['state_dict_best']
self.centroids = checkpoint['centroids'] if 'centroids' in checkpoint else None
for key, model in self.networks.items():
# if not self.test_mode and key == 'classifier':
if not self.test_mode and \
'DotProductClassifier' in self.config['networks'][key]['def_file']:
# Skip classifier initialization
                print('Skipping classifier initialization')
continue
weights = model_state[key]
weights = {k: weights[k] for k in weights if k in model.state_dict()}
x = model.state_dict()
x.update(weights)
model.load_state_dict(x)
def save_latest(self, epoch):
model_weights = {}
model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
model_states = {
'epoch': epoch,
'state_dict': model_weights
}
model_dir = os.path.join(self.training_opt['log_dir'],
'latest_model_checkpoint.pth')
torch.save(model_states, model_dir)
def save_model(self, epoch, best_epoch, best_model_weights, best_acc, centroids=None):
model_states = {'epoch': epoch,
'best_epoch': best_epoch,
'state_dict_best': best_model_weights,
'best_acc': best_acc,
'centroids': centroids}
model_dir = os.path.join(self.training_opt['log_dir'],
'final_model_checkpoint.pth')
torch.save(model_states, model_dir)
def output_logits(self, openset=False):
filename = os.path.join(self.training_opt['log_dir'],
'logits_%s'%('open' if openset else 'close'))
print("Saving total logits to: %s.npz" % filename)
np.savez(filename,
logits=self.total_logits.detach().cpu().numpy(),
labels=self.total_labels.detach().cpu().numpy(),
paths=self.total_paths)
|
classifier-balancing-main
|
run_networks.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import os
import argparse
import pprint
from data import dataloader
from run_networks import model
import warnings
import yaml
from utils import source_import, get_value
data_root = {'ImageNet': '/datasets01_101/imagenet_full_size/061417',
'Places': '/datasets01_101/Places365/041019',
'iNaturalist18': '/checkpoint/bykang/iNaturalist18'}
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default=None, type=str)
parser.add_argument('--test', default=False, action='store_true')
parser.add_argument('--batch_size', type=int, default=None)
parser.add_argument('--test_open', default=False, action='store_true')
parser.add_argument('--output_logits', default=False)
parser.add_argument('--model_dir', type=str, default=None)
parser.add_argument('--save_feat', type=str, default='')
# KNN testing parameters
parser.add_argument('--knn', default=False, action='store_true')
parser.add_argument('--feat_type', type=str, default='cl2n')
parser.add_argument('--dist_type', type=str, default='l2')
# Learnable tau
parser.add_argument('--val_as_train', default=False, action='store_true')
args = parser.parse_args()
def update(config, args):
# Change parameters
config['model_dir'] = get_value(config['model_dir'], args.model_dir)
config['training_opt']['batch_size'] = \
get_value(config['training_opt']['batch_size'], args.batch_size)
# Testing with KNN
if args.knn and args.test:
training_opt = config['training_opt']
classifier_param = {
'feat_dim': training_opt['feature_dim'],
'num_classes': training_opt['num_classes'],
'feat_type': args.feat_type,
'dist_type': args.dist_type,
'log_dir': training_opt['log_dir']}
classifier = {
'def_file': './models/KNNClassifier.py',
'params': classifier_param,
'optim_params': config['networks']['classifier']['optim_params']}
config['networks']['classifier'] = classifier
return config
# ============================================================================
# LOAD CONFIGURATIONS
with open(args.cfg) as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
config = update(config, args)
test_mode = args.test
test_open = args.test_open
if test_open:
test_mode = True
output_logits = args.output_logits
training_opt = config['training_opt']
relatin_opt = config['memory']
dataset = training_opt['dataset']
if not os.path.isdir(training_opt['log_dir']):
os.makedirs(training_opt['log_dir'])
print('Loading dataset from: %s' % data_root[dataset.rstrip('_LT')])
pprint.pprint(config)
def split2phase(split):
if split == 'train' and args.val_as_train:
return 'train_val'
else:
return split
if not test_mode:
sampler_defs = training_opt['sampler']
if sampler_defs:
if sampler_defs['type'] == 'ClassAwareSampler':
sampler_dic = {
'sampler': source_import(sampler_defs['def_file']).get_sampler(),
'params': {'num_samples_cls': sampler_defs['num_samples_cls']}
}
elif sampler_defs['type'] in ['MixedPrioritizedSampler',
'ClassPrioritySampler']:
sampler_dic = {
'sampler': source_import(sampler_defs['def_file']).get_sampler(),
'params': {k: v for k, v in sampler_defs.items() \
if k not in ['type', 'def_file']}
}
else:
sampler_dic = None
splits = ['train', 'train_plain', 'val']
if dataset not in ['iNaturalist18', 'ImageNet']:
splits.append('test')
data = {x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
dataset=dataset, phase=split2phase(x),
batch_size=training_opt['batch_size'],
sampler_dic=sampler_dic,
num_workers=training_opt['num_workers'])
for x in splits}
training_model = model(config, data, test=False)
training_model.train()
else:
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
UserWarning)
print('Under testing phase, we load training data simply to calculate \
training data number for each class.')
if 'iNaturalist' in training_opt['dataset']:
splits = ['train', 'val']
test_split = 'val'
else:
splits = ['train', 'val', 'test']
test_split = 'test'
if 'ImageNet' == training_opt['dataset']:
splits = ['train', 'val']
test_split = 'val'
if args.knn or True:
splits.append('train_plain')
data = {x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
dataset=dataset, phase=x,
batch_size=training_opt['batch_size'],
sampler_dic=None,
test_open=test_open,
num_workers=training_opt['num_workers'],
shuffle=False)
for x in splits}
training_model = model(config, data, test=True)
# training_model.load_model()
training_model.load_model(args.model_dir)
if args.save_feat in ['train_plain', 'val', 'test']:
saveit = True
test_split = args.save_feat
else:
saveit = False
training_model.eval(phase=test_split, openset=test_open, save_feat=saveit)
if output_logits:
training_model.output_logits(openset=test_open)
print('ALL COMPLETED.')
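# Editor-added note (illustrative): typical invocations, assuming config files like the
# ones shipped with the repo (the exact YAML names below are examples, not guaranteed):
#   python main.py --cfg ./config/ImageNet_LT/feat_uniform.yaml
#   python main.py --cfg ./config/ImageNet_LT/feat_uniform.yaml --test
#   python main.py --cfg ./config/ImageNet_LT/feat_uniform.yaml --test --knn --feat_type cl2n --dist_type l2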
|
classifier-balancing-main
|
main.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
from torch import nn
from torch.nn import functional as F
import pdb
class ModulatedAttLayer(nn.Module):
def __init__(self, in_channels, reduction = 2, mode='embedded_gaussian'):
super(ModulatedAttLayer, self).__init__()
self.in_channels = in_channels
self.reduction = reduction
self.inter_channels = in_channels // reduction
self.mode = mode
assert mode in ['embedded_gaussian']
self.g = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size = 1)
self.theta = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size = 1)
self.phi = nn.Conv2d(self.in_channels, self.inter_channels, kernel_size = 1)
self.conv_mask = nn.Conv2d(self.inter_channels, self.in_channels, kernel_size = 1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc_spatial = nn.Linear(7 * 7 * self.in_channels, 7 * 7)
self.init_weights()
def init_weights(self):
msra_list = [self.g, self.theta, self.phi]
for m in msra_list:
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
self.conv_mask.weight.data.zero_()
def embedded_gaussian(self, x):
        # embedded_gaussian computes self-attention, which may not be strong enough on its own
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
map_t_p = torch.matmul(theta_x, phi_x)
mask_t_p = F.softmax(map_t_p, dim=-1)
map_ = torch.matmul(mask_t_p, g_x)
map_ = map_.permute(0, 2, 1).contiguous()
map_ = map_.view(batch_size, self.inter_channels, x.size(2), x.size(3))
mask = self.conv_mask(map_)
x_flatten = x.view(-1, 7 * 7 * self.in_channels)
spatial_att = self.fc_spatial(x_flatten)
spatial_att = spatial_att.softmax(dim=1)
spatial_att = spatial_att.view(-1, 7, 7).unsqueeze(1)
spatial_att = spatial_att.expand(-1, self.in_channels, -1, -1)
final = spatial_att * mask + x
return final, [x, spatial_att, mask]
def forward(self, x):
if self.mode == 'embedded_gaussian':
output, feature_maps = self.embedded_gaussian(x)
else:
            raise NotImplementedError("The code has not been implemented.")
return output, feature_maps
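# --- Illustrative usage sketch (editor-added, not part of the original file): because of
# the hard-coded 7x7 sizes in avgpool/fc_spatial, the layer expects final-stage feature
# maps of shape (N, C, 7, 7). ---
if __name__ == '__main__':
    att = ModulatedAttLayer(in_channels=64)
    feats = torch.randn(2, 64, 7, 7)
    out, (identity, spatial_att, mask) = att(feats)
    print(out.shape)  # torch.Size([2, 64, 7, 7])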
|
classifier-balancing-main
|
layers/ModulatedAttLayer.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch.nn as nn
def create_loss ():
print('Loading Softmax Loss.')
return nn.CrossEntropyLoss()
|
classifier-balancing-main
|
loss/SoftmaxLoss.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import torch.nn as nn
from torch.autograd.function import Function
import pdb
class DiscCentroidsLoss(nn.Module):
def __init__(self, num_classes, feat_dim, size_average=True):
super(DiscCentroidsLoss, self).__init__()
self.num_classes = num_classes
self.centroids = nn.Parameter(torch.randn(num_classes, feat_dim))
self.disccentroidslossfunc = DiscCentroidsLossFunc.apply
self.feat_dim = feat_dim
self.size_average = size_average
def forward(self, feat, label):
batch_size = feat.size(0)
# calculate attracting loss
feat = feat.view(batch_size, -1)
# To check the dim of centroids and features
if feat.size(1) != self.feat_dim:
raise ValueError("Center's dim: {0} should be equal to input feature's \
dim: {1}".format(self.feat_dim,feat.size(1)))
batch_size_tensor = feat.new_empty(1).fill_(batch_size if self.size_average else 1)
loss_attract = self.disccentroidslossfunc(feat, label, self.centroids, batch_size_tensor).squeeze()
# calculate repelling loss
distmat = torch.pow(feat, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centroids, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(feat, self.centroids.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long().cuda()
labels_expand = label.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels_expand.eq(classes.expand(batch_size, self.num_classes))
distmat_neg = distmat
distmat_neg[mask] = 0.0
# margin = 50.0
margin = 10.0
loss_repel = torch.clamp(margin - distmat_neg.sum() / (batch_size * self.num_classes), 0.0, 1e6)
# loss = loss_attract + 0.05 * loss_repel
loss = loss_attract + 0.01 * loss_repel
return loss
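# Editor-added note (illustrative): the total loss above is attract + 0.01 * repel, where
# the attracting term pulls each feature toward its class centroid and the repelling term
# keeps the average distance to the other classes' centroids above a margin of 10.0.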
class DiscCentroidsLossFunc(Function):
@staticmethod
def forward(ctx, feature, label, centroids, batch_size):
ctx.save_for_backward(feature, label, centroids, batch_size)
centroids_batch = centroids.index_select(0, label.long())
return (feature - centroids_batch).pow(2).sum() / 2.0 / batch_size
@staticmethod
def backward(ctx, grad_output):
feature, label, centroids, batch_size = ctx.saved_tensors
centroids_batch = centroids.index_select(0, label.long())
diff = centroids_batch - feature
# init every iteration
counts = centroids.new_ones(centroids.size(0))
ones = centroids.new_ones(label.size(0))
grad_centroids = centroids.new_zeros(centroids.size())
counts = counts.scatter_add_(0, label.long(), ones)
grad_centroids.scatter_add_(0, label.unsqueeze(1).expand(feature.size()).long(), diff)
grad_centroids = grad_centroids/counts.view(-1, 1)
return - grad_output * diff / batch_size, None, grad_centroids / batch_size, None
def create_loss (feat_dim=512, num_classes=1000):
print('Loading Discriminative Centroids Loss.')
return DiscCentroidsLoss(num_classes, feat_dim)
|
classifier-balancing-main
|
loss/DiscCentroidsLoss.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import torch.nn as nn
from models.CosNormClassifier import CosNorm_Classifier
from utils import *
from os import path
import pdb
class MetaEmbedding_Classifier(nn.Module):
def __init__(self, feat_dim=2048, num_classes=1000):
super(MetaEmbedding_Classifier, self).__init__()
self.num_classes = num_classes
self.fc_hallucinator = nn.Linear(feat_dim, num_classes)
self.fc_selector = nn.Linear(feat_dim, feat_dim)
self.cosnorm_classifier = CosNorm_Classifier(feat_dim, num_classes)
def forward(self, x, centroids, *args):
# storing direct feature
direct_feature = x
batch_size = x.size(0)
feat_size = x.size(1)
# set up visual memory
x_expand = x.unsqueeze(1).expand(-1, self.num_classes, -1)
centroids_expand = centroids.unsqueeze(0).expand(batch_size, -1, -1)
keys_memory = centroids
# computing reachability
dist_cur = torch.norm(x_expand - centroids_expand, 2, 2)
values_nn, labels_nn = torch.sort(dist_cur, 1)
scale = 10.0
reachability = (scale / values_nn[:, 0]).unsqueeze(1).expand(-1, feat_size)
# computing memory feature by querying and associating visual memory
values_memory = self.fc_hallucinator(x)
values_memory = values_memory.softmax(dim=1)
memory_feature = torch.matmul(values_memory, keys_memory)
# computing concept selector
concept_selector = self.fc_selector(x)
concept_selector = concept_selector.tanh()
x = reachability * (direct_feature + concept_selector * memory_feature)
# storing infused feature
infused_feature = concept_selector * memory_feature
logits = self.cosnorm_classifier(x)
return logits, [direct_feature, infused_feature]
def create_model(feat_dim=2048, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Meta Embedding Classifier.')
clf = MetaEmbedding_Classifier(feat_dim, num_classes)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
clf.fc_hallucinator = init_weights(model=clf.fc_hallucinator,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
print('Random initialized classifier weights.')
return clf
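# --- Illustrative usage sketch (editor-added; assumes a CUDA device, since
# CosNorm_Classifier allocates its weight on the GPU). Random data, shapes only. ---
if __name__ == '__main__':
    clf = MetaEmbedding_Classifier(feat_dim=16, num_classes=5).cuda()
    feats = torch.randn(4, 16).cuda()
    centroids = torch.randn(5, 16).cuda()
    logits, (direct_feature, infused_feature) = clf(feats, centroids)
    print(logits.shape)  # torch.Size([4, 5])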
|
classifier-balancing-main
|
models/MetaEmbeddingClassifier.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 101 Feature Model.')
resnet101 = ResNet(Bottleneck, [3, 4, 23, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 101 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet101 = init_weights(model=resnet101,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet101
|
classifier-balancing-main
|
models/ResNet101Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, caffe=False, log_dir=None, test=False):
print('Loading Scratch ResNet 152 Feature Model.')
resnet152 = ResNet(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
assert(caffe != stage1_weights)
if caffe:
print('Loading Caffe Pretrained ResNet 152 Weights.')
resnet152 = init_weights(model=resnet152,
weights_path='./logs/caffe_resnet152.pth',
caffe=True)
elif stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet152 = init_weights(model=resnet152,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet152
|
classifier-balancing-main
|
models/ResNet152FeatureCaffe.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from layers.ModulatedAttLayer import ModulatedAttLayer
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, is_last=False):
super(Bottleneck, self).__init__()
width = int(planes * (base_width / 64.)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
groups=groups, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.is_last = is_last
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNext(nn.Module):
def __init__(self, block, layers, groups=1, width_per_group=64,
use_modulatedatt=False, use_fc=False, dropout=None,
use_glore=False, use_gem=False):
self.inplanes = 64
super(ResNext, self).__init__()
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.use_fc = use_fc
self.use_dropout = True if dropout else False
if self.use_fc:
print('Using fc.')
self.fc_add = nn.Linear(512*block.expansion, 512)
if self.use_dropout:
print('Using dropout.')
self.dropout = nn.Dropout(p=dropout)
self.use_modulatedatt = use_modulatedatt
if self.use_modulatedatt:
print('Using self attention.')
self.modulatedatt = ModulatedAttLayer(in_channels=512*block.expansion)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, is_last=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,
groups=self.groups, base_width=self.base_width))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
groups=self.groups, base_width=self.base_width,
is_last=(is_last and i == blocks-1)))
return nn.Sequential(*layers)
def forward(self, x, *args):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_modulatedatt:
x, feature_maps = self.modulatedatt(x)
else:
feature_maps = None
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.use_fc:
x = F.relu(self.fc_add(x))
if self.use_dropout:
x = self.dropout(x)
return x, feature_maps
|
classifier-balancing-main
|
models/ResNextFeature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 50 Feature Model.')
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
            print('Loading %s Stage 1 ResNet 50 Weights.' % dataset)
if log_dir is not None:
# subdir = log_dir.strip('/').split('/')[-1]
# subdir = subdir.replace('stage2', 'stage1')
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet50 = init_weights(model=resnet50,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet50
|
classifier-balancing-main
|
models/ResNet50Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from layers.ModulatedAttLayer import ModulatedAttLayer
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, use_modulatedatt=False, use_fc=False, dropout=None):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.use_fc = use_fc
self.use_dropout = True if dropout else False
if self.use_fc:
print('Using fc.')
self.fc_add = nn.Linear(512*block.expansion, 512)
if self.use_dropout:
print('Using dropout.')
self.dropout = nn.Dropout(p=dropout)
self.use_modulatedatt = use_modulatedatt
if self.use_modulatedatt:
print('Using self attention.')
self.modulatedatt = ModulatedAttLayer(in_channels=512*block.expansion)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, *args):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_modulatedatt:
x, feature_maps = self.modulatedatt(x)
else:
feature_maps = None
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.use_fc:
x = F.relu(self.fc_add(x))
if self.use_dropout:
x = self.dropout(x)
return x, feature_maps
|
classifier-balancing-main
|
models/ResNetFeature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch.nn as nn
from utils import *
from os import path
class DotProduct_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, *args):
super(DotProduct_Classifier, self).__init__()
# print('<DotProductClassifier> contains bias: {}'.format(bias))
self.fc = nn.Linear(feat_dim, num_classes)
def forward(self, x, *args):
x = self.fc(x)
return x, None
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Dot Product Classifier.')
clf = DotProduct_Classifier(num_classes, feat_dim)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
subdir = log_dir.strip('/').split('/')[-1]
subdir = subdir.replace('stage2', 'stage1')
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading classifier weights from %s' % weight_dir)
clf.fc = init_weights(model=clf.fc,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
print('Random initialized classifier weights.')
return clf
|
classifier-balancing-main
|
models/DotProductClassifier.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 101 Feature Model.')
resnext = ResNext(Bottleneck, [3, 4, 23, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 101 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext
|
classifier-balancing-main
|
models/ResNext101Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from utils import *
from os import path
class DotProduct_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, *args):
super(DotProduct_Classifier, self).__init__()
# print('<DotProductClassifier> contains bias: {}'.format(bias))
self.fc = nn.Linear(feat_dim, num_classes)
self.scales = Parameter(torch.ones(num_classes))
for param_name, param in self.fc.named_parameters():
param.requires_grad = False
def forward(self, x, *args):
x = self.fc(x)
x *= self.scales
return x, None
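# Editor-added note (assumption): despite the file name, this module freezes the linear
# layer and learns only the per-class `scales`, i.e. a learnable weight-scaling (LWS)
# classifier; tau-normalization itself is a post-hoc rescaling of a trained classifier's
# weight norms and needs no extra learned parameters.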
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Dot Product Classifier.')
clf = DotProduct_Classifier(num_classes, feat_dim)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 Classifier Weights.' % dataset)
if log_dir is not None:
subdir = log_dir.strip('/').split('/')[-1]
subdir = subdir.replace('stage2', 'stage1')
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
# weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading classifier weights from %s' % weight_dir)
clf.fc = init_weights(model=clf.fc,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'),
classifier=True)
else:
print('Random initialized classifier weights.')
return clf
|
classifier-balancing-main
|
models/TauNormClassifier.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import torch
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
import pdb
class CosNorm_Classifier(nn.Module):
def __init__(self, in_dims, out_dims, scale=16, margin=0.5, init_std=0.001):
super(CosNorm_Classifier, self).__init__()
self.in_dims = in_dims
self.out_dims = out_dims
self.scale = scale
self.margin = margin
self.weight = Parameter(torch.Tensor(out_dims, in_dims).cuda())
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input, *args):
norm_x = torch.norm(input.clone(), 2, 1, keepdim=True)
ex = (norm_x / (1 + norm_x)) * (input / norm_x)
ew = self.weight / torch.norm(self.weight, 2, 1, keepdim=True)
return torch.mm(self.scale * ex, ew.t())
def create_model(in_dims=512, out_dims=1000):
print('Loading Cosine Norm Classifier.')
return CosNorm_Classifier(in_dims=in_dims, out_dims=out_dims)
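# --- Illustrative usage sketch (editor-added; assumes a CUDA device, since the weight is
# allocated on the GPU): the logits are scaled cosine similarities with an extra
# ||x|| / (1 + ||x||) factor that soft-normalizes the feature magnitude. ---
if __name__ == '__main__':
    clf = create_model(in_dims=8, out_dims=3)
    feats = torch.randn(4, 8).cuda()
    print(clf(feats).shape)  # torch.Size([4, 3])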
|
classifier-balancing-main
|
models/CosNormClassifier.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 10 Feature Model.')
resnet10 = ResNet(BasicBlock, [1, 1, 1, 1], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 10 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet10 = init_weights(model=resnet10,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet10
|
classifier-balancing-main
|
models/ResNet10Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 152 Feature Model.')
resnext = ResNext(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext
|
classifier-balancing-main
|
models/ResNext152Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import numpy as np
import pickle
from os import path
class KNNClassifier(nn.Module):
def __init__(self, feat_dim=512, num_classes=1000, feat_type='cl2n', dist_type='l2'):
super(KNNClassifier, self).__init__()
assert feat_type in ['un', 'l2n', 'cl2n'], "feat_type is wrong!!!"
assert dist_type in ['l2', 'cos'], "dist_type is wrong!!!"
self.feat_dim = feat_dim
self.num_classes = num_classes
self.centroids = torch.randn(num_classes, feat_dim)
self.feat_mean = torch.randn(feat_dim)
self.feat_type = feat_type
self.dist_type = dist_type
self.initialized = False
def update(self, cfeats):
mean = cfeats['mean']
centroids = cfeats['{}cs'.format(self.feat_type)]
mean = torch.from_numpy(mean)
centroids = torch.from_numpy(centroids)
self.feat_mean.copy_(mean)
self.centroids.copy_(centroids)
if torch.cuda.is_available():
self.feat_mean = self.feat_mean.cuda()
self.centroids = self.centroids.cuda()
self.initialized = True
def forward(self, inputs, *args):
centroids = self.centroids
feat_mean = self.feat_mean
# Feature transforms
if self.feat_type == 'cl2n':
inputs = inputs - feat_mean
#centroids = centroids - self.feat_mean
if self.feat_type in ['l2n', 'cl2n']:
norm_x = torch.norm(inputs, 2, 1, keepdim=True)
inputs = inputs / norm_x
#norm_c = torch.norm(centroids, 2, 1, keepdim=True)
#centroids = centroids / norm_c
# Logit calculation
if self.dist_type == 'l2':
logit = self.l2_similarity(inputs, centroids)
elif self.dist_type == 'cos':
logit = self.cos_similarity(inputs, centroids)
return logit, None
def l2_similarity(self, A, B):
# input A: [bs, fd] (batch_size x feat_dim)
# input B: [nC, fd] (num_classes x feat_dim)
feat_dim = A.size(1)
AB = torch.mm(A, B.t())
AA = (A**2).sum(dim=1, keepdim=True)
BB = (B**2).sum(dim=1, keepdim=True)
dist = AA + BB.t() - 2*AB
return -dist
def cos_similarity(self, A, B):
feat_dim = A.size(1)
AB = torch.mm(A, B.t())
AB = AB / feat_dim
return AB
def create_model(feat_dim, num_classes=1000, feat_type='cl2n', dist_type='l2',
log_dir=None, test=False, *args):
print('Loading KNN Classifier')
print(feat_dim, num_classes, feat_type, dist_type, log_dir, test)
clf = KNNClassifier(feat_dim, num_classes, feat_type, dist_type)
if log_dir is not None:
fname = path.join(log_dir, 'cfeats.pkl')
if path.exists(fname):
print('===> Loading features from %s' % fname)
with open(fname, 'rb') as f:
data = pickle.load(f)
clf.update(data)
else:
print('Random initialized classifier weights.')
return clf
if __name__ == "__main__":
cens = np.eye(4)
mean = np.ones(4)
xs = np.array([
[0.9, 0.1, 0.0, 0.0],
[0.2, 0.1, 0.1, 0.6],
[0.3, 0.3, 0.4, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.25, 0.25, 0.25, 0.25]
])
xs = torch.Tensor(xs)
classifier = KNNClassifier(feat_dim=4, num_classes=4,
feat_type='un')
    classifier.update({'mean': mean, 'uncs': cens})
import pdb; pdb.set_trace()
logits, _ = classifier(xs)
|
classifier-balancing-main
|
models/KNNClassifier.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNextFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNext 50 Feature Model.')
resnext = ResNext(Bottleneck, [3, 4, 6, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None,
groups=32, width_per_group=4, use_glore=False, use_gem=False)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNext 50 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnext = init_weights(model=resnext,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnext
|
classifier-balancing-main
|
models/ResNext50Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from models.ResNetFeature import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
print('Loading Scratch ResNet 152 Feature Model.')
resnet = ResNet(Bottleneck, [3, 8, 36, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
if not test:
if stage1_weights:
assert(dataset)
print('Loading %s Stage 1 ResNet 152 Weights.' % dataset)
if log_dir is not None:
weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
else:
weight_dir = './logs/%s/stage1' % dataset
print('==> Loading weights from %s' % weight_dir)
resnet = init_weights(model=resnet,
weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
else:
print('No Pretrained Weights For Feature Model.')
return resnet
|
classifier-balancing-main
|
models/ResNet152Feature.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
import pdb
##################################
## Class-aware sampling, partly implemented by frombeijingwithlove
##################################
class RandomCycleIter:
def __init__ (self, data, test_mode=False):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
self.test_mode = test_mode
def __iter__ (self):
return self
def __next__ (self):
self.i += 1
if self.i == self.length:
self.i = 0
if not self.test_mode:
random.shuffle(self.data_list)
return self.data_list[self.i]
def class_aware_sample_generator (cls_iter, data_iter_list, n, num_samples_cls=1):
i = 0
j = 0
while i < n:
# yield next(data_iter_list[next(cls_iter)])
if j >= num_samples_cls:
j = 0
if j == 0:
temp_tuple = next(zip(*[data_iter_list[next(cls_iter)]]*num_samples_cls))
yield temp_tuple[j]
else:
yield temp_tuple[j]
i += 1
j += 1
class ClassAwareSampler (Sampler):
def __init__(self, data_source, num_samples_cls=1,):
num_classes = len(np.unique(data_source.labels))
self.class_iter = RandomCycleIter(range(num_classes))
cls_data_list = [list() for _ in range(num_classes)]
for i, label in enumerate(data_source.labels):
cls_data_list[label].append(i)
self.data_iter_list = [RandomCycleIter(x) for x in cls_data_list]
self.num_samples = max([len(x) for x in cls_data_list]) * len(cls_data_list)
self.num_samples_cls = num_samples_cls
def __iter__ (self):
return class_aware_sample_generator(self.class_iter, self.data_iter_list,
self.num_samples, self.num_samples_cls)
def __len__ (self):
return self.num_samples
def get_sampler():
return ClassAwareSampler
##################################
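# --- Illustrative usage sketch (editor-added): `_ToyDataset` is a stand-in for any
# dataset exposing a flat `labels` list; in the repo the sampler instance is passed to a
# DataLoader via its `sampler` argument. ---
if __name__ == '__main__':
    class _ToyDataset:
        labels = [0, 0, 0, 0, 1, 2]
    sampler = ClassAwareSampler(_ToyDataset(), num_samples_cls=2)
    print(len(sampler))         # 12 = largest class count (4) * number of classes (3)
    print(list(iter(sampler)))  # a class-balanced stream of dataset indices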
|
classifier-balancing-main
|
data/ClassAwareSampler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
class PriorityTree(object):
def __init__(self, capacity, fixed_weights=None, fixed_scale=1.0,
init_weight=1.0):
"""
        fixed_weights: weights that won't be updated by self.update()
"""
assert fixed_weights is None or len(fixed_weights) == capacity
self._capacity = capacity
self._tree_size = 2 * capacity - 1
self.fixed_scale = fixed_scale
self.fixed_weights = np.zeros(self._capacity) if fixed_weights is None \
else fixed_weights
self.tree = np.zeros(self._tree_size)
self._initialized = False
self.initialize(init_weight)
def initialize(self, init_weight):
"""Initialize the tree."""
# Rescale the fixed_weights if it is not zero
if self.fixed_weights.sum() > 0 and init_weight > 0:
self.fixed_weights *= self.fixed_scale * init_weight * self.capacity \
/ self.fixed_weights.sum()
print('FixedWeights: {}'.format(self.fixed_weights.sum()))
self.update_whole(init_weight + self.fixed_weights)
self._initialized = True
def reset_fixed_weights(self, fixed_weights, rescale=False):
""" Reset the manually designed weights and
update the whole tree accordingly.
@rescale: rescale the fixed_weights such that
fixed_weights.sum() = self.fixed_scale * adaptive_weights.sum()
"""
adaptive_weights = self.get_adaptive_weights()
fixed_sum = fixed_weights.sum()
if rescale and fixed_sum > 0:
scale = self.fixed_scale * adaptive_weights.sum() / fixed_sum
self.fixed_weights = fixed_weights * scale
else:
self.fixed_weights = fixed_weights
self.update_whole(self.fixed_weights + adaptive_weights)
def update_whole(self, total_weights):
""" Update the whole tree based on per-example sampling weights """
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
self.tree[lefti:righti+1] = total_weights
# Iteratively find a parent layer
while lefti != 0 and righti != 0:
lefti = (lefti - 1) // 2 if lefti != 0 else 0
righti = (righti - 1) // 2 if righti != 0 else 0
            # Assign parent weights from right to left
for i in range(righti, lefti-1, -1):
self.tree[i] = self.tree[2*i+1] + self.tree[2*i+2]
def get_adaptive_weights(self):
""" Get the instance-aware weights, that are not mannually designed"""
return self.get_total_weights() - self.fixed_weights
def get_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return self.tree[lefti:righti+1]
@property
def size(self):
return self._tree_size
@property
def capacity(self):
return self._capacity
def __len__(self):
return self.capacity
def pointer_to_treeidx(self, pointer):
assert pointer < self.capacity
return int(pointer + self.capacity - 1)
def update(self, pointer, priority):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
priority += self.fixed_weights[pointer]
delta = priority - self.tree[tree_idx]
self.tree[tree_idx] = priority
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def get_leaf(self, value):
assert self._initialized, 'PriorityTree not initialized!!!!'
        assert self.total > 0, 'No priority weights set!!'
parent = 0
while True:
left_child = 2 * parent + 1
right_child = 2 * parent + 2
if left_child >= len(self.tree):
tgt_leaf = parent
break
if value < self.tree[left_child]:
parent = left_child
else:
value -= self.tree[left_child]
parent = right_child
data_idx = tgt_leaf - self.capacity + 1
return data_idx, self.tree[tgt_leaf] # data idx, priority
@property
def total(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return self.tree[0]
@property
def max(self):
return np.max(self.tree[-self.capacity:])
@property
def min(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return np.min(self.tree[-self.capacity:])
def get_weights(self):
return {'fixed_weights': self.fixed_weights,
'total_weights': self.get_total_weights()}
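# Editor-added note (illustrative): PriorityTree is a flat binary sum tree. For
# capacity = 4 the array has 2*4-1 = 7 slots; leaves sit at indices 3..6 (pointer p maps
# to tree index p + capacity - 1), each internal node stores the sum of its two children,
# tree[0] holds the grand total, and get_leaf(value) walks down from the root in
# O(log capacity) to sample an index with probability proportional to its weight.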
class MixedPrioritizedSampler(Sampler):
"""
A sampler combining manually designed sampling strategy and prioritized
sampling strategy.
    The manually designed strategy contains two parts:
    $$ manual_weights = lam * balanced_weights + (1 - lam) * uniform_weights
    Here we use a generalized version of the balanced weights as follows;
    as n goes to infinity, balanced_weights approaches the truly balanced weights:
    $$ balanced_weights = uniform_weights ^ (1/n)
Then the balanced weights are scaled such that
$$ balanced_weights.sum() = balance_scale * uniform_weights.sum()
Note: above weights are per-class weights
Overall sampling weights are given as
$$ sampling_weights = manual_weights * fixed_scale + priority_weights
Arguments:
@dataset: A dataset
@balance_scale: The scale of balanced_weights
@lam: A weight to combine balanced weights and uniform weights
- None for shifting sampling
- 0 for uniform sampling
- 1 for balanced sampling
@fixed_scale: The scale of manually designed weights
@cycle: shifting strategy
        - 0 for linear shifting: 3 -> 2 -> 1
- 1 for periodic shifting:
            3 -> 2 -> 1 -> 3 -> 2 -> 1 -> 3 -> 2 -> 1
- 2 for cosine-like periodic shifting:
            3 -> 2 -> 1 -> 1 -> 2 -> 3 -> 3 -> 2 -> 1
@nroot:
- None for truly balanced weights
- >= 2 for pseudo-balanced weights
@rescale: whether to rebalance the manual weights and priority weights
every epoch
@root_decay:
- 'exp': for exponential decay
- 'linear': for linear decay
"""
def __init__(self, dataset, balance_scale=1.0, fixed_scale=1.0,
lam=None, epochs=90, cycle=0, nroot=None, manual_only=False,
rescale=False, root_decay=None, decay_gap=30, ptype='score',
alpha=1.0):
"""
"""
self.dataset = dataset
self.balance_scale = balance_scale
self.fixed_scale = fixed_scale
self.epochs = epochs
self.lam = lam
self.cycle = cycle
self.nroot = nroot
self.rescale = rescale
self.manual_only = manual_only
self.root_decay = root_decay
self.decay_gap = decay_gap
self.ptype = ptype
self.num_samples = len(dataset)
self.alpha = alpha
        # If using root_decay, reset relevant parameters
if self.root_decay in ['exp', 'linear', 'autoexp']:
self.lam = 1
self.manual_only = True
self.nroot = 1
if self.root_decay == 'autoexp':
self.decay_gap = 1
self.decay_factor = np.power(nroot, 1/(self.epochs-1))
else:
assert self.root_decay is None
assert self.nroot is None or self.nroot >= 2
print("====> Decay GAP: {}".format(self.decay_gap))
# Take care of lambdas
if self.lam is None:
self.freeze = False
if cycle == 0:
self.lams = np.linspace(0, 1, epochs)
elif cycle == 1:
self.lams = np.concatenate([np.linspace(0,1,epochs//3)] * 3)
elif cycle == 2:
self.lams = np.concatenate([np.linspace(0,1,epochs//3),
np.linspace(0,1,epochs//3)[::-1],
np.linspace(0,1,epochs//3)])
else:
raise NotImplementedError(
'cycle = {} not implemented'.format(cycle))
else:
self.lams = [self.lam]
self.freeze = True
# Get num of samples per class
self.cls_cnts = []
self.labels = labels = np.array(self.dataset.labels)
for l in np.unique(labels):
self.cls_cnts.append(np.sum(labels==l))
self.num_classes = len(self.cls_cnts)
self.cnts = np.array(self.cls_cnts).astype(float)
# Get per-class image indexes
self.cls_idxs = [[] for _ in range(self.num_classes)]
for i, label in enumerate(self.dataset.labels):
self.cls_idxs[label].append(i)
for ci in range(self.num_classes):
self.cls_idxs[ci] = np.array(self.cls_idxs[ci])
# Build balanced weights based on class counts
self.balanced_weights = self.get_balanced_weights(self.nroot)
self.manual_weights = self.get_manual_weights(self.lams[0])
# Setup priority tree
if self.ptype == 'score':
self.init_weight = 1.
elif self.ptype in ['CE', 'entropy']:
self.init_weight = 6.9
else:
raise NotImplementedError('ptype {} not implemented'.format(self.ptype))
if self.manual_only:
self.init_weight = 0.
self.init_weight = np.power(self.init_weight, self.alpha)
self.ptree = PriorityTree(self.num_samples, self.manual_weights,
fixed_scale=self.fixed_scale,
init_weight=self.init_weight)
def get_manual_weights(self, lam):
# Merge balanced weights and uniform weights
if lam == 1:
manual_weights = self.balanced_weights
elif lam == 0:
manual_weights = np.ones(len(self.balanced_weights))
else:
manual_weights = self.balanced_weights * lam + (1-lam)
return manual_weights
def get_balanced_weights(self, nroot):
""" Calculate normalized generalized balanced weights """
cnts = self.cnts
if nroot is None:
# Real balanced sampling weights
cls_ws = cnts.min() / cnts
elif nroot >= 1:
# Generalized balanced weights
cls_ws = cnts / cnts.sum()
cls_ws = np.power(cls_ws, 1./nroot) * cnts.sum()
cls_ws = cls_ws / cnts
else:
raise NotImplementedError('root:{} not implemented'.format(nroot))
# Get un-normalized weights
balanced_weights = np.zeros(self.num_samples)
for ci in range(self.num_classes):
balanced_weights[self.cls_idxs[ci]] = cls_ws[ci]
# Normalization and rescale
balanced_weights *= self.num_samples / balanced_weights.sum() * \
self.balance_scale
return balanced_weights
def __iter__(self):
for _ in range(self.num_samples):
w = random.random() * self.ptree.total
i, pri = self.ptree.get_leaf(w)
yield i
def __len__(self):
return self.num_samples
def reset_weights(self, epoch):
if not self.freeze and self.fixed_scale > 0:
if epoch >= self.epochs:
e = self.epochs - 1
elif epoch < 1:
e = 0
else:
e = epoch
self.manual_weights = self.get_manual_weights(self.lams[e])
self.ptree.reset_fixed_weights(self.manual_weights, self.rescale)
if self.root_decay in ['exp', 'linear', 'autoexp'] and epoch % self.decay_gap == 0:
if self.root_decay == 'exp':
self.nroot *= 2
elif self.root_decay == 'linear':
self.nroot += 1
elif self.root_decay == 'autoexp':
# self.nroot *= self.decay_factor
self.nroot = np.power(self.decay_factor, epoch)
bw = self.get_balanced_weights(self.nroot)
self.ptree.reset_fixed_weights(bw)
def update_weights(self, inds, weights):
""" Update priority weights """
if not self.manual_only:
weights = np.clip(weights, 0, self.init_weight)
weights = np.power(weights, self.alpha)
for i, w in zip(inds, weights):
self.ptree.update(i, w)
def get_weights(self):
return self.ptree.get_weights()
def get_sampler():
return MixedPrioritizedSampler
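# --- Illustrative sketch (toy epochs=9, not part of the sampler): the three
# lam schedules built in __init__ above when lam is None.  cycle=0 ramps lam
# from 0 to 1 once over training, cycle=1 repeats that ramp three times, and
# cycle=2 ramps up, back down, then up again.
def _lam_schedule_sketch(epochs=9):
    import numpy as np
    cycle0 = np.linspace(0, 1, epochs)
    cycle1 = np.concatenate([np.linspace(0, 1, epochs // 3)] * 3)
    cycle2 = np.concatenate([np.linspace(0, 1, epochs // 3),
                             np.linspace(0, 1, epochs // 3)[::-1],
                             np.linspace(0, 1, epochs // 3)])
    return cycle0, cycle1, cycle2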
|
classifier-balancing-main
|
data/MixedPrioritizedSampler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import random
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomCycleIter:
def __init__ (self, data, test_mode=False):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
self.test_mode = test_mode
def __iter__ (self):
return self
def __next__ (self):
self.i += 1
if self.i == self.length:
self.i = 0
if not self.test_mode:
random.shuffle(self.data_list)
return self.data_list[self.i]
class PriorityTree(object):
def __init__(self, capacity, init_weights, fixed_weights=None, fixed_scale=1.0,
alpha=1.0):
"""
        fixed_weights: weights that won't be updated by self.update()
"""
assert fixed_weights is None or len(fixed_weights) == capacity
assert len(init_weights) == capacity
self.alpha = alpha
self._capacity = capacity
self._tree_size = 2 * capacity - 1
self.fixed_scale = fixed_scale
self.fixed_weights = np.zeros(self._capacity) if fixed_weights is None \
else fixed_weights
self.tree = np.zeros(self._tree_size)
self._initialized = False
self.initialize(init_weights)
def initialize(self, init_weights):
"""Initialize the tree."""
# Rescale the fixed_weights if it is not zero
self.fixed_scale_init = self.fixed_scale
if self.fixed_weights.sum() > 0 and init_weights.sum() > 0:
self.fixed_scale_init *= init_weights.sum() / self.fixed_weights.sum()
self.fixed_weights *= self.fixed_scale * init_weights.sum() \
/ self.fixed_weights.sum()
print('FixedWeights: {}'.format(self.fixed_weights.sum()))
self.update_whole(init_weights + self.fixed_weights)
self._initialized = True
def reset_adaptive_weights(self, adaptive_weights):
self.update_whole(self.fixed_weights + adaptive_weights)
def reset_fixed_weights(self, fixed_weights, rescale=False):
""" Reset the manually designed weights and
update the whole tree accordingly.
@rescale: rescale the fixed_weights such that
fixed_weights.sum() = self.fixed_scale * adaptive_weights.sum()
"""
adaptive_weights = self.get_adaptive_weights()
fixed_sum = fixed_weights.sum()
if rescale and fixed_sum > 0:
            # Rescale the fixed weights based on the adaptive weights
scale = self.fixed_scale * adaptive_weights.sum() / fixed_sum
else:
            # Rescale the fixed weights based on the previous fixed weights
scale = self.fixed_weights.sum() / fixed_sum
self.fixed_weights = fixed_weights * scale
self.update_whole(self.fixed_weights + adaptive_weights)
def update_whole(self, total_weights):
""" Update the whole tree based on per-example sampling weights """
if self.alpha != 1:
total_weights = np.power(total_weights, self.alpha)
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
self.tree[lefti:righti+1] = total_weights
# Iteratively find a parent layer
while lefti != 0 and righti != 0:
lefti = (lefti - 1) // 2 if lefti != 0 else 0
righti = (righti - 1) // 2 if righti != 0 else 0
            # Assign parent weights from right to left
for i in range(righti, lefti-1, -1):
self.tree[i] = self.tree[2*i+1] + self.tree[2*i+2]
def get_adaptive_weights(self):
""" Get the instance-aware weights, that are not mannually designed"""
if self.alpha == 1:
return self.get_total_weights() - self.fixed_weights
else:
return self.get_raw_total_weights() - self.fixed_weights
def get_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return self.tree[lefti:righti+1]
def get_raw_total_weights(self):
""" Get the per-example sampling weights
return shape: [capacity]
"""
lefti = self.pointer_to_treeidx(0)
righti = self.pointer_to_treeidx(self.capacity-1)
return np.power(self.tree[lefti:righti+1], 1/self.alpha)
@property
def size(self):
return self._tree_size
@property
def capacity(self):
return self._capacity
def __len__(self):
return self.capacity
def pointer_to_treeidx(self, pointer):
assert pointer < self.capacity
return int(pointer + self.capacity - 1)
def update(self, pointer, priority):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
priority += self.fixed_weights[pointer]
if self.alpha != 1:
priority = np.power(priority, self.alpha)
delta = priority - self.tree[tree_idx]
self.tree[tree_idx] = priority
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def update_delta(self, pointer, delta):
assert pointer < self.capacity
tree_idx = self.pointer_to_treeidx(pointer)
ratio = 1- self.fixed_weights[pointer] / self.tree[tree_idx]
# delta *= ratio
if self.alpha != 1:
# Update delta
if self.tree[tree_idx] < 0 or \
np.power(self.tree[tree_idx], 1/self.alpha) + delta < 0:
import pdb; pdb.set_trace()
delta = np.power(np.power(self.tree[tree_idx], 1/self.alpha) + delta,
self.alpha) \
- self.tree[tree_idx]
self.tree[tree_idx] += delta
while tree_idx != 0:
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += delta
def get_leaf(self, value):
assert self._initialized, 'PriorityTree not initialized!!!!'
        assert self.total > 0, 'No priority weights set!'
parent = 0
while True:
left_child = 2 * parent + 1
right_child = 2 * parent + 2
if left_child >= len(self.tree):
tgt_leaf = parent
break
if value < self.tree[left_child]:
parent = left_child
else:
value -= self.tree[left_child]
parent = right_child
data_idx = tgt_leaf - self.capacity + 1
return data_idx, self.tree[tgt_leaf] # data idx, priority
@property
def total(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return self.tree[0]
@property
def max(self):
return np.max(self.tree[-self.capacity:])
@property
def min(self):
assert self._initialized, 'PriorityTree not initialized!!!!'
return np.min(self.tree[-self.capacity:])
def get_weights(self):
wdict = {'fixed_weights': self.fixed_weights,
'total_weights': self.get_total_weights()}
if self.alpha != 1:
wdict.update({'raw_total_weights': self.get_raw_total_weights(),
'alpha': self.alpha})
return wdict
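# --- Minimal usage sketch of PriorityTree (toy priorities, not part of the
# sampler): leaves hold unnormalized priorities and get_leaf() walks the
# sum-tree so that leaf i is drawn with probability priority_i / total.
def _priority_tree_sampling_sketch(num_draws=10000):
    tree = PriorityTree(capacity=3, init_weights=np.array([1.0, 3.0, 6.0]))
    counts = np.zeros(3, dtype=int)
    for _ in range(num_draws):
        idx, _pri = tree.get_leaf(random.random() * tree.total)
        counts[idx] += 1
    # Empirical frequencies should be close to [0.1, 0.3, 0.6].
    return counts / counts.sum()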
class ClassPrioritySampler(Sampler):
"""
A sampler combining manually designed sampling strategy and prioritized
sampling strategy.
    Manually designed strategy contains two parts:
$$ manual_weights = lam * balanced_weights + (1-lam) uniform_weights
Here we use a generalized version of balanced weights as follows,
when n limits to infinity, balanced_weights = real_balanced_weights
$$ balanced_weights = uniform_weights ^ (1/n)
Then the balanced weights are scaled such that
$$ balanced_weights.sum() = balance_scale * uniform_weights.sum()
    Note: the above weights are per-class weights (a small numeric sketch of these quantities follows the class definition)
Overall sampling weights are given as
$$ sampling_weights = manual_weights * fixed_scale + priority_weights
Arguments:
@dataset: A dataset
@balance_scale: The scale of balanced_weights
@lam: A weight to combine balanced weights and uniform weights
- None for shifting sampling
- 0 for uniform sampling
- 1 for balanced sampling
@fixed_scale: The scale of manually designed weights
- fixed_scale < 0 means, the manually designed distribution will
be used as the backend distribution of priorities.
@cycle: shifting strategy
- 0 for linear shifting: 3 -> 2 - > 1
- 1 for periodic shifting:
3 -> 2 - > 1 -> 3 -> 2 - > 1 -> 3 -> 2 - > 1
- 2 for cosine-like periodic shifting:
3 -> 2 - > 1 -> 1 -> 2 - > 3 -> 3 -> 2 - > 1
@nroot:
- None for truly balanced weights
- >= 2 for pseudo-balanced weights
@rescale: whether to rebalance the manual weights and priority weights
every epoch
@root_decay:
- 'exp': for exponential decay
- 'linear': for linear decay
"""
def __init__(self, dataset, balance_scale=1.0, fixed_scale=1.0,
lam=None, epochs=90, cycle=0, nroot=None, manual_only=False,
rescale=False, root_decay=None, decay_gap=30, ptype='score',
pri_mode='train', momentum=0., alpha=1.0):
"""
"""
self.dataset = dataset
self.balance_scale = balance_scale
self.fixed_scale = fixed_scale
self.epochs = epochs
self.lam = lam
self.cycle = cycle
self.nroot = nroot
self.rescale = rescale
self.manual_only = manual_only
self.root_decay = root_decay
self.decay_gap = decay_gap
self.ptype = ptype
self.pri_mode = pri_mode
self.num_samples = len(dataset)
self.manual_as_backend = False
self.momentum = momentum
self.alpha = alpha
assert 0. <= self.momentum <= 1.0
assert 0. <= self.alpha
# Change the backend distribution of priority if needed
if self.fixed_scale < 0:
self.fixed_scale = 0
self.manual_as_backend = True
        # If using root_decay, reset relevant parameters
if self.root_decay in ['exp', 'linear', 'autoexp']:
self.lam = 1
self.manual_only = True
self.nroot = 1
if self.root_decay == 'autoexp':
self.decay_gap = 1
self.decay_factor = np.power(nroot, 1/(self.epochs-1))
else:
assert self.root_decay is None
assert self.nroot is None or self.nroot > 1
print("====> Decay GAP: {}".format(self.decay_gap))
# Take care of lambdas
self.freeze = True
if self.lam is None:
self.freeze = False
if cycle == 0:
self.lams = np.linspace(0, 1, epochs)
elif cycle == 1:
self.lams = np.concatenate([np.linspace(0,1,epochs//3)] * 3)
elif cycle == 2:
self.lams = np.concatenate([np.linspace(0,1,epochs//3),
np.linspace(0,1,epochs//3)[::-1],
np.linspace(0,1,epochs//3)])
else:
raise NotImplementedError(
'cycle = {} not implemented'.format(cycle))
else:
self.lams = [self.lam]
# Get num of samples per class
self.cls_cnts = []
self.labels = labels = np.array(self.dataset.labels)
for l in np.unique(labels):
self.cls_cnts.append(np.sum(labels==l))
self.num_classes = len(self.cls_cnts)
self.cnts = np.array(self.cls_cnts).astype(float)
# Get per-class image indexes
self.cls_idxs = [[] for _ in range(self.num_classes)]
for i, label in enumerate(self.dataset.labels):
self.cls_idxs[label].append(i)
self.data_iter_list = [RandomCycleIter(x) for x in self.cls_idxs]
for ci in range(self.num_classes):
self.cls_idxs[ci] = np.array(self.cls_idxs[ci])
# Build balanced weights based on class counts
self.balanced_weights = self.get_balanced_weights(self.nroot)
self.uniform_weights = self.get_uniform_weights()
self.manual_weights = self.get_manual_weights(self.lams[0])
# back_weights = self.get_balanced_weights(1.5)
back_weights = self.uniform_weights
# Calculate priority ratios that reshape priority into target distribution
self.per_cls_ratios = self.get_cls_ratios(
self.manual_weights if self.manual_as_backend else back_weights)
self.per_example_ratios = self.broadcast(self.per_cls_ratios)
# Setup priority tree
if self.ptype == 'score':
self.init_weight = 1.
elif self.ptype in ['CE', 'entropy']:
self.init_weight = 6.9
else:
raise NotImplementedError('ptype {} not implemented'.format(self.ptype))
if self.manual_only:
self.init_weight = 0.
self.per_example_uni_weights = np.ones(self.num_samples) * self.init_weight
self.per_example_velocities = np.zeros(self.num_samples)
# init_priorities = np.power(self.init_weight, self.alpha) \
# * self.uniform_weights * self.per_cls_ratios
init_priorities = self.init_weight * self.uniform_weights * self.per_cls_ratios
self.ptree = PriorityTree(self.num_classes, init_priorities,
self.manual_weights.copy(), fixed_scale=self.fixed_scale,
alpha=self.alpha)
def get_cls_ratios(self, tgt_weights):
if tgt_weights is self.uniform_weights:
return np.ones_like(self.uniform_weights)
per_cls_ratios = tgt_weights / self.uniform_weights
per_cls_ratios *= self.uniform_weights.sum() / tgt_weights.sum()
return per_cls_ratios
def get_cls_weights(self):
ratioed_ws = self.per_example_uni_weights * self.per_example_ratios
return self.debroadcast_sum(ratioed_ws)
def broadcast(self, per_cls_info):
per_exmaple_info = np.zeros(self.num_samples)
        # Broadcast per-class info to each example
for ci in range(self.num_classes):
per_exmaple_info[self.cls_idxs[ci]] = per_cls_info[ci]
return per_exmaple_info
def debroadcast_sum(self, per_example_info):
per_cls_info = np.zeros(self.num_classes)
        # De-broadcast per-example info to each class by summation
for ci in range(self.num_classes):
per_cls_info[ci] = per_example_info[self.cls_idxs[ci]].sum()
return per_cls_info
def get_manual_weights(self, lam):
# Merge balanced weights and uniform weights
if lam == 1:
manual_weights = self.balanced_weights.copy()
elif lam == 0:
manual_weights = self.uniform_weights.copy()
else:
manual_weights = self.balanced_weights * lam + (1-lam) * self.uniform_weights
return manual_weights
def get_uniform_weights(self):
return self.cnts.copy()
def get_balanced_weights(self, nroot):
""" Calculate normalized generalized balanced weights """
cnts = self.cnts
if nroot is None:
# Real balanced sampling weights, each class has the same weights
# Un-normalized !!!
cls_ws = np.ones(len(cnts))
elif nroot >= 1:
# Generalized balanced weights
# Un-normalized !!!
cls_ws = cnts / cnts.sum()
cls_ws = np.power(cls_ws, 1./nroot) * cnts.sum()
cls_ws = cls_ws
else:
raise NotImplementedError('root:{} not implemented'.format(nroot))
# Get un-normalized weights
balanced_weights = cls_ws
# Normalization and rescale
balanced_weights *= self.num_samples / balanced_weights.sum() * \
self.balance_scale
return balanced_weights
def __iter__(self):
for _ in range(self.num_samples):
w = random.random() * self.ptree.total
ci, pri = self.ptree.get_leaf(w)
yield next(self.data_iter_list[ci])
def __len__(self):
return self.num_samples
def reset_weights(self, epoch):
# If it is linear shifting
if not self.freeze:
e = np.clip(epoch, 0, self.epochs-1)
self.manual_weights = self.get_manual_weights(self.lams[e])
# make sure 'self.fixed_scale > 0' and 'self.manual_as_backend = True' are
# mutually exclusive
if self.fixed_scale > 0:
self.ptree.reset_fixed_weights(self.manual_weights, self.rescale)
if self.manual_as_backend:
self.update_backend_distribution(self.manual_weights)
# If it is root decay
if self.root_decay in ['exp', 'linear', 'autoexp'] and epoch % self.decay_gap == 0:
if self.root_decay == 'exp':
self.nroot *= 2
elif self.root_decay == 'linear':
self.nroot += 1
elif self.root_decay == 'autoexp':
# self.nroot *= self.decay_factor
self.nroot = np.power(self.decay_factor, epoch)
bw = self.get_balanced_weights(self.nroot)
if self.manual_as_backend:
self.update_backend_distribution(bw)
else:
self.ptree.reset_fixed_weights(bw)
def update_backend_distribution(self, tgt_weights):
# Recalculate the cls ratios based on the given target distribution
self.per_cls_ratios = self.get_cls_ratios(tgt_weights)
self.per_example_ratios = self.broadcast(self.per_cls_ratios)
# Recalculate the new per-class weights based on the new ratios
# new_backend_weights = self.init_weight * self.uniform_weights * self.per_cls_ratios
new_cls_weights = self.get_cls_weights()
self.ptree.reset_adaptive_weights(new_cls_weights)
def update_weights(self, inds, weights, labels):
""" Update priority weights """
if not self.manual_only and self.pri_mode == 'train':
weights = np.clip(weights, 0, self.init_weight)
# Iterate over all classes in the batch
for l in np.unique(labels):
# Calculate per-class delta weights
example_inds = inds[labels==l]
last_weights = self.per_example_uni_weights[example_inds]
# delta = np.power(weights[labels==l], self.alpha) - \
# np.power(last_weights, self.alpha)
delta = weights[labels==l] - last_weights
delta = self.momentum * self.per_example_velocities[example_inds] + \
(1-self.momentum) * delta
# Update velocities
self.per_example_velocities[example_inds] = delta
# Update per-example weights
# self.per_example_uni_weights[example_inds] = weights[labels==l]
self.per_example_uni_weights[example_inds] += delta
                # Scale the delta by the per-class ratio
                # (the same ratio scales the per-example weights both before and after the update)
delta *= self.per_example_ratios[example_inds]
# Update tree
if self.alpha == 1:
self.ptree.update_delta(l, delta.sum())
else:
self.ptree.update(l, self.per_example_uni_weights[self.cls_idxs[l]].sum())
def reset_priority(self, weights, labels):
if self.pri_mode == 'valid':
assert len(np.unique(labels)) == self.num_classes
weights = np.clip(weights, 0, self.init_weight)
cls_weights = np.zeros(self.num_classes)
for c in np.unique(labels):
cls_weights[c] = weights[labels==c].mean()
cls_weights *= self.cnts
cls_weights *= self.per_cls_ratios
self.ptree.reset_adaptive_weights(cls_weights)
def get_weights(self):
return self.ptree.get_weights()
def get_sampler():
return ClassPrioritySampler
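# --- Small numeric sketch (toy counts, balance_scale taken as 1.0) of the
# per-class weights described in the ClassPrioritySampler docstring: balanced
# weights are an nroot-smoothed version of the per-class counts, and the
# manual weights interpolate between balanced and uniform with lam.
def _class_weight_sketch(lam=0.5, nroot=2):
    cnts = np.array([100.0, 10.0, 1.0])          # long-tailed class counts
    num_samples = cnts.sum()
    uniform = cnts.copy()                        # as in get_uniform_weights()
    balanced = np.power(cnts / cnts.sum(), 1.0 / nroot) * cnts.sum()
    balanced *= num_samples / balanced.sum()     # rescale as in get_balanced_weights()
    manual = lam * balanced + (1 - lam) * uniform   # as in get_manual_weights()
    return uniform, balanced, manual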
|
classifier-balancing-main
|
data/ClassPrioritySampler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, Zhongqi Miao
All rights reserved.
"""
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import os
from PIL import Image
# Image statistics
RGB_statistics = {
'iNaturalist18': {
'mean': [0.466, 0.471, 0.380],
'std': [0.195, 0.194, 0.192]
},
'default': {
'mean': [0.485, 0.456, 0.406],
'std':[0.229, 0.224, 0.225]
}
}
# Data transformation with augmentation
def get_data_transform(split, rgb_mean, rgb_std, key='default'):
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]) if key == 'iNaturalist18' else transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
])
}
return data_transforms[split]
# Dataset
class LT_Dataset(Dataset):
def __init__(self, root, txt, transform=None):
self.img_path = []
self.labels = []
self.transform = transform
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.labels.append(int(line.split()[1]))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
path = self.img_path[index]
label = self.labels[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, label, index
# Load datasets
def load_data(data_root, dataset, phase, batch_size, sampler_dic=None, num_workers=4, test_open=False, shuffle=True):
if phase == 'train_plain':
txt_split = 'train'
elif phase == 'train_val':
txt_split = 'val'
phase = 'train'
else:
txt_split = phase
txt = './data/%s/%s_%s.txt'%(dataset, dataset, txt_split)
# txt = './data/%s/%s_%s.txt'%(dataset, dataset, (phase if phase != 'train_plain' else 'train'))
print('Loading data from %s' % (txt))
if dataset == 'iNaturalist18':
print('===> Loading iNaturalist18 statistics')
key = 'iNaturalist18'
else:
key = 'default'
rgb_mean, rgb_std = RGB_statistics[key]['mean'], RGB_statistics[key]['std']
if phase not in ['train', 'val']:
transform = get_data_transform('test', rgb_mean, rgb_std, key)
else:
transform = get_data_transform(phase, rgb_mean, rgb_std, key)
print('Use data transformation:', transform)
set_ = LT_Dataset(data_root, txt, transform)
print(len(set_))
if phase == 'test' and test_open:
open_txt = './data/%s/%s_open.txt'%(dataset, dataset)
print('Testing with opensets from %s'%(open_txt))
open_set_ = LT_Dataset('./data/%s/%s_open'%(dataset, dataset), open_txt, transform)
set_ = ConcatDataset([set_, open_set_])
if sampler_dic and phase == 'train':
print('Using sampler: ', sampler_dic['sampler'])
# print('Sample %s samples per-class.' % sampler_dic['num_samples_cls'])
print('Sampler parameters: ', sampler_dic['params'])
return DataLoader(dataset=set_, batch_size=batch_size, shuffle=False,
sampler=sampler_dic['sampler'](set_, **sampler_dic['params']),
num_workers=num_workers)
else:
print('No sampler.')
print('Shuffle is %s.' % (shuffle))
return DataLoader(dataset=set_, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers)
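# --- Minimal wiring sketch (placeholder path, illustrative hyper-parameters,
# import path assumed): how a sampler_dic consumed by load_data() above can be
# assembled from the get_sampler() helper in data/ClassPrioritySampler.py.
# load_data() only reads the 'sampler' and 'params' keys.
def _example_train_loader():
    from data.ClassPrioritySampler import get_sampler   # assumed import path
    sampler_dic = {
        'sampler': get_sampler(),          # -> ClassPrioritySampler
        'params': {'lam': None, 'epochs': 90, 'nroot': 2, 'ptype': 'CE'},
    }
    return load_data(data_root='/path/to/ImageNet',      # placeholder path
                     dataset='ImageNet', phase='train',
                     batch_size=128, sampler_dic=sampler_dic)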
|
classifier-balancing-main
|
data/dataloader.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Usage:
1. Change "root" to your data path
2. python gen_lists.py
"""
import os
import json
from tqdm import tqdm
root = '/checkpoint/bykang/iNaturalist18'
json2txt = {
'train2018.json': 'iNaturalist18_train.txt',
'val2018.json': 'iNaturalist18_val.txt'
}
def convert(json_file, txt_file):
with open(json_file, 'r') as f:
data = json.load(f)
lines = []
for i in tqdm(range(len(data['images']))):
assert data['images'][i]['id'] == data['annotations'][i]['id']
img_name = data['images'][i]['file_name']
label = data['annotations'][i]['category_id']
lines.append(img_name + ' ' + str(label) + '\n')
with open(txt_file, 'w') as ftxt:
ftxt.writelines(lines)
for k, v in json2txt.items():
print('===> Converting {} to {}'.format(k, v))
srcfile = os.path.join(root, k)
convert(srcfile, v)
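# --- Tiny illustrative check (hypothetical file name): each generated line is
# "<image path> <integer category id>", which is the "path label" format that
# LT_Dataset in data/dataloader.py splits on.
def _example_line_roundtrip():
    line = 'train_val2018/Aves/1234.jpg 42\n'    # hypothetical entry
    path, label = line.split()[0], int(line.split()[1])
    return path, label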
|
classifier-balancing-main
|
data/iNaturalist18/gen_lists.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import json
from tqdm import tqdm
root = '/datasets01_101/imagenet_full_size/061417'
split2txt = {
'train': 'ImageNet_train.txt',
'val': 'ImageNet_val.txt',
# 'test': 'ImageNet_test.txt',
}
def convert(split, txt_file):
clsnames = os.listdir(os.path.join(root, split))
clsnames.sort()
lines = []
for i, name in enumerate(clsnames):
imgs = os.listdir(os.path.join(root, split, name))
imgs.sort()
for img in imgs:
lines.append(os.path.join(split, name, img) + ' ' + str(i) + '\n')
with open(txt_file, 'w') as f:
f.writelines(lines)
for k, v in split2txt.items():
print('===> Converting {} to {}'.format(k, v))
convert(k, v)
|
classifier-balancing-main
|
data/ImageNet/gen_txt.py
|
import re
import sys
import os
import os.path
import random
import json
import time
import nltk.data
import spacy
import pandas as pd
import random
from multiprocessing import Pipe, Pool
from functools import partial
from collections import defaultdict, Counter
from tqdm import tqdm
sys.path.append("/checkpoint/simarora/KILT/")
# from kilt.knowledge_source import KnowledgeSource
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# https://github.com/egerber/spaCy-entity-linker
# initialize language model
nlp = spacy.load("en_core_web_md")
nlp.add_pipe("entityLinker", last=True)
random.seed(1)
INBOX = "dasovich-j"
MY_PATH = "/private/home/simarora/pqa/PersonalDataDemo/" # SET YOUR PATH!
VALID_NER_TYPES = ['ORG', 'PERSON', 'LOC', 'EVENT', 'PRODUCT', 'LANGUAGE', 'LAW']
NER_TYPES_DICT = {
'ORG': 'ORGANIZATION',
'PERSON': "PEOPLE",
'LOC': "LOCATION",
'EVENT': "EVENT",
'PRODUCT': "PRODUCT",
'LANGUAGE': "LANGUAGES",
'LAW': "LEGAL"
}
PUNCT = ["'", ";", ":", ".", ",", '"', "|", ">", "<", "/", "?", ":", ";", "(", ")"]
OVERRIDE = []
# CREATE THE LOCAL CORPUS (approximately 5500 seconds)
def remove_structure_tokens(body):
string_encode = body.encode("ascii", "ignore")
body = string_encode.decode()
body = body.strip()
body = body.strip("]")
body = body.strip("[")
CLEAN_PAIRS = [("\no", " "), ("\n", " "), ("\\n", " "), ("\\t", " "), ("\\", ""),
(" /", " "), (">", " "), ("=09", " "), ("=01", " "), ("=09=09=09=09", " "), ("---", " "),("|", " "),
("___", " "), ("[IMAGE]", " "), ("= ", " "), ("???", " "), ("**", " "), ("??", " "), ("\xa0", " "),
("=20", " "), ("0F", " "), (' " ', " "), (" ' ", " "), (". ?", ". "), ("=01", ""), ("=07", ""),
("National Assn.", "National Association")]
for clean in CLEAN_PAIRS:
body = body.replace(clean[0], clean[1])
# floating quotes
body_toks = body.split()
if body_toks and body_toks[0] in ['"', "'", "?"]:
body_toks = body_toks[1:]
clean_body_toks = []
for ind, tok in enumerate(body_toks):
filt = 0
if len(tok) == 1 and tok in PUNCT:
filt = 1
if all(ch == "?" for ch in tok): # or all(ch == "_" for ch in tok):
filt = 1
if ind > 0 and '.com' in body_toks[ind-1] and tok == 'o':
filt = 1
if len(tok) > 2 and tok[0] == "?":
tok = tok[1:]
if not filt:
clean_body_toks.append(tok.strip())
# get rid of 't o' and 'o f' type splits
combined_tok = ''
combined_toks = []
for ind, tok in enumerate(clean_body_toks):
if combined_tok:
if len(tok) == 1 and tok.islower():
combined_tok = combined_tok + tok
combined_toks.append(combined_tok)
combined_tok = ''
else:
combined_toks.append(combined_tok)
combined_toks.append(tok)
combined_tok = ''
else:
if len(tok) == 1 and tok.islower():
combined_tok = tok
else:
combined_toks.append(tok)
combined_tok = ''
body = " ".join(combined_toks)
    # Wikiextractor-style cleaning steps
body = body.replace('\t', ' ')
body = body.replace('...', ' ')
body = re.sub(u' (,:\.\)\]»)', r'\1', body)
body = re.sub(u'(\[\(«) ', r'\1', body)
body = re.sub(r'\n\W+?\n', '\n', body, flags=re.U) # lines with only punctuations
body = body.replace(',,', ',').replace(',.', '.')
# Common abbreviations
body = body.replace("U.S. ", "United States ")
body = body.replace("Dept. ", "Department ")
body = body.replace(" ", " ")
return body
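# --- Tiny illustrative call (made-up email snippet): remove_structure_tokens()
# drops layout tokens such as "[IMAGE]" and escaped newlines, filters stray
# single-character punctuation tokens, and re-joins accidental splits like
# "i t" back into "it".
def _example_clean_body():
    raw = "Please see the [IMAGE] attached memo .\\nThe committee will review i t tomorrow ."
    return remove_structure_tokens(raw)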
def identify_duplicates_by_text():
first_sentences = []
first_sentence_map = defaultdict(list)
duplicates_map = {}
num_duplicates = 0
sentences_matched = 0
with open(f"Enron_{INBOX}/EmailsCorpus.json") as f:
EnronPassages = json.load(f)
EnronPassages_New = {}
for key, passage in tqdm(EnronPassages.items()):
sents = passage['sents']
# check if it's a duplicate
is_duplicate = 0
for sent in sents:
if sent in first_sentences:
is_duplicate = 1
sentences_matched += 1
first_sentence_map[sent].append(key)
break
# save whether it's a duplicate or not
if not is_duplicate:
for sent in sents:
if len(sent.split()) > 1:
first_sentences.append(sent)
break
first_sentence_map[sents[0]].append(key)
duplicates_map[key] = False
else:
duplicates_map[key] = True
num_duplicates += 1
if not duplicates_map[key]:
EnronPassages_New[key] = passage
print(f"Marked {num_duplicates} passages as duplicates.")
print(f"For {sentences_matched} passages, the first sentences matched exactly.")
with open("first_sentence_map.json", "w") as f:
json.dump(first_sentence_map, f)
# only save the non-duplicates
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages_New, f)
return duplicates_map
def identify_linked_entities(bodies_lst):
# want one mapping on entities to passages
linked_entities_lst = []
for body in bodies_lst:
doc = nlp(body)
# iterates over sentences and prints linked entities
linked_entities = []
for sent in doc.sents:
for entity in sent._.linkedEntities.__dict__['entities']:
entity_title = entity.__dict__['label']
identifier = entity.__dict__['identifier']
description = entity.__dict__['description']
entity_details = {
'title': entity_title,
'identifier': identifier,
'description': description
}
linked_entities.append(entity_details)
linked_entities_lst.append(linked_entities)
return linked_entities_lst
def get_ner_tags(bodies_lst):
ner_tags_lst = []
for body in bodies_lst:
doc = nlp(body)
ner_tags = []
for ent in doc.ents:
ner_tag = {
'text': ent.text,
'start_char': ent.start_char,
'end_char': ent.end_char,
'ner': ent.label_
}
ner_tags.append(ner_tag)
ner_tags_lst.append(ner_tags)
return ner_tags_lst
def split_body_to_sents(body):
MAXIMUM_WORDS = 150
MINIMUM_WORDS = 50
num_words = 0
body_sents, body_sents_lst = [], []
EDGE_CASES = ["Assn.", "Abbrev.", "Var.", "Gov.", "Mass.", "No.",
"Corp.", "Co.", "Cos.", "Inc.", "Pg.", "etc.", "?Pg.", "II.",
"Mr.", "Mrs.", "Ms.", "CH.", "Ch.", "Md.", "Cup."]
# split body into sentences
all_sents = tokenizer.tokenize(body)
new_all_sents = []
current_sent = []
for sent in all_sents:
if sent and sent != " ":
if (len(sent) > 1 and sent[-1] == "." and sent[-2].isdigit()) or (
len(sent) ==2 and sent[-1] == "." and sent[-2].isupper()) or (
len(sent) ==2 and sent[-1] == "(") or (
sent.split()[-1] in EDGE_CASES) or (
len([ch for ch in sent.split()[-1] if ch == "."]) > 1) or (
len(sent) > 2 and sent[-1] == "." and sent[-2].isupper() and sent[-3] == " "):
current_sent.append(sent)
else:
current_sent.append(sent)
sent = " ".join(current_sent.copy())
new_all_sents.append(sent)
current_sent = []
all_sents = new_all_sents.copy()
# split into chunks of some maximum length
for sent in all_sents:
if sent:
body_sents.append(sent)
num_words += len(sent.split())
if num_words > MAXIMUM_WORDS:
body_sents_lst.append(body_sents.copy())
body_sents = []
num_words = 0
# add the trailing/passages
if num_words >= MINIMUM_WORDS:
body_sents_lst.append(body_sents.copy())
body_sents = []
num_words = 0
bodies_lst = []
for body_sents in body_sents_lst:
body = " ".join(body_sents)
bodies_lst.append(body)
return bodies_lst.copy(), body_sents_lst.copy()
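# --- Quick illustrative run (synthetic text): split_body_to_sents() packs
# sentence-tokenized text into passages of a little over MAXIMUM_WORDS words
# each and keeps a trailing passage only if it has at least MINIMUM_WORDS words.
def _example_chunking():
    toy_body = " ".join(
        "This is sentence number {} about the western energy market.".format(i)
        for i in range(60))
    bodies, sents_lst = split_body_to_sents(toy_body)
    return [len(b.split()) for b in bodies]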
def create_local_documents(data, index):
passage2sents = {}
finalEntries = {}
entity2emailid = defaultdict(list)
email2entities = defaultdict(list)
email_key = index
    assert type(index) == int, "index is not the correct format"
psg_key = 0
row = data[index]
body = row["Body"]
if body.strip():
email_title = "EMAIL_" + str(email_key)
body = remove_structure_tokens(body)
# split the email into the MAX SEQ LENGTH sized chunks
bodies_lst, body_sents_lst = split_body_to_sents(body)
# get entity annotations
ner_tags_lst = get_ner_tags(bodies_lst)
linked_entities_lst = identify_linked_entities(bodies_lst)
for body, body_sents, linked_entities, ner_tags in zip(bodies_lst, body_sents_lst, linked_entities_lst, ner_tags_lst):
psg_title = f"PERSONAL_e{str(email_key)}_p{str(psg_key)}"
passage2sents[psg_title] = body_sents
new_id = f"e{str(email_key)}_p{str(psg_key)}"
finalEntries[new_id] = {
"id": new_id,
"email_title":email_title,
"title":psg_title,
"text":body,
"sents":body_sents,
"ner_tags_lst":ner_tags,
"linked_entities_lst":linked_entities
}
for ent in linked_entities:
entity2emailid[ent['title']].append(psg_title)
email2entities[psg_title].append(ent['title'])
psg_key += 1
return finalEntries, entity2emailid, email2entities, passage2sents
def create_local_passages_wrapper():
# unzips the raw data
pool = Pool(8)
passage2sents = {}
entity2emailid = defaultdict(list)
email2entities = defaultdict(list)
# load the correct inbox and the mappings
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/parsed_maildir/{INBOX}_09082021.csv") as f:
data = pd.read_csv(f)
print(f"Length of inbox: {INBOX} is {len(data)}")
st = time.time()
# select entries with an existent body and message id
data = data[pd.notnull(data['Body'])]
data = data[pd.notnull(data['Message-ID'])]
data = data.to_dict('records')
# data = data[0:100]
data_indices = range(len(data))
entries_lst, entity2emailid_lst, email2entities_lst, passage2sents_lst = zip(*pool.map(partial(create_local_documents, data), data_indices))
finalEntries = {}
for entries_dict in entries_lst:
for key, entry in entries_dict.items():
finalEntries[key] = entry
with open(f"Enron_{INBOX}/EmailsCorpus.json", "w") as f:
json.dump(finalEntries, f)
for passage2sents_subdict in passage2sents_lst:
for psg_key, sents in passage2sents_subdict.items():
passage2sents[psg_key] = sents
for email2entities_subdict in email2entities_lst:
for psg_key, entities_list in email2entities_subdict.items():
email2entities[psg_key] = entities_list
for entity2emailid_subdict in entity2emailid_lst:
for entity_name, psgs_list in entity2emailid_subdict.items():
if entity_name in entity2emailid:
entity2emailid[entity_name].extend(psgs_list)
else:
entity2emailid[entity_name] = psgs_list
# # save the mappings
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/subject2sents.json", "w") as f:
json.dump(passage2sents, f)
print(f"Saved passages 2 sents for {len(passage2sents)} passages.")
with open(f"{MY_PATH}/Enron_{INBOX}/entity2emailid.json", "w") as f:
json.dump(entity2emailid, f)
print(f"Saved entity2emailid for {len(entity2emailid)} entities.")
with open(f"{MY_PATH}/Enron_{INBOX}/email2entities.json", "w") as f:
json.dump(email2entities, f)
print(f"Saved email2entities for {len(email2entities)} emails.")
print(f"Generate full set of personal documents in time: {time.time() - st}")
print(f"There are: {len(finalEntries)} passages created.")
def extra_cleaning():
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
EnronPassages_New = {}
for key, passage in tqdm(EnronPassages.items()):
new_sents = []
for sent in passage['sents']:
sent = remove_structure_tokens(sent)
if sent and sent != " ":
if sent[0] == "?" and len(sent) > 1:
sent = sent[1:]
new_sents.append(sent)
passage["sents"] = new_sents
passage['text'] = " ".join(new_sents)
EnronPassages_New[key] = passage
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages, f)
# FILTER POOR QUALITY NED TAGS AND GENERATE FINAL LISTS OF LOCAL / GLOBAL ENTITIES
def ner_alias_replacements(tag_text):
tag_toks = tag_text.split()
tag_toks = [tok.replace("\\'s", "") for tok in tag_toks]
tag_toks = [tok.replace("'s", "") for tok in tag_toks]
tag_toks = [tok for tok in tag_toks if tok not in ['RE', 'F1', 'To:', "PS", "Subject", "Sent"]]
tag_toks = [tok.replace("=20","").replace("=","").strip() for tok in tag_toks if tok not in ['the'] and tok not in PUNCT]
tag_text = " ".join(tag_toks)
tag_text = tag_text.replace("Enron", "")
tag_text = tag_text.replace("U.S.", "United States")
tag_text = tag_text.replace("US", "United States")
tag_text = tag_text.replace("LA", "Los Angeles")
tag_text = tag_text.replace("L.A.", "Los Angeles")
tag_text = tag_text.replace("SF", "San Francisco")
tag_text = tag_text.replace("NY", "New York")
tag_text = tag_text.replace("N.Y.", "New York")
# punct
tag_text = tag_text.replace("**", "").strip()
tag_text = tag_text.replace("-", "").strip()
tag_text = tag_text.replace("\\t", " ").strip()
tag_text = tag_text.replace("\\", "")
tag_text = tag_text.replace(":", " ").strip()
tag_text = tag_text.replace(" ", " ")
return tag_text
def filter_named_entities():
print("Running some filtering on tagged entities to remove poor quallity!")
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
linkedentities2nertags_global = {}
for k, v in tqdm(EnronPassages.items()):
text = v['text']
ner_tag_text = []
ner_tag_to_item = {}
for tag in v['ner_tags_lst']:
tag_text = ner_alias_replacements(tag['text'])
if tag_text:
ner_tag_text.append(tag_text)
ner_tag_to_item[tag_text] = tag
filtered_ents = []
for ent in v['linked_entities_lst']:
filter = 0
# FILTER 1: exact match of alias and entity title
if not ent or not ent['title'] or ent['title'] not in text:
filter = 1
            # FILTER 2: if the entity title does not appear in the NER-tagged span text at all
if ent['title'] not in ner_tag_text:
filter = 1
# FILTER 3: if it's a PERSON NER tag, and not the full name (first, last) then drop it
if not filter and ner_tag_to_item[ent['title']]['ner'] == "PERSON":
if len(ent['title'].split()) == 1:
filter = 1
# sometimes the second word is just an initial e.g., "Richard B."
elif len(ent['title'].split()) == 2 and len(ent['title'].split()[1]) < 3:
filter = 1
# FILTER 4: do any of the entity linking description words match the text? e.g., Nokia Chairman
if not filter:
linkedentities2nertags_global[ent['title']] = ner_tag_to_item[ent['title']]['ner']
filtered_ents.append(ent)
v['linked_entities_lst'] = filtered_ents
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages, f)
with open(f"{MY_PATH}/Enron_{INBOX}/linkedentities2nertags_global.json", "w") as f:
json.dump(linkedentities2nertags_global, f)
# PRODUCE A LIST OF THE LOCAL AND GLOBAL ENTITIES
def get_wiki_df():
st = time.time()
passages_path = '/checkpoint/simarora/mdr/data/hotpot_index/wiki_id2doc.json'
with open(passages_path) as f:
wiki_id2doc = json.load(f)
passages = []
for k, v in wiki_id2doc.items():
v['id'] = k
passages.append(v)
print(f"Loaded full set of documents in {time.time() - st}")
    st = time.time()
    df = pd.DataFrame(passages)
    print(f"Built passages DataFrame in {time.time() - st}")
st = time.time()
wikititles = [psg['title'] for psg in passages]
return df, wikititles
def get_variations_lst(titles, wikititles=[], cache=None, text="", sents=[]):
global_titles = {}
remaining = []
for tup in titles:
title, tag = tup[0], tup[1]
filt = 1
if " Cor" in title:
USE_REPLACEMENTS = [("Corp.",""), ("Corporation", ""), ("Corp.", "Corporation"), ("Corp.", "Company")]
elif " Co" in title:
USE_REPLACEMENTS = [("Co.",""), ("Co", ""), ("Co.", "Company"), ("& Co.", ""), ("Computer", "")]
elif "The " in title:
USE_REPLACEMENTS = [("The ", "")]
elif "Inc" in title:
USE_REPLACEMENTS = [("Inc. ", ""), ("Inc.", "")]
elif "Venture" in title:
USE_REPLACEMENTS = [("Ventures", " "), ("Venture Fund", " ")]
elif any(wd in title for wd in ['URL', 'Ltd.', '&', "Venture", "Capital", "News"]):
USE_REPLACEMENTS = [("Ltd.", ""), ("URL", ""), ("&", "&"), ("Limited", ""), ("Newspaper", " "), ("Capital", " ")]
else:
USE_REPLACEMENTS = []
if USE_REPLACEMENTS:
title = title.replace(",", " ")
title = title.replace(" ", " ")
for replace in USE_REPLACEMENTS:
title_new = title.replace(replace[0], replace[1]).strip()
if title == title_new:
continue
elif title_new in cache and cache[title_new]:
filt = 0
break
elif title_new in wikititles:
filt = 0
cache[title_new] = 1
break
if not filt:
text = text.replace(title, title_new)
text = text.replace(" ", " ")
new_sents = []
for sent in sents:
new_sents.append(sent.replace(title, title_new).replace(" ", " "))
sents = new_sents.copy()
global_titles[title_new] = tag
else:
remaining.append(title)
return global_titles, remaining, text, sents, cache
def local_ents_refilter_by_wikipassages():
df, wikititles = get_wiki_df()
THRESH = 9
freq_local = []
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
for key, value in local_ents.items():
if value > THRESH:
freq_local.append(key)
swapped_titles = []
for local_title in freq_local:
sents = len(df[df['title'].str.contains(local_title)]["id"].values)
sents += len(df[df['text'].str.contains(local_title)]["id"].values)
if sents >= 1:
swapped_titles.append(local_title)
with open(f"Enron_{INBOX}/local_ents_refilter.json", "w") as f:
json.dump(swapped_titles, f)
def local_ents_checker(local_title, hard_coded_dictionary):
# hard rules for which we want to exclude the local title as a local entity
if any((len(tok) == 2 and tok.islower() and tok not in stop_words) for tok in local_title.split()):
return False
if any(tok in['PM', 'AM', 'EDT', 'EST', 'PST', 'AB', 'SB', 'Cc', 'RE', 'F1',
'To:', "PS", "P.S.", "Subject", 'said', 'said.', "hasn\'t", 'has',
"doesn\'t", "does", "didn\'t", "did"] for tok in local_title.split()):
return False
if any((len(tok) == 1 and tok.islower() and tok not in ['a', 'i']) for tok in local_title.split()):
return False
if any(wd in local_title for wd in ['United States', "Dow Jones", 'New York', 'Committee', "AT&T",
"Associated Press", "Goldman Sachs", "Pacific Gas", "The Times",
"Financial Times", "Haas School", "Charles Schwab",
"Morgan Stanley", "J.P. Morgan", "Standard &",
"FERC", 'Los Angeles', "PG&E", "San Francisco", ".com"]):
return False
if local_title.split()[0] == '&' or local_title.split()[-1] == '&':
return False
if local_title.split()[0] in ['of', 'To', "or"] or local_title.split()[-1] == 'of':
return False
if "?" in local_title:
return False
if local_title.isupper() or local_title.islower():
return False
for tok in local_title.split():
if all(t.isdigit() for t in tok):
return False
if hard_coded_dictionary[local_title]:
OVERRIDE.append(local_title)
return False
return True
def hard_coded_remove_local_entities():
hard_coded_dictionary = defaultdict(int)
remove_local = [
'Jeff Dasovich', 'Wash. Post', 'Private Company Business News', 'Public Service Company of New Mexico',
'Channing Way Berkeley', 'Universal Studios', 'California State', "National Assn.",
'University of California, Berkeley Berkeley', 'AP Business Writer', 'Bad News', 'English News',
'West Coast', 'Haas Social Venture Competition', 'Haas Haas Celebrations',
'Electrical Engineering', 'Board of Directors', 'Pacific G&E', 'Calif Gov', 'California Senate',
'California Legislature', 'The Economic Times Copyright', 'Times Staff', 'Costa Times',
'Times Staff Writers', 'World Watch The Wall Street Journal', "Mobile Outlook",
'The Wall Street Journal A2', 'Dear Haas Community', 'California State University and University of California',
'Jeff Dasovich NA', 'Justice Department', 'Energy Department', 'State Department', 'The Commerce Department',
'Department of Water', 'Department of Finance', 'Defense Department', 'Interior Department',
'Water Resources Department', 'Department of Commerce', 'The Energy Department', 'The Justice Department',
'The Department of Energy', 'Department of Education', 'Labor Department', 'The Department of Water Resources',
'The Labor Department', 'Treasury Department', 'Commerce Department', 'Northern and', 'Account and',
'Computer Science or Engineering', 'Participation in Roundtable on Lessons Learned',
'English News Service', 'Newport News', 'Domestic News', 'Eastern Time', 'Central Time', 'Govt Affairs',
'Evening MBA Program Office', 'General Accounting Office', 'III Chief of Staff Office of Assembly',
'Office of Emergency Services', 'Office of Government Ethics', 'The General Accounting Office', 'Docket Office',
'DSan Diego', 'The State Government', 'United Kingdom0F', 'Page A1', 'Gas & Electric', 'George W.',
'Gov Davis', 'Board BOSTON', 'Science & Technology', "Gov't Affairs", 'Section 19.3.2',
'Dow Jones)The California Independent System Operator','Corp. Cut', 'Securities & Exchange Commission',
"Director Institute of Management, Innovation and Organization"
]
print(f"Total remove local size: {len(remove_local)}")
with open(f"{MY_PATH}/Enron_{INBOX}/hard_block_local_entities_v2.json", "w") as f:
json.dump(remove_local, f)
global_override = [
'UC CSU', 'Enron Corp.', "Securities & Exchange Commission", "QUALCOMM, Inc.", 'UC Berkeley',
'University of California Berkeley', 'Berkeley CA', 'University of California at Berkeley',
'Merrill Lynch & Co.', 'Wells Fargo & Co.', 'Boeing Co.', 'U.C. Berkeley', 'Bain & Co.', 'Allen & Co.',
'Bear, Stearns & Co.', 'General Electric Co.', 'Ford Motor Co.', 'Walt Disney Co.', 'Transwestern Pipeline Co.',
'Halliburton Co.', 'Portland General Electric Co.', 'Southern California Edison Co.',
'Transwestern Pipeline Co.', 'American Electric Power Co.', 'El Paso Natural Gas Co.','DTE Energy Co.',
'Green Mountain Energy Co.','Commonwealth Edison Co.', 'Arizona Public Service Co.','Tata Power Co.',
'Duke Energy Co.', 'DuPont Co.','Gas Co.','Gujarat Gas Co.', 'McKinsey & Co.', 'Goldman, Sachs & Co.',
'Economic Times', 'New York Times', "New President & CEO", "President & CEO", "VC Fund", "Lays Off",
'UC San Diego', 'District of Columbia', 'JP Morgan Chase', 'Morgan Point', 'JP Morgan',
'Transwestern Pipeline Company', 'McKinsey & Company', 'The Gas Company', 'The Washington Post Co.',
'El Paso Natural Gas Company', 'Portland General Electric Company', 'L.A. Times', 'Wall Street Journal',
'Transwestern Pipeline Company', 'Southern California Edison Company', 'Chicago Tribune Company',
'Idaho Power Company', 'The Dabhol Power Company', "The Securities and Exchange Commission",
'The New Power Company', 'San Diego Gas and Electric Company', 'Greenfield Shipping Company',
'Public Utility Holding Company Act', 'San Diego Gas & Electric Company', 'UC Davis', 'UC Irvine',
'UC BERKELEY', 'Department of Water Resources', 'Exelon Corp.', "Chronicle Staff Writers",
'Department of Energy', 'Department of Environmental Protection', "Department of Water Resources",
'TXU Corp.', 'Apache Corp.', 'Microsoft Corp.', 'Intel Corp.', 'Sony Corp.', 'News Corp.',
'General Motors Corp.', 'Exxon Mobil Corp.', 'Chevron Corp.', 'Compaq Computer Corp.',
'Nortel Networks Corp.', 'Enron North America Corp.', 'Enron Canada Corp.', 'Oracle Corp.', 'PPL Corp.',
'EMC Corp.', 'BellSouth Corp.', 'National Thermal Power Corp.', 'American Electric Power Service Corp.',
'Illinova Corp.', 'Electric Corp.', 'El Paso Energy Corp.', 'Indian Oil Corp.', 'TransAlta Corp.',
'Fluor Corp.', 'Dabhol Power Corp.', 'Mobil Corp.', 'Exxon Corp.', 'ChevronTexaco Corp.', 'E nron Corp.',
'Questar Corp.', 'Qwest Corp.', 'Sprint Corp.', '- Enron Corp.', 'Bank of America Corp.',
'Bechtel Corp.', 'First Albany Corp.', 'Sempra Energy Corp.', 'Yellow Corp.', 'Sempra Energy Trading Corp.',
'Credit Suisse First Boston Corp.', 'VoiceStream Wireless Corp.', 'Oil & Natural Gas Corp.', 'Enron Corp. Cut',
'Enron Corporation', 'VC Personnel', "Time Warner Telecom, Inc.", "Time Warner Telecom", "Our Bureau Copyright",
"Nortel Networks", "National Public Radio", "Independent Ene rgy Producers Association",
"Cinergy Corp.", "Dynegy Inc.", "Dynegy Corp.", "Nasdaq Stock Market", "The Economist Newspaper",
"The Independent London FOREIGN", "Dell Computer", "Viacom Inc.", "Compaq Computer", "Reuters Limited",
"WalMart Stores Inc.", "Cisco Systems Inc.", "Royal Dutch Shell Group", "Occidental Petroleum Corp.",
"Marathon Oil Canada Inc.", "NRG Energy Inc.", "Barclays Global Investors", "Deloitte Consulting",
"Financial Desk", "AP Business Writer DATELINE", "Financial Desk Markets", "Shiv SenaBJP",
"AP Online", "Futu reTense", "Procter & Gamble", "Chronicle Staff", "Environmental Strategies", "Editorial Desk",
"Johnson & Johnson", "Assembly Floor", "Assembly Energy", "Working Council",
"HewlettPackard Co.", "Board SAN FRANCISCO", "Angel Investors", "Your Account Settings", "McGrawHill, Inc.",
"Deutsche Bank AG", "Industrial Markets", "Verizon Communications, Inc.", "Washington Post Staff",
"Sun Microsystems Inc.", "Oil & Gas", "a Federal Energy Regulatory Commission", "UBS Capital", "AT&T Ventures",
"The Boston Consulting Group", "Oracle Venture Fund", "Gas Daily",
"The Supreme Court", "Internet Outlook", "Round Two", "NRG Energy, Inc.", 'Department of Justice',
"Wireless Telecommunications", "a Securities and Exchange Commission", "Week Change", "Pacific, Boston",
'Department of Water Resources.',"The Hindu Copyright (C", "PR Newswire (Copyright (c)", "Finance Ministry",
]
swapped_titles = [
'Enron Corp', 'Enron Corp.', 'Smith Street', 'Power Exchange', 'General Fund', 'Ken Lay', 'Dow Jones', 'Jim Foster', 'UBS Warburg',
'California Senate', 'Energy Committee', 'Universal Studios', 'Nevada Power Co.', 'Sierra Pacific Power', 'UC Berkeley', 'Bush Administration',
'Steve Baum', 'Dept. of', 'Water Resources', 'The Chronicle', 'Department of Water Resources', 'Legislative Analyst', 'Gordon Smith',
'Federal Energy Regulatory', 'Anne Kelly', 'Andy Brown', 'State Legislature', 'Quaker Oats', 'Advisory Group', 'San Diego Gas', 'Action Network',
'Government Affairs', 'Jeff D.', 'Utility Service', 'Williams Communications', 'Public Service Commission', 'Direct Access', 'California State',
'John Campbell', 'Chamber of Commerce', 'Sacramento Bee', 'San Jose Mercury News', 'Craig Rose', 'David Ward', 'Don Thompson', 'Public Affairs',
'Wall Street Journal', 'Independent System', 'Public Utilities Commission', 'Bill Campbell', 'John Nelson', 'Charles Schwab', 'Corporate Finance',
'California Assembly', 'Susan Davis', 'Pacific Gas', 'Proposition 9', 'Energy Commission', 'The Utility Reform Network', "Arthur O\\'Donnell",
'Electric Co.', 'Paul Patterson', 'Independent System Operator', 'Tom Higgins', 'Wheeler Ridge', 'Southern California Gas Co.', 'El Paso',
'Watson Wyatt', 'United States EPA', 'Business Development', 'David Young', 'Hewlett Packard', 'Bill Jones', 'Ray Hart', 'Pacific Gas &', 'California Edison',
'Senate Energy', 'Sony Computer Entertainment America', 'Reliant Energy', 'Pro Tem', 'Maharashtra Government', 'Salomon Smith Barney', 'West Coast',
'The White House', 'Claire Buchan', 'Halliburton Co.', 'Apache Corp.', 'Duke Energy Corp.', 'Dabhol Power Co.', 'Economic Times', 'Independent Energy',
'in California', 'Portland General Electric Co.', 'Portland General', 'Sierra Pacific', 'Mike Day', 'Rocky Mountain', 'Securities and Exchange Commission',
'AES Corp.', 'Michael Kahn', 'Dan Schnur', 'UC Davis', 'New York Times', 'John Stevens', 'Electric Company', 'Broadband Services', 'Ken Rice', 'Bay Area',
'New York Times Company', 'El Paso Energy', 'Rebecca Smith', 'Washington Post', 'Environmental Protection Agency', 'Southern Co.', 'Federal Reserve',
'International Business Machines', 'Microsoft Corp.', 'Intel Corp.', 'Walt Disney Co.', 'Verizon Communications Inc.', 'Sony Corp.', 'News Corp.', 'Big Board',
'George Bush', 'Entergy Corp.', 'Dabhol Power', 'Department of Energy', 'Portland General Electric Company', 'Phillips Petroleum Co.', 'Shell Oil Co.',
'John Chambers', 'Haas School', 'Utility Reform Network', 'Mark Cooper', 'North Field', 'State Government', 'Central Government', 'New Power', 'National Grid',
'Gulf Coast', 'John Anderson', 'General Motors Corp.', 'Home Depot', 'Exxon Mobil', 'MBA Program', 'Forest Service', 'Napa Valley', 'Carnegie Mellon',
'Washington University', 'John Edmiston', 'Quaker Oats Co.', 'American Electric Power Co.', 'Jeff Miller', 'Louis XIV', 't o', 'Joe Edwards', 'William S.',
'Energy Policy Act', 'General Electric Co.', 'International Business Machines Corp.', 'America Online', 'Wal-Mart Stores', 'Ford Motor', 'Bell Atlantic',
'SBC Communications', 'Fortune magazine', 'Exxon Mobil Corp.', 'Texaco Inc.', 'Chevron Corp.', 'Ford Motor Co.', 'Citigroup Inc.', 'Phillips Petroleum',
'J.C. Penney', 'Waste Management', 'Ethics Commission', 'Philip Morris', 'Union Government', 'Oversight Board', 'John Burton', 'County Board of Supervisors',
'Michael Katz', 'Jonathan Berk', 'University of Texas', 'Graduate School of Business', 'Wharton School', 'Mike Allen', 'California Commission', 'United States News',
'Andrew Rose', 'Ken Rosen', 'Urban Economics', 'Eugene E.', 'Business Administration', 'National Economic Council', 'Board of Directors', 'Asia Pacific',
'Marketing Group', 'John Morel', 'Electrical Engineering', 'External Affairs', 'Energy Services', 'New York', 'al l', 'New Economy', 'First Amendment', 'East Coast',
'Tracy Fairchild', 'Nevada Power', 'Amr Ibrahim', 'California Street', 'Republican Assembly', 'Supreme Court', 'Roger Salazar', 'Aaron Thomas', 'Joe Dunn',
'Tom Williams', 'John Sousa', 'east coast', 'Chapter 11', 'House Energy', 'Union Bank of California', 'Computer Center', 'District Court', 'Charles Robinson',
'State of California', 'J.P. Morgan', 'Golden State', 'Department of Environmental Protection', 'Natural Gas Act', 'Fortune 100', 'west coast', 'Dabhol Power Co',
'Lee Brown', 'City Council', 'City Hall', 'Digital Media', 'Edward Jones', 'Bank of New York', 'Bank One', 'Bankruptcy Court', 'Public Service Co.', 'United States Bank',
'Department of Water and Power', 'United States Bankruptcy Court', 'Southern California Gas', 'Eastern Time', 'Steve Johnson', 'Investors Service', 'Mercury News',
'Peter Cartwright', 'Securities Exchange Act', 'United States Supreme Court', 'PECO Energy Co.', 'Steve Wright', 'Cal State', 'Morro Bay', 'Southern Energy', 'AES Corp',
'Business Week', 'Mission Energy', 'Pacific Gas and Electric Co.', 'California Public Utilities', 'Henry Duque', 'United States Energy', 'Clean Air Act', 'Justice Department',
'Energy Department', 'Moss Landing', 'Chula Vista', 'United States House', 'Montana Power Co.', 'Montana Power', 'General Counsel', 'Pacific Gas and', 'Bankruptcy Code',
'College of Engineering', 'Federal Government', 'Squaw Valley', 'South Bay', 'Geoff Brown', 'Geoffrey Brown', 'Pat Wood', 'Oracle Corp.', 'Apple Computer', 'PPL Corp.',
'Wisconsin Energy', 'Stephen Oliver', "Los Angeles\\'", 'Cove Point', 'Williams Co.', 'United States Treasury', 'United States Circuit Court', 'Ras Laffan', 'Signature Services',
'customer s', 'United States Mail', 'United States Court of Appeals', 'Qualcomm Inc.', 'State Department', 'Bay area', 'Morgan Point', 'John Olson', 'Mike Smith', 'K Street',
'Richard Sanders', 'Bob Williams', 'Gary Fergus', 'Central Time', 'UC Irvine', 'Round One', 'Public Utility Commission', 'Energy Crisis', 'Energy Regulatory Commission',
'Rebecca Mark', 'Solar Power', 'Sierra Pacific Power Co.', 'Shell Oil', 'Sacramento Municipal Utility', 'Air Force', 'Workers Party', 'Peter Evans',
'Competitive Telecommunications Association', 'Richard Lyons', 'Commonwealth Edison Co.', 'Atal Bihari', 'Coyote Valley', 'Superior Court', 'Costa Times', 'Jack Scott',
'Jim Sanders', 'General Accounting Office', 'National Energy', 'Bill Morrow', 'Bob Foster', 'Bill Leonard', 'David Freeman', 'Dave Freeman', 'Board of Supervisors',
'Willie Brown', 'Communications Committee', 'Red Herring', 'Paul Carpenter', 'Harvey Morris', 'Market Surveillance Committee', 'State Auditor', 'The European Union',
'Electric Corp.', 'Utilities Commission', 'California Independent System', 'Joseph Dunn', 'John White', 'Robert Laurie', 'Richard Ellis',
'West Asia', 'Arizona Public Service Co.', 'Stephen Frank', 'Ross Johnson', 'Patrick Wood', 'David Hitchcock', 'Investor Service', 'ta ke', 'English News Service',
'Indian Oil Corp.', 'David Cox', 'Ben Campbell', 'John Wilson', 'Craig Barrett', 'William Wise', 'System Operator', 'East Bay', 'Fluor Corp.', 'sta te',
'Conference Board', 'San Francisco Chron', 'rat e', 'Dan Smith', 'Federal Energy', 'Clark Kelso', 'San Diego Gas &', 'Senate Select Committee', 'Public Utilities',
'Gray Dav', 'Department of Water', 'th e', 'Fair Oaks', 'Press Club', 'Tom Riley', 'Tamara Johnson', 'Air Resources Board', 'Regulatory Affairs', 'Marina del Rey',
'Desert Southwest', 'Franchise Tax Board', 'Investor Relations', 'General Assembly', 'High Point', 'Human Resources', 'ou r', 'Chase Manhattan', 'Ray Lane',
'Alex Brown', 'Venture Partners', 'Thomas White', 'Senate Appropriations', 'Robert C.', 'tha n', 'British Telecommunications plc', 'Health and Human Services',
'Harris Interactive', 'Kleiner Perkins', 'Mobil Corp.', 'Exxon Corp.', 'Steve Elliott', 'Board of Equalization', 'Department of Finance', 'Phi Beta Kappa', 'Richard Simon',
'Bank of Nova Scotia', 'Credit Lyonnais', 'Neil Stein', 'Wen Chen', 'Energy Conference', 'Undergraduate Program', 'Task Force', 'Legislative Counsel', 'Andersen Consulting',
'Indian Government', 'Ajit Kumar', 'Peter Behr', 'Kevin Murray', 'Carl Pope', 'Sean Gallagher', 'K. Lay', "Paul O\\'Neill", 'Chase Manhattan Bank', 'Maharashtra State', 'Banc of America',
'Ian Russell', 'Questar Corp.', 'State Senate', 'Republican Party', 'British Telecom', 'Salomon Smith', 'Defense Department', 'Wholesale Energy Market', 'Laurence Drivon', 'Western Power',
'John Hill', 'Regulatory Commission', 'o r', 'United States District Court', 'Air Quality', 'The Golden State', 'Boeing Co.', 'Social Security', 'UC San Diego', 'mor e', "Brian D\\'Arcy",
'the administration', 'n California', 'Northern and', 'yea r', 'International Power', 'California Chamber', 'Mike Briggs', 'California Independent', 'Elk Grove', 'wer e',
'Commonwealth Club', 'tha t', 'Los Angeles Department', 'stat e', 'Arctic National Wildlife', 'Diablo Canyon', 'District of Columbia', 'Pfizer Inc.', 'Jack Stewart', 'Keith McCrea',
'Barclays Capital', 'Qwest Corp.', 'Sprint Corp.', 'Enforcement Bureau', 'Financial Express', 'Business Council', 'Newport News', 'Press Trust', 'Nesbitt Burns', 'Brad Williams', 't he',
'Scott Reed', 'Chris Cox', 'Edwin Chen', 'Los Angeles Department of Water and ', 'Water Resources Department', 'at a', 'Randy Cunningham', 'Duke Power', 'Jeffrey A.', 'Jeff Brown',
'pa y', 'Joe Nation', 'Star Fleet', 'Montana Resources', 'Marine Corps', 'Office of Emergency Services', 'Otay Mesa', 'Rick Johnson', 'Societe Generale', 'Michael Hoffman',
'Blackstone Group', 'Community Energy', 'c Utilities Commission', 'Capital Investors', 'Venture Fund', 'Department of Commerce', 'Pinot Noir', 'Governing Board', 'vic e',
'Eastman Kodak', 'Carlyle Group', 'Grey Advertising', 'Model N', 'WR Hambrecht', 'North Slope', 'Energy Foundation', 'Christopher F.', 'Raymond James', 'Product Development',
'Dain Rauscher', 'Imperial Bank', 'Venture Capital', 'and Washington', 'Sevin Rosen', 'of Sales', 'Bank of America Corp.', 'n energy', 'Three Mile Island', 'Los Angeles Department of Water',
'Mark Baldwin', 'Global Coal', 'TL Ventures', 'George H.W. Bush', 'United States Power', 'for California', 'an d', 'control s', 'don e', 'the commission', 'Data Centers',
'Western Region', 'Capital Partners', 'Public Utility Holding Company Act', 'John Browne', 'Virodhi Andolan', 'are a', 'William Hogan', 'business development', 'Ken Smith',
'State Board of Equalization', 'Duke Energy Co.', 'Information Technology', 'William Blair', 'Technology Ventures', 'Capital Management', 'Growth Capital', 'Thomas Weisel',
'Investment Management', 'Union Pacific', 'Public Policy Institute', 'David Anderson', 'New West', 'supreme court', 'Susan Scott', 'Judiciary Committee', 'Eastman Chemical',
'Hummer Winblad', 'Draper Fisher', 'Arthur Andersen LLP', 'Department of Education', 'September 11th', 'S. David', 'Lloyds TSB', 'Republican party', 'for a', 'Amadeus Capital',
'Clay Johnson', 'Labor Department', 'Bill Wood', 'official s', 'Angeles Department of Water and Power', 'Florida Supreme Court', 'Governmental Affairs Committee', 'Royal Dutch',
'Alfred Kahn', 'World Affairs Council', 'Richard B.', 'Mechanical Engineering', 'Project Manager', 'The Independent Institute', 'Sony Music Entertainment', 'Texas Pacific',
'Providence Equity', 'Azure Capital', 'Page 2', 'Intel Corporation', 'Ministry of Defense', 'La Suer', 'Wind River', 'First Energy', 'Arts Alliance', 'Critical Path',
'Office of Government Ethics', 'Moore Capital', 'Desert Star', 'California Energy', 'United Way', 'Contra Costa', 'State Water Resources Control Board', 'West coast',
'Scott Miller', 'Channel 8', 'Rules Committee', 'Finance Group', 'PECO Energy', '2001 Los Angeles', 'Department of Justice', 'Contra Costa County', 'section 2', 'Pequot Capital',
'Bill Hall', 'William Hall', 'Royal Caribbean', 'Lee Friedman', 'Tom Gros', 'Blue Shield', 'Science Applications International', 'BMG Entertainment', 'Court of Appeals',
'Jeff Green', 'Bill Massey', 'Reed Elsevier', 'International Affairs', 'Professor of Public Policy', 'Computer Science', 'Data Warehouse', 'Michael Day', 'Dow Chemical',
'Fleur de Lys', 'Mona L', 'the Commission', 'First Fund', 'Discovery Capital', 'Applied Micro Circuits', 'California Report', 'Michael Ramsay', 'Tim Carter', 'Alpine Meadows',
'Order No', 'Salvation Army', 'Shaw Group', 'Michael M.', 'Chris H.', 'Williams III', 'Duke of', 'San Jose', 'David W', 'PS 2', 'Doug Smith', 'Securities and Exchange',
'Bonneville Power', 'Vol. 3', 'Steve Smith', 'Strategic Energy', 'Cal State Fullerton', 'Steve Hall', 'Phillip K.', 'Political Reform Act', 'Senate Committee', 'Glenn Johnson',
'Fair Political Practices Commission', 'Electric Board', 'Power Authority', 'Bill Ahern', 'John D. Dingell', 'John S.', 'New Energy', 'Northern Natural Gas', 'Michael Kirby',
'Gas Co.', 'Charlotte Observer', 'Stephen Moore', 'L.A. Times', 'Company, Inc.', 'Bob Anderson', 'William Mead', 'South Lake Tahoe', 'Wisconsin Gas', 'Mark Long',
'The Financial Express', "Brian O'Connell", 'Jim Fallon', 'Red Cross', 'Ann M.', 'James D.', 'Mark A.', 'Kevin Kelley', 'Steven J.', 'Linda J.', 'Coral Springs', 'P.O. Box',
'Steve C.', 'Susan M.', 'Cornell Club', 'Performance Management', 'Review Group', 'Robin Hill', 'Bad News', 'Opus One', 'Wireless Services', 'First Round',
'Kennedy School of Government', 'National Geographic', 'John Bowers', 'Optical Internet', 'Applied Physics', 'Implementation Group', 'Don Smith', 'Project Management',
'Community Choice', 'Power Pool', 'Press Conference', 'Treasury Department', 'Antitrust Act', 'Public Regulation Commission', 'Ray Williams', 'Facility Management', 'Ross Ain',
'Nord Pool', 'SBC Communications, Inc.', 'Global Telecom', 'Corporation Commission', 'Finance Committee', 'Valley Center', 'Motorola, Inc.', 'Fifth Circuit', 'Communications, Inc.',
'International Bureau', 'National Historic Preservation Act', 'Transportation Commission', 'Management Committee', 'South Slope', 'ris k', 'Dennis Harris', 'Public Affairs Committee',
'Data Quality', 'Murray P.', 'Rebecca W.', 'Hardy Jr', 'Barbara A.', 'Mona L.', 'World Trade Center', 'West Gas', 'English News', 'Nigel Shaw', 'Exchange Commission', 'Lisa M.',
'Commerce Department', 'American Water Works', 'American Water', 'Jane M.', 'Global Executive', 'Rob Nichol', 'Bill Ross', 'James Docker', 'Community Affairs', 'Project Lead',
'Mike Heim', 'Quinn Gillespie', 'William Barry', 'Milberg Weiss', '| | |', 'University Health Services', 'Adam N', 'Linda L.', 'Jo Ann', 'William Johnson', 'Blockbuster Inc.',
'Kenneth Rice', 'Commerzbank Securities', 'FPL Group', "Gray Davis'", 'San Diego Gas & Electric Co.', 'John Stout', 'Foundation for Taxpayer and Consumer Rights', 'MCI WorldCom',
'Covad Communications', 'Lucent Technologies', 'Jeff Skilling', 'San Diego Union Tribune', 'McGraw Hill', 'KGO Radio', 'San Diego Gas & Electric', 'Alpert Davis',
'Kern River Gas Transmission', 'Saber Partners', 'SoCal Gas', 'Con Edison', "Mike Day'", 'Technologic Partners', 'H&Q Asia Pacific', 'Law Ministry', 'Kasturi & Sons Ltd',
'Power Purchase Agreement', 'Calpine Corp.', 'Senate Floor', 'Delta Power', 'The California Energy Commission', 'Sierra Pacific Resources', 'Dan Richard',
'The Public Utilities Commission', 'Electronics Boutique', 'The California Public Utilities Commission', 'El Paso Corporation', 'William A. Wise', 'Tibco Software',
'Vivendi Universal', 'AOL Time Warner', 'Qwest Communications International Inc.', 'Gas Authority of India Ltd', 'Dominion Resources', 'Mirant Corp.', 'Michael Aguirre',
'British Petroleum', 'Valero Energy Corp.', 'Capstone Turbine Corp.', 'Conoco Inc.', 'Anadarko Petroleum Corp.', 'Schlumberger Ltd.', 'Deloitte & Touche', 'Japan Corp.',
'Finance Ministry', 'Lucent Technologies Inc.', 'CBS MarketWatch', 'Product Management', 'Jimmy Bean', 'Organization of Petroleum Exporting Countries', 'France Telecom',
'Dell Computer Corp.', 'Credit Lyonnais Securities', 'Azurix Corp.', 'Dow Jones & Company,', 'Illinois Power', 'Avista Corp.', 'Saks Inc.', 'Florida Power & Light',
'Northeast Utilities', 'Fisher Center for Real Estate and Urban Economics', 'Council of Economic Advisors', 'The Orange County Register', 'Mark Johnson',
'Lehman Brothers Holdings Inc.', 'Northwest Natural Gas', 'Comcast Interactive Capital', 'MSN Explorer', 'American Electronics Association', 'Richard Gephardt',
'Fortune Magazine', 'Hugo Chavez', 'Sycamore Networks', 'Corporate Communications', 'Duke Energy Corporation', 'Energy Intelligence Group', 'Montgomery Watson',
'Bertelsmann AG', 'Dresdner Kleinwort Wasserstein', 'Northern and Central California', 'Canada Corp.', 'National Desk', 'The Federal Energy Regulatory Commission',
'Calpine Corporation', '9th Circuit Court of Appeals', 'The Chronicle Publishing Co.', 'Stone & Webster', 'Pacific Gas and Electric', 'Bureau of Reclamation',
'John E. Bryson', 'Cingular Wireless', 'The Public Service Commission', 'Tyco International Ltd.', 'JDS Uniphase', 'Reliant Energy Services', 'Copley News Service',
'Columbia River Basin', 'Energy Services Inc.', 'British Wind Energy Association', 'Energy Systems Inc.', 'Phyllis Hamilton', 'UC Regents', 'National Thermal Power Corporation',
'Washington Bureau', 'Strategic Petroleum Reserve', 'Chuck Watson', 'Simmons & Co.', 'Energy Division', 'Vulcan Ventures', 'ING Barings', 'Science Communications',
'Anschutz Investment', 'Donaldson, Lufkin & Jenrette', 'Sigma Partners', 'Technology Crossover Ventures', 'Morgenthaler Ventures', 'New Millennium Partners',
'Internet Capital Group', 'Network Appliance', 'Hambrecht & Quist', 'Energy Services, Inc.', 'Larry Summers', 'Kohlberg Kravis Roberts & Co.', 'Blockbuster Video',
'Suez Lyonnaise des Eaux', 'John Heine', 'Lester Center for Entrepreneurship and Innovation', 'North American Electric Reliability Council', 'World Trade Organisation',
'Craig D.', 'Joseph Lieberman', 'Eli Lilly & Co.', 'Prudential Securities Inc.', 'Arter & Hadden', 'National Electric Power Authority', 'The Maharashtra Government',
'Judah Rose', 'Mirant Corp', 'Vestas Wind Systems', 'Global Crossing Ltd.', 'B.C. Hydro', 'The Brattle Group', 'The Energy Commission', 'The California Assembly',
'Global Markets', 'Career Services', "Department of Water Resources'", 'Western Energy', 'Ernst & Young', 'ABN Amro', 'Northwest Natural Gas Co.', 'Media Services',
'Steve Ballmer', 'Jeffrey Immelt', 'Wilson Sonsini Goodrich & Rosati', 'Duke Energy Corp', 'The Bonneville Power Administration', 'Regulatory Affairs Department',
'Industrial Development Bank of India', 'Paul Dawson', 'Giga Information', 'Crosspoint Venture Partners', 'Liberate Technologies', 'Chris Bowman', 'Barnes & Noble',
'Michael K. Powell', 'Bridgestone Firestone', 'Sofinnova Ventures', 'Ron Nichols', 'Navigant Consulting Inc.', 'Davis Administration', "Paul O'Neill", 'Joseph Pratt',
'Palm Computing', 'Industrial Finance Corporation', 'Utility Board', 'San Diego Superior Court', 'Con Ed', 'Carl Ingram', 'Pacific Bell Park', 'Mohave Generating Station',
'David Marshall', 'The Sacramento Municipal Utility District', 'U S WEST Communications, Inc.', 'Atal Behari', 'Dan Becker', 'James Woody', 'The City Council',
'The Public Utility Commission', 'Sun America', 'Middle East Economic Digest', 'National Energy Policy Development Group', 'Paul Kaufman', 'Jonathan Leonard',
'California Constitution', '11th Amendment', 'Canaan Partners', 'Whitney & Co.', 'Apollo Management', 'Blue Chip Venture', 'Kleiner Perkins Caufield & Byers',
'Scott Laughlin', 'CA Assembly', 'Labrador Ventures', 'J. & W. Seligman', 'Cable & Wireless', 'Crescendo Ventures', 'Jafco Ventures', 'Texas Pacific Group', 'with Davis',
'PA Consulting', 'Professional Services', 'Network Infrastructure', 'Benchmark Capital', 'Safeguard Scientifics', 'Zone Labs', 'Oxford Bioscience', 'Kodiak Venture Partners',
'Texas Public Utilities Commission', 'Christie Whitman', 'Low Income Home Energy Assistance Program', 'Williams Capital Group', 'Joseph Sent', 'William Blair Capital Partners',
'CNET Networks', 'Polaris Venture Partners', 'Bay Partners', 'Doll Capital Management', 'BP Plc', 'Joe Bob Perkins', 'Edward Kahn', 'Norman Y. Mineta', 'Sr. VP',
'Advent Venture Partners', 'Mark Fabiani', 'Independent Power Producers', 'Artemis Ventures', 'Trident Capital', 'Mohr Davidow Ventures', 'Ask Jeeves',
'The Electric Reliability Council of Texas', 'Democratic Assembly', 'OC Register', 'Gabriel Venture Partners', 'Challenge Fund', 'Insight Capital Partners',
'Sierra Ventures', 'Sandler Capital Management', 'Niagara Mohawk', 'Guy Phillips', 'Department of Health Services', 'John Flory', 'News World Communications, Inc.',
'VantagePoint Venture Partners', 'Walden International', 'Den Danske Bank', 'Lloyds TSB Development Capital', 'A.G. Edwards', 'Terra Lycos', 'SK Global',
'Gray Cary Ware & Freidenrich', 'Field Institute', 'Mexican Energy', 'Corporate Development', 'Willis Stein & Partners', 'Burrill & Co.', 'Prime Ventures',
'The Federal Energy Regulatory', 'Calpine Corp', 'Trinity Ventures', 'Mt. Tam', 'ARCH Venture Partners', 'First Union Capital Partners', 'Columbia Capital', '9th Circuit',
'Real Media', 'Sofinnova Partners', 'World Wide Packets', 'Netscape Communications', 'Department of Defense', 'Atal Behari Vajpayee', 'Holland & Knight', 'ETF Group',
'D.J. Smith', 'RRE Ventures', 'Boston Capital Ventures', 'New World Ventures', 'Global Switch', 'Horizon Ventures', 'Service Factory', 'CB Capital', 'GE Power Systems',
'Campesinos Unidos', 'Schroder Ventures', 'AT&T Canada', 'Coral Energy', 'Jupiter Communications', 'Venture Strategy Partners', 'Davidow Ventures', 'EchoStar Communications',
'AT&T Wireless', 'Itochu International', 'Mike Hansen', 'The California Department of Water Resources', 'GTCR Golder Rauner', "Ontario Teachers' Pension Plan Board",
'San Diego Gas & Electric Co', 'Lehman Brothers Venture Partners', 'MSN Hotmail', 'Mohr Davidow', 'J. & W. Seligman & Co.', 'Faculty Club', 'SAP Ventures', 'Capital Group',
'Pilgrim Baxter', 'Heather Cameron', 'ITC Holdings', 'NIB Capital', 'Datek Online', 'Freei Networks', 'Green Mountain Energy Company', 'Duquesne Light',
'Dell Computer Corporation', 'The Charles Schwab Corporation', 'Bayerische Landesbank', 'StarVest Partners', 'American Lawyer Media', 'Credit Suisse Group',
'Robert Mondavi Winery', 'Allegis Capital', 'Diego Gas & Electric Co.', 'Pervasive Computing', 'Lotus Notes', 'Mirant Corporation', 'Virginia Ellis',
'Electric Power Group', 'Jim Fleming', 'FPL Energy', 'Bechtel Group', 'Reliance Industries Ltd.', 'Richard Ferreira', 'Russell Hubbard', 'TransAlta Energy', 'Joel Newton',
'The Economist Group', 'Eugene Water & Electric Board', 'Qwest Communications', 'The Commission', 'AT&T Broadband', 'Rob Lamkin', 'California Supreme Court', 'Kasturi & Sons Ltd.',
'Kaufman, Paul', 'George H. Ryan', 'National Cable Television Association', 'Mobile Services', 'Public Utilities Act', 'Cambridge Silicon Radio', 'Clinton Administration',
'CSU Fresno', 'EBS, Inc.', 'Network Engineering', 'Common Carrier', 'BellSouth Telecommunications, Inc.', 'Nextel Communications, Inc.', 'Southwestern Bell Telephone Co.',
'Qwest Communications International, Inc.', 'WorldCom, Inc.', 'The State Corporation Commission', 'Lucent Technologies, Inc.', 'Cable Services',
'National Exchange Carrier Association, Inc.', 'John D. Rockefeller IV', 'FPL FiberNet', 'EOG Resources, Inc.', 'Catholic Health East', 'Christi L.', 'Mr Munde',
'Northern Natural Gas Co.', 'BSES Ltd.', 'BSES Ltd', 'Berkshire Hathaway Inc.', 'James J. Cramer', 'Robert Christensen', 'The Goldman Sachs Foundation', 'George Vaughn',
'David McManus', 'Gas Authority of India', 'Mary Lynne'
]
global_override.extend(swapped_titles.copy())
print(f"Added {len(swapped_titles)} to global override.")
global_override = list(set(global_override))
print(f"Total global override size: {len(global_override)}")
with open(f"{MY_PATH}/Enron_{INBOX}/hard_block_global_override.json", "w") as f:
json.dump(global_override, f)
for title in remove_local:
hard_coded_dictionary[title] = 1
for title in global_override:
hard_coded_dictionary[title] = 1
return hard_coded_dictionary
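# create_named_entity_maps: for every Enron passage, split the detected entities into
# GLOBAL entities (the title exists in the Wikipedia corpus) and LOCAL entities
# (Enron-only), caching Wikipedia-existence lookups in global_existence_cache.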
def create_named_entity_maps():
print("Creating named entity maps!")
global_ents = Counter()
local_ents = Counter()
# load stuff
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open("wikititles.json") as f:
wikititles = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
hard_coded_dictionary = hard_coded_remove_local_entities()
with open(f"{MY_PATH}/Enron_{INBOX}/linkedentities2nertags_global.json") as f:
linkedentities2nertags_global = json.load(f)
nertags2psgs_global = defaultdict(list)
nertags2psgs_local = defaultdict(list)
fname = f"{MY_PATH}/Enron_{INBOX}/global_existence_cache.json"
if os.path.isfile(fname):
with open(fname) as f:
global_existence_cache = json.load(f)
else:
global_existence_cache = {}
# iterate through passages
EnronPassages_New = {}
for k, v in tqdm(EnronPassages.items()):
# drop all entities from passages that contain multiple email addresses; the NED model does poorly on these names
email_words = v['text'].count("@")
email_words += v['text'].count("E-mail")
if email_words > 1:
v['GLOBAL_ENTITIES'] = []
v['LOCAL_ENTITIES'] = []
EnronPassages_New[k] = v.copy()
continue
# if the passage has global entities
title_in_global = []
title_not_global = []
for ent in v['linked_entities_lst']:
title = ent['title']
if title in global_existence_cache:
if global_existence_cache[title]:
title_in_global.append((title, linkedentities2nertags_global[title]))
else:
title_not_global.append((title, linkedentities2nertags_global[title]))
else:
if title in wikititles:
global_existence_cache[title] = 1
title_in_global.append((title, linkedentities2nertags_global[title]))
else:
global_existence_cache[title] = 0
title_not_global.append((title, linkedentities2nertags_global[title]))
for tag in v['ner_tags_lst']:
title = ner_alias_replacements(tag['text'])
if len(title.split()) > 1 and tag['ner'] in VALID_NER_TYPES:
if title in global_existence_cache:
if global_existence_cache[title]:
title_in_global.append((title, tag['ner']))
else:
title_not_global.append((title, tag['ner']))
else:
if title in wikititles:
global_existence_cache[title] = 1
title_in_global.append((title, tag['ner']))
else:
global_existence_cache[title] = 0
title_not_global.append((title, tag['ner']))
title_not_global = [t for t in title_not_global if not hard_coded_dictionary[t[0]] == 1]
variations_lst, title_not_global, new_text, new_sents, global_existence_cache = get_variations_lst(title_not_global, wikititles=wikititles, cache=global_existence_cache, text=v['text'], sents=v['sents'])
v['text'] = new_text
v['sents'] = new_sents
for title, tag in variations_lst.items():
if title not in global_existence_cache:
global_existence_cache[title] = 1
title_in_global.append((title, tag))
# save local and global entities for the psg
filtered_psg_local_ents = []
filtered_psg_global_ents = []
for tup in title_in_global:
ent, nertag = tup[0], tup[1]
global_ents[ent] += 1
filtered_psg_global_ents.append(ent)
filter_a = filter_global_ent(ent, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a:
nertags2psgs_global[nertag].append((v['id'], ent))
for tag in v['ner_tags_lst']:
tag_text = ner_alias_replacements(tag['text'])
if tag_text and tag_text not in filtered_psg_global_ents and tag['ner'] in VALID_NER_TYPES and local_ents_checker(tag_text, hard_coded_dictionary):
if len(tag_text.split()) > 1:
local_ents[tag_text] += 1
filtered_psg_local_ents.append(tag_text)
nertags2psgs_local[tag['ner']].append((v['id'], tag_text))
v['GLOBAL_ENTITIES'] = filtered_psg_global_ents.copy()
v['LOCAL_ENTITIES'] = filtered_psg_local_ents.copy()
EnronPassages_New[k] = v.copy()
# save stuff
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json", "w") as f:
json.dump(EnronPassages_New, f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json", "w") as f:
json.dump(local_ents, f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json", "w") as f:
json.dump(global_ents, f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_existence_cache.json", "w") as f:
json.dump(global_existence_cache, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json", "w") as f:
json.dump(nertags2psgs_local, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json", "w") as f:
json.dump(nertags2psgs_global, f)
# DUPLICATE PASSAGES
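# identify_duplicates marks a passage as a duplicate if one of its sentences exactly matches
# the recorded first sentence of a previously kept passage, or if its entity set shares more
# than ENT_OVERLAP_THRESH entities and at least OVERLAP_PCT_THRESH of its entities with a
# previously kept passage.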
def identify_duplicates(EnronPassages):
ENT_OVERLAP_THRESH = 5
OVERLAP_PCT_THRESH = 0.75
entity_sets = []
first_sentences = []
first_sentence_map = defaultdict(list)
duplicates_map = {}
# metadata
num_duplicates = 0
sentences_matched = 0
entities_overlapped = 0
for key, passage in tqdm(EnronPassages.items()):
sents = passage['sents']
ents = passage['GLOBAL_ENTITIES'].copy()
ents.extend(passage['LOCAL_ENTITIES'].copy())
entity_set = set(ents)
# check if it's a duplicate
is_duplicate = 0
for sent in sents:
if sent in first_sentences:
is_duplicate = 1
sentences_matched += 1
first_sentence_map[sent].append(key)
break
if not is_duplicate:
for ent_set in entity_sets:
overlap = len(entity_set.intersection(ent_set))
if overlap > ENT_OVERLAP_THRESH and overlap/len(entity_set) >= OVERLAP_PCT_THRESH:
is_duplicate = 1
entities_overlapped += 1
# save whether it's a duplicate or not
if not is_duplicate:
for sent in sents:
if len(sent.split()) > 1:
first_sentences.append(sent)
break
entity_sets.append(entity_set)
first_sentence_map[sents[0]].append(key)
duplicates_map[key] = False
else:
duplicates_map[key] = True
num_duplicates += 1
print(f"Marked {num_duplicates} passages as duplicates.")
print(f"For {sentences_matched} passages, the first sentences matched exactly.")
print(f"For {entities_overlapped} passages, the entity set had a high overlap with another passage's entity set.\n")
with open("first_sentence_map.json", "w") as f:
json.dump(first_sentence_map, f)
return duplicates_map
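# global_entity_valid_types loads the Bootleg QID/type metadata, keeps only Wikipedia types
# with at least 1000 member entities, and returns the filtered qid -> types map along with
# title/QID lookup tables for the allowed candidate entities.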
def global_entity_valid_types(wikititles):
print("Loading type information ...")
with open ('/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/entity_mappings/qid2title.json') as f:
qid2title = json.load(f)
with open("/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/type_mappings/wiki/type_vocab.json") as f:
wiki_type_vocab = json.load(f)
with open("/checkpoint/simarora/open_domain_data/BOOTLEG_entitydb/data/entity_db/type_mappings/wiki/qid2typeids.json") as f:
qid2types_wiki = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
wiki_typeid2name = {}
for key, value in wiki_type_vocab.items():
wiki_typeid2name[value] = key
title2qid = {}
for k, v in qid2title.items():
title2qid[v] = k
type2freq = Counter()
type2qids = defaultdict(list)
for title in tqdm(wikititles):
if title in title2qid:
qid = title2qid[title]
types = qid2types_wiki[qid]
for ty in types:
type2freq[wiki_typeid2name[ty]] += 1
if len(wikipassages2sents[title]) > 1:
type2qids[wiki_typeid2name[ty]].append(qid)
# this is the list of types we want to allow for candidate entities
type2freq_filtered = {}
type2qids_filtered = {}
for ty, ct in type2freq.items():
if ct >= 1000:
type2freq_filtered[ty] = ct
type2qids_filtered[ty] = type2qids[ty]
with open("filteredEnronGlobalTypes.json", "w") as f:
json.dump(type2freq_filtered, f)
qid2types_wiki_filtered = {}
for qid, types_lst in tqdm(qid2types_wiki.items()):
filt_types = [wiki_typeid2name[ty] for ty in types_lst if wiki_typeid2name[ty] in type2freq_filtered]
qid2types_wiki_filtered[qid] = filt_types
return qid2types_wiki_filtered, title2qid, type2freq_filtered, type2qids_filtered, qid2title, type2qids
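# filter_global_ent returns 1 (drop the entity) when its Wikipedia passage has only a single
# sentence, contains fewer than MIN_PARAGRAPH_WORDS words, has no known QID, or its QID has
# no types left after the frequency filter above.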
def filter_global_ent(title, wikipassages2sents, title2qid, qid2types_wiki_filtered):
filter = 0
MIN_PARAGRAPH_WORDS = 20
# Filter 1: the passage is too short, meaning it's probably super vague or specific
if len(wikipassages2sents[title]) <= 1:
filter = 1
# Filter 2: total words in the sentences for the passage, since there's probably too little content to write a q
total_words = 0
for sent in wikipassages2sents[title]:
total_words += len(sent.split())
if total_words < MIN_PARAGRAPH_WORDS:
filter = 1
# Filter 3: if the entity categories are not in the filtered types lists
if title not in title2qid:
filter = 1
else:
qid_a = title2qid[title]
if qid_a in qid2types_wiki_filtered:
types_a = qid2types_wiki_filtered[qid_a]
if not types_a:
filter = 1
return filter
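# generate_passage_pairs builds the three pools of bridge passage pairs:
#   - Wikipedia/Wikipedia pairs linked by a first-paragraph anchor,
#   - Enron/Wikipedia pairs linked by a global entity mentioned in the email, and
#   - Enron/Enron pairs linked by a shared, frequently occurring local entity.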
def generate_passage_pairs():
# GENERATE PASSAGE PAIRS
def generate_global_global_pairs(wikititles, qid2types_wiki_filtered, title2qid):
print("Creating global, global passage pairs.")
random.seed(1)
ks = KnowledgeSource()
global_a = random.sample(wikititles, 50000)
# load wiki corpus information
wikititle_exists = defaultdict(int)
for title in wikititles:
wikititle_exists[title] = 1
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
# produce pairs, first load existing saved anchors if it exists
if os.path.exists("page2anchors.json"):
with open("page2anchors.json") as f:
page2anchors = json.load(f)
else:
page2anchors = {}
GLOBAL_GLOBAL_PAIRS = []
added_wikients = []
for title in tqdm(global_a):
if title not in added_wikients:
if title in page2anchors:
anchors = page2anchors[title]
else:
page = ks.get_page_by_title(title)
if page:
anchors = page['anchors']
anchors_full = [anchor for anchor in anchors if anchor['paragraph_id'] == 1]
anchors = [anchor for anchor in anchors_full if wikititle_exists[anchor['text']]]
page2anchors[title] = anchors
if anchors:
for anchor in anchors:
a, b = title, anchor['text']
# Filter the kinds of anchors we want by granularity
filter_a = filter_global_ent(a, wikipassages2sents, title2qid, qid2types_wiki_filtered)
filter_b = filter_global_ent(b, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a and not filter_b:
GLOBAL_GLOBAL_PAIRS.append({'wiki1':a, 'wiki2':b})
added_wikients.append(title)
with open("page2anchors.json", "w") as f:
json.dump(page2anchors, f)
print(f"Collected {len(GLOBAL_GLOBAL_PAIRS)} global, global pairs\n")
return GLOBAL_GLOBAL_PAIRS
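# Each global/local pair below is a dict of the form {'enron': <passage key>, 'wiki': <Wikipedia title>}.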
def generate_global_local_pairs(EnronPassages, duplicates_map, qid2types_wiki_filtered, title2qid):
print("Creating global, local passage pairs.")
MIN_PSG_ENTITIES = 3
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
GLOBAL_LOCAL_PAIRS = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
if len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES']) < MIN_PSG_ENTITIES:
continue
for ent in passage["GLOBAL_ENTITIES"]:
filter_a = filter_global_ent(ent, wikipassages2sents, title2qid, qid2types_wiki_filtered)
if not filter_a:
GLOBAL_LOCAL_PAIRS.append({'enron':key, 'wiki':ent})
print(f"Collected {len(GLOBAL_LOCAL_PAIRS)} local, global pairs\n")
return GLOBAL_LOCAL_PAIRS
# Note: local/local passage pairs about global entities are deliberately not included, due to the chance of knowledge intersection with Wikipedia.
def generate_local_local_pairs(EnronPassages, duplicates_map, freq_local):
print("Creating local, local passage pairs.")
MIN_PSG_ENTITIES = 3
MAX_PSG_ENTITIES = 10
FILT_LOCAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
# pick two passages that mention a local entity as a pair; one pair per local entity
pair_counter = Counter()
for ent, psgs in tqdm(localent2psgkey.items()):
for psg1 in psgs:
for psg2 in psgs:
if psg1 != psg2 and set([psg1, psg2]) not in USED_PSG_PAIRS:
FILT_LOCAL_LOCAL_PAIRS.append({'enron1':psg1, 'enron2':psg2, 'ent':ent})
USED_PSG_PAIRS.append(set([psg1, psg2]))
pair_counter[f"{psg1}_{psg2}"] += 1
break
LOCAL_LOCAL_PAIRS = []
for pair in tqdm(FILT_LOCAL_LOCAL_PAIRS):
# a high pair count is treated here as a sign of duplication, so such pairs are dropped
if pair_counter[f"{pair['enron1']}_{pair['enron2']}"] < 5:
LOCAL_LOCAL_PAIRS.append(pair)
print(f"Collected {len(LOCAL_LOCAL_PAIRS)} local, local pairs\n")
return LOCAL_LOCAL_PAIRS
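# Each local/local pair records the two Enron passage keys plus the local entity they share,
# e.g. {'enron1': <key>, 'enron2': <key>, 'ent': <entity string>}.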
# LOAD ENTITY SETS AND PERSONAL / GLOBAL CORPORA
with open("{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json") as f:
global_ents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
# Here we're choosing to use entities that appear above a THRESHOLD number of times in personal data
THRESH = 5
num = 0
freq_local = []
for key, value in local_ents.items():
if value >= THRESH:
num += 1
freq_local.append(key)
print(f"Found {len(local_ents)} local entities. and {num} entities appear over {THRESH} x.")
num = 0
freq_global = []
for key, value in global_ents.items():
if value >= THRESH:
num += 1
freq_global.append(key)
print(f"Found {len(global_ents)} global entities and {num} global entities appear over {THRESH} x.\n")
# GENERATE THE PASSAGE PAIRS
qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
fname = f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json"
if os.path.isfile(fname):
with open(fname) as f:
duplicates_map = json.load(f)
else:
duplicates_map = identify_duplicates(EnronPassages)
with open(f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json", "w") as f:
json.dump(duplicates_map, f)
print("Loaded duplicate passages map!\n")
# global global passages
GLOBAL_GLOBAL_PAIRS = generate_global_global_pairs(wikititles, qid2types_wiki_filtered, title2qid)
with open(f"{MY_PATH}/Enron_{INBOX}/global_global_pairs.json", "w") as f:
json.dump(GLOBAL_GLOBAL_PAIRS, f)
# global local passages
GLOBAL_LOCAL_PAIRS = generate_global_local_pairs(EnronPassages, duplicates_map, qid2types_wiki_filtered, title2qid)
with open(f"{MY_PATH}/Enron_{INBOX}/global_local_pairs.json", "w") as f:
json.dump(GLOBAL_LOCAL_PAIRS, f)
# local local passages
LOCAL_LOCAL_PAIRS = generate_local_local_pairs(EnronPassages, duplicates_map, freq_local)
with open(f"{MY_PATH}/Enron_{INBOX}/local_local_pairs.json", "w") as f:
json.dump(LOCAL_LOCAL_PAIRS, f)
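# generate_comparison_passage_pairs builds pairs meant for comparison-style questions: the two
# anchor entities share the same NER/Wikipedia type (and, for local/local pairs, the passages
# also share a common local entity), so annotators can write questions comparing the two.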
def generate_comparison_passage_pairs():
def generate_local_local_comparison(EnronPassages, duplicates_map, freq_local, nertags2psgs_local):
print("Creating local, local passage pairs.")
MIN_PSG_ENTITIES = 2
MAX_PSG_ENTITIES = 10
FILT_LOCAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
has_enough_ents = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
has_enough_ents.append(key)
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
# pick two passages that mention a local entity as a pair; one pair per local entity
ner2psgkeys = defaultdict(list)
psg2nertags = defaultdict(list)
for NER_TAG, psgs in nertags2psgs_local.items():
for psg in psgs:
ner2psgkeys[NER_TAG].append(psg[0])
psg2nertags[psg[0]].append([NER_TAG, psg[1]])
by_common_ent = 1
if by_common_ent:
pair_counter = Counter()
for NER_TAG, psgs in nertags2psgs_local.items():
passages = psgs.copy()
passages_keys = ner2psgkeys[NER_TAG].copy()
print(f"NER TAG: {NER_TAG}")
for tup1 in tqdm(passages):
inserted = 0
key1, title1 = tup1[0], tup1[1]
is_duplicate = duplicates_map[key1]
if is_duplicate:
continue
passage1 = EnronPassages[key1]
local_ents1 = [ent for ent in passage1['LOCAL_ENTITIES'] if ent != title1 and ent in freq_local]
for ent in local_ents1:
# iterate through passages with a matching local ent
other_passages = localent2psgkey[ent].copy()
random.shuffle(other_passages)
for other_psg in other_passages:
is_duplicate = duplicates_map[other_psg]
if is_duplicate:
continue
if other_psg in passages_keys:
other_nertags = psg2nertags[other_psg]
for tag in other_nertags:
title2 = tag[1]
key2 = other_psg
if tag[0] == NER_TAG and title2 != ent and title2 != title1 and key1 != key2 and set([key1, key2]) not in USED_PSG_PAIRS and key1 in has_enough_ents and key2 in has_enough_ents:
FILT_LOCAL_LOCAL_PAIRS.append({'enron1':key1, 'title1': title1, 'types':NER_TAG,
'enron2':key2, 'title2': title2, 'ent':ent})
USED_PSG_PAIRS.append(set([key1, key2]))
pair_counter[f"{key1}_{key2}"] += 1
inserted = 1
break
if inserted:
break
if inserted:
break
if inserted:
break
LOCAL_LOCAL_PAIRS = []
for pair in tqdm(FILT_LOCAL_LOCAL_PAIRS):
if pair_counter[f"{pair['enron1']}_{pair['enron2']}"] < 5:
LOCAL_LOCAL_PAIRS.append(pair)
print(f"Collected {len(LOCAL_LOCAL_PAIRS)} local, local pairs\n")
return LOCAL_LOCAL_PAIRS
def generate_global_local_comparison(EnronPassages, duplicates_map, wikititles, nertags2psgs_local, nertitles2types_local, wikipassages2sents):
_, _, type2freq_filtered, type2qids_filtered, qid2title, type2qids = global_entity_valid_types(wikititles)
print("Creating local, global passage pairs.")
MIN_PSG_ENTITIES = 2
MAX_PSG_ENTITIES = 10
# get a mapping of the passages that contain each local entity
localent2psgkey = defaultdict(list)
has_enough_ents = []
for key, passage in tqdm(EnronPassages.items()):
is_duplicate = duplicates_map[key]
if is_duplicate:
continue
TOTAL_ENTITIES = len(passage['GLOBAL_ENTITIES']) + len(passage['LOCAL_ENTITIES'])
if TOTAL_ENTITIES < MIN_PSG_ENTITIES or TOTAL_ENTITIES > MAX_PSG_ENTITIES:
continue
has_enough_ents.append(key)
for ent in passage["LOCAL_ENTITIES"]:
if ent in freq_local:
localent2psgkey[ent].append(key)
titlehasWikiTypes = {}
for tag, dic in nertitles2types_local.items():
titlehasWikiTypes[tag] = []
for title, lst in dic.items():
if lst:
titlehasWikiTypes[tag].append(title)
USED_TITLES = []
FILT_GLOBAL_LOCAL_PAIRS = []
USED_PSG_PAIRS = []
pair_counter = Counter()
for NER_TAG, psgs in nertags2psgs_local.items():
passages = psgs.copy()
print(f"NER TAG: {NER_TAG}")
for tup1 in tqdm(passages):
key1, title1 = tup1[0], tup1[1]
if title1 not in titlehasWikiTypes[NER_TAG] or key1 not in has_enough_ents or duplicates_map[key1]:
continue
types = nertitles2types_local[NER_TAG][title1]
qids_lst = type2qids[types[0]].copy()
while 1:
qid = random.choice(qids_lst)
wikititle = qid2title[qid]
qids_lst.remove(qid)
if len(wikipassages2sents[wikititle]) > 2:
break
if wikititle not in USED_TITLES:
USED_TITLES.append(wikititle)
FILT_GLOBAL_LOCAL_PAIRS.append({'enron1':key1, 'title1': title1, 'wiki':wikititle,
'types': types[0]})
USED_PSG_PAIRS.append(set([key1, wikititle]))
pair_counter[f"{key1}_{wikititle}"] += 1
print(f"Collected {len(FILT_GLOBAL_LOCAL_PAIRS)} local, global pairs\n")
return FILT_GLOBAL_LOCAL_PAIRS
# LOAD ENTITY SETS AND PERSONAL / GLOBAL CORPORA
with open(f"{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_entities.json") as f:
global_ents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_entities.json") as f:
local_ents = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json",) as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertitle2types_local.json") as f:
nertitles2types_local = json.load(f)
# Here we're choosing to use entities that appear above a THRESHOLD number of times in personal data
THRESH = 5
num = 0
freq_local = []
for key, value in local_ents.items():
if value >= THRESH:
num += 1
freq_local.append(key)
print(f"Found {len(local_ents)} local entities. and {num} entities appear over {THRESH} x.")
num = 0
freq_global = []
for key, value in global_ents.items():
if value >= THRESH:
num += 1
freq_global.append(key)
print(f"Found {len(global_ents)} global entities and {num} global entities appear over {THRESH} x.\n")
# GENERATE THE PASSAGE PAIRS
# qid2types_wiki_filtered, title2qid, _, _, _, _ = global_entity_valid_types(wikititles)
fname = f"{MY_PATH}/Enron_{INBOX}/duplicate_enron_psg_map.json"
if os.path.isfile(fname):
with open(fname) as f:
duplicates_map = json.load(f)
else:
assert 0, print("no duplicate passages map")
print("Loaded duplicate passages map!\n")
# global local passages
GLOBAL_LOCAL_PAIRS = generate_global_local_comparison(EnronPassages, duplicates_map, wikititles, nertags2psgs_local, nertitles2types_local, wikipassages2sents)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_global_local_pairs.json", "w") as f:
json.dump(GLOBAL_LOCAL_PAIRS, f)
# local local passages
LOCAL_LOCAL_PAIRS = generate_local_local_comparison(EnronPassages, duplicates_map, freq_local, nertags2psgs_local)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_local_local_pairs.json", "w") as f:
json.dump(LOCAL_LOCAL_PAIRS, f)
# COMPARISON Q HELPER FUNCTIONS
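# The filter_*_ner helpers below drop noisy NER spans (all-caps tokens, very long strings,
# spans containing digits, and hand-listed false positives) before they are used as anchors
# for comparison questions.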
def filter_PERSON_ner(person_lst):
clean_person_lst = []
for tup in person_lst:
person = tup[1]
filt = 0
if any(wd.isupper() for wd in person.split()):
filt = 1
elif any(wd in person.lower() for wd in ["corp", "california", "<<", ">>", "email", "greetings", "enron", "business", "smart",
"socal", "@", "director", "inc", "ect", "auditorium", "+", "=", "cos.", "staff", "www.", "pro",
"department", "manager", "co.", "cos", "strategy", "other", "news", "copyright", "land", "english"]):
filt = 1
elif len(person) > 40:
filt = 1
elif len(person.split()) != 2 or (len(person.split()[0]) <= 2 or len(person.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in person):
filt = 1
elif person in ["Andrew Rich Pinot Noir", "Gordon Smith Announce Partnership", "Jeff Energy Boss", 'Morgan Chase', 'Sac Bee',
"Gary Cohen Importance", "Fleetguard Nelson", "Price Falls", "Grosvenor Estates", 'Ventana Editions',
"Saloman Smith Barney India", "Fleetwood Enter", "Adobe Adobe Photoshop", "GungHo Atmosphere", "Bayless Cos.",
"Long Haul", "eProcurement Inbox", "Pass Code", "Graham Berkeley", "Natexis Investissement", "Walker Digital"]:
filt = 1
if not filt:
clean_person_lst.append(tup)
return clean_person_lst
def filter_ORG_ner(org_lst):
clean_org_lst = []
for tup in org_lst:
org = tup[1]
filt = 0
if any((wd[0].islower() and wd not in stop_words) for wd in org.split()):
filt = 1
elif any(wd in org.lower() for wd in [","]):
filt = 1
elif len(org) > 50:
filt = 1
elif len(org.split()) >= 2 and (len(org.split()[0]) <= 2 or len(org.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in org):
filt = 1
if any(wd.lower() in ["council", "executive", "market"] for wd in org.split()):
filt = 1
if org in ["Philip Angelides", "Market Participant", "Independence Accounts"]:
filt = 1
elif org in []:
filt = 1
if not filt:
clean_org_lst.append(tup)
return clean_org_lst
def filter_EVENT_ner(event_lst):
clean_event_lst = []
event_words = [ "conference", "event", "session", "event", "weekend", "luncheon",
"festival", "workshop", "debate", "speech", "parade", "forum",
"summit", "briefing", "lecture", "night"
]
for tup in event_lst:
event = tup[1]
filt = 0
if any(wd in event.lower() for wd in [","]):
filt = 1
elif len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif event in ["The Citysearch Weekend Guide", "Knowledge Forum", "Peyton Panel", "Bay Area Air"]:
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_LOC_ner(event_lst):
clean_event_lst = []
event_words = [
'bay', 'west', 'valley', 'north', 'south', 'east', 'the', 'coast', 'southern', 'central', 'river', 'area', 'district', 'pacific', 'northwest', 'california',
'silicon', 'island', 'san', 'lake', 'northern', 'asia', 'air', 'park', 'america', 'gulf', 'quality', 'sea', 'city', 'asiapacific', 'atlantic',
'drive', 'region', 'capital', 'western', 'basin', 'round', 'new', 'europe', 'county', 'border', 'desert', 'blvd', 'water', 'el', 'arctic',
'summit', 'inn', 'plant', 'southwest', 'road', 'st.', 'offshore', 'wind', 'regional', 'middle', 'indian', 'pine', 'wildlife', 'arabian',
'chapter', 'point', 'rim', 'ventures', 'islands', 'eastern', 'dieg', 'hill', 'mt.', 'jose', 'mission', 'avenue', 'castle', 'cleone', 'gardens',
'mendocino', 'schools', 'redwood', 'persian', 'board', 'field', 'san', 'jose', 'land', 'bluff', 'creek', 'dorado', 'hills',
'refuge', 'walla', 'little', 'mount', 'tower', 'energy', 'morro', 'upper', 'lands', 'block', 'american', 'plaza',
'pac', 'location', 'rock', 'marina', 'salt', 'generators', 'rto', 'verde', 'hudson', 'belt', 'orange', 'valley', 'ave', 'palm', 'napa', 'region',
'town', 'coasts', 'international', 'white', 'plains', 'angels', 'las', 'vegas', 'japan', 'los', 'england', 'india', 'great', 'basin', 'ocean',
'new', 'york', 'long', 'isle', 'woodlands', 'holland', 'arkansas'
]
for tup in event_lst:
event = tup[1]
filt = 0
if len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif any(wd.lower() in ["residents", "fund", "capital", "big", "council"] for wd in event.split()):
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_LAW_ner(event_lst):
clean_event_lst = []
event_words = [
'act', 'agreement', 'code', 'reform', 'bill', 'amendment', 'rights',
'rules', 'constitution', 'law', 'clause', 'compliance', 'bill',
'protocol', 'certification', 'policy', 'contract', 'standards'
]
for tup in event_lst:
event = tup[1]
filt = 0
if len(event) > 50:
filt = 1
elif len(event.split()) >= 2 and (len(event.split()[0]) <= 2 or len(event.split()[1]) <= 2):
filt = 1
elif any(ch.isdigit() for ch in event):
filt = 1
elif not any(wd in event_words for wd in event.lower().split()):
filt = 1
if not filt:
clean_event_lst.append(tup)
return clean_event_lst
def filter_ner_maps():
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json") as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
# PEOPLE
clean_person_global = filter_PERSON_ner(nertags2psgs_global["PERSON"].copy())
clean_person_local = filter_PERSON_ner(nertags2psgs_local["PERSON"].copy())
nertags2psgs_global["PERSON"] = clean_person_global
nertags2psgs_local["PERSON"] = clean_person_local
# ORGS
clean_org_global = filter_ORG_ner(nertags2psgs_global["ORG"].copy())
clean_org_local = filter_ORG_ner(nertags2psgs_local["ORG"].copy())
nertags2psgs_global["ORG"] = clean_org_global
nertags2psgs_local["ORG"] = clean_org_local
# EVENTS
clean_event_global = filter_EVENT_ner(nertags2psgs_global["EVENT"].copy())
clean_event_local = filter_EVENT_ner(nertags2psgs_local["EVENT"].copy())
nertags2psgs_global["EVENT"] = clean_event_global
nertags2psgs_local["EVENT"] = clean_event_local
# LOC
clean_loc_global = filter_LOC_ner(nertags2psgs_global["LOC"].copy())
clean_loc_local = filter_LOC_ner(nertags2psgs_local["LOC"].copy())
nertags2psgs_global["LOC"] = clean_loc_global
nertags2psgs_local["LOC"] = clean_loc_local
# LAW
clean_law_global = filter_LAW_ner(nertags2psgs_global["LAW"].copy())
clean_law_local = filter_LAW_ner(nertags2psgs_local["LAW"].copy())
nertags2psgs_global["LAW"] = clean_law_global
nertags2psgs_local["LAW"] = clean_law_local
# PRODUCT
nertags2psgs_global.pop("PRODUCT", None)
nertags2psgs_local.pop("PRODUCT", None)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json", 'w') as f:
json.dump(nertags2psgs_local, f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json", 'w') as f:
json.dump(nertags2psgs_global, f)
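# get_bold_spans: for each sentence, collect the indices of tokens that contain one of the
# (non-stopword, non-numeric) words of the entity title; the annotation frontend uses these
# spans to highlight the shared entity.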
def get_bold_spans(ent_title, sents1=[], sents2=[]):
bold_spans1 = []
bold_spans2 = []
ent_words = ent_title.split()
ent_words = [wd for wd in ent_words if wd.lower() not in stop_words and (not any(ch.isdigit() for ch in wd))]
if sents1:
for sent in sents1:
sent_spans = []
for ind, tok in enumerate(sent.split()):
if any(wd in tok for wd in ent_words):
sent_spans.append(ind)
bold_spans1.append(sent_spans)
if sents2:
for sent in sents2:
sent_spans = []
for ind, tok in enumerate(sent.split()):
if any(wd in tok for wd in ent_words):
sent_spans.append(ind)
bold_spans2.append(sent_spans)
return bold_spans1, bold_spans2
# MAIN ALGORITHM
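# save_final_passage_pairs samples DATASET_SIZE bridge pairs and COMPARISON_SIZE comparison
# pairs. hop1/hop2 are independent coin flips choosing the domain of each hop: True selects a
# Wikipedia (global) passage and False an Enron (local) passage; the flips are recorded in the
# pair's 'domains' field.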
def save_final_passage_pairs():
# load key pairs
with open(f"{MY_PATH}/Enron_{INBOX}/global_global_pairs.json") as f:
GLOBAL_GLOBAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/global_local_pairs.json") as f:
GLOBAL_LOCAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/local_local_pairs.json") as f:
LOCAL_LOCAL_PAIRS = json.load(f)
# load corpora
with open(f"{MY_PATH}/wikititles.json") as f:
wikititles = json.load(f)
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/EmailsCorpus_FILTERED.json") as f:
EnronPassages = json.load(f)
# load category information
_, _, type2freq_filtered, type2qids_filtered, qid2title, _ = global_entity_valid_types(wikititles)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_local.json",) as f:
nertags2psgs_local = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/nertags2psgs_global.json") as f:
nertags2psgs_global = json.load(f)
# save final passage pairs
bridge_passage_pairs = defaultdict(dict)
pair_unq_idx = 0
DATASET_SIZE = 80000
while pair_unq_idx < DATASET_SIZE:
r1 = 1
hop1 = random.random() < 0.5
hop2 = random.random() < 0.5
unq_idx = f"PAIRIDX:{pair_unq_idx}"
if r1:
if hop1 and hop2:
if GLOBAL_GLOBAL_PAIRS:
pair = random.choice(GLOBAL_GLOBAL_PAIRS)
GLOBAL_GLOBAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki2'], sents1=wikipassages2sents[pair['wiki1']], sents2=wikipassages2sents[pair['wiki2']])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': wikipassages2sents[pair['wiki1']],
'title1': pair['wiki1'],
'sents2': wikipassages2sents[pair['wiki2']],
'title2': pair['wiki2'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'hint': f"Consider forming questions which use the entity '{pair['wiki2']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif hop1 and not hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki'], sents1=wikipassages2sents[pair['wiki']], sents2=EnronPassages[pair['enron']]['sents'])
bridge_passage_pairs[pair_unq_idx] = {
'sents2': EnronPassages[pair['enron']]['sents'],
'title2': f"Enron Email Number: {EnronPassages[pair['enron']]['id']}",
'sents1': wikipassages2sents[pair['wiki']],
'title1': pair['wiki'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['wiki']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif not hop1 and hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['wiki'], sents1=EnronPassages[pair['enron']]['sents'], sents2=wikipassages2sents[pair['wiki']])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': EnronPassages[pair['enron']]['sents'],
'title1': f"Enron Email Number: {EnronPassages[pair['enron']]['id']}",
'sents2': wikipassages2sents[pair['wiki']],
'title2': pair['wiki'],
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['wiki']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
elif not hop1 and not hop2:
if LOCAL_LOCAL_PAIRS:
pair = random.choice(LOCAL_LOCAL_PAIRS)
LOCAL_LOCAL_PAIRS.remove(pair)
boldspans_psg1, boldspans_psg2 = get_bold_spans(pair['ent'], sents1=EnronPassages[pair['enron1']]['sents'], sents2=EnronPassages[pair['enron2']]['sents'])
bridge_passage_pairs[pair_unq_idx] = {
'sents1': EnronPassages[pair['enron1']]['sents'],
'title1': f"Enron Email Number: {EnronPassages[pair['enron1']]['id']}",
'sents2': EnronPassages[pair['enron2']]['sents'],
'title2': f"Enron Email Number: {EnronPassages[pair['enron2']]['id']}",
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'bold_spans': [],
'hint': f"Consider forming questions which use the entity '{pair['ent']}', since it's mentioned in both passages!",
'domains': [hop1, hop2],
'type':'bridge',
'unq_idx': unq_idx
}
pair_unq_idx += 1
else:
assert 0, print("Error in path selection!")
if pair_unq_idx % 1000 == 0:
print(f"Wrote {pair_unq_idx} questions.")
print("Done collecting bridge pairs.")
with open(f"{MY_PATH}/Enron_{INBOX}/compare_local_local_pairs.json") as f:
LOCAL_LOCAL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/compare_global_local_pairs.json") as f:
GLOBAL_LOCAL_PAIRS = json.load(f)
comparison_passage_pairs = defaultdict(dict)
COMPARISON_SIZE = 30000
USED_TITLES = []
pair_unq_idx = DATASET_SIZE
while pair_unq_idx < DATASET_SIZE+COMPARISON_SIZE:
r2 = random.random() < 0.5
hop1 = random.random() < 0.5
hop2 = random.random() < 0.5
sents1 = []
sents2 = []
unq_idx = f"PAIRIDX:{pair_unq_idx}"
if 1:
if not hop1 or not hop2:
if not hop1 and not hop2:
if LOCAL_LOCAL_PAIRS:
pair = random.choice(LOCAL_LOCAL_PAIRS)
LOCAL_LOCAL_PAIRS.remove(pair)
title1 = pair['title1']
key1 = pair['enron1']
sents1 = EnronPassages[key1]['sents']
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['title2']
key2 = pair['enron2']
sents2 = EnronPassages[key2]['sents']
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
boldspans_psg1_ent, boldspans_psg2_ent = get_bold_spans(pair['ent'], sents1=sents1, sents2=sents2)
types = NER_TYPES_DICT[pair['types']]
common_ent = f"The entity {pair['ent']} also appears in both these passages."
elif not hop1 and hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
title1 = pair['title1']
key1 = pair['enron1']
sents1 = EnronPassages[key1]['sents']
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['wiki']
sents2 = wikipassages2sents[title2]
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
types = pair['types']
common_ent =''
elif hop1 and not hop2:
if GLOBAL_LOCAL_PAIRS:
pair = random.choice(GLOBAL_LOCAL_PAIRS)
GLOBAL_LOCAL_PAIRS.remove(pair)
title1 = pair['wiki']
sents1 = wikipassages2sents[title1]
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
title2 = pair['title1']
key2 = pair['enron1']
sents2 = EnronPassages[key2]['sents']
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
words1 = title1.split()
words2 = title2.split()
if len(set(words1).intersection(set(words2))) > 0:
continue
types = pair['types']
common_ent= ''
if r2:
hint = f"Write a YES or NO question. Some information that can help you (feel free to ignore!) is that entity {title1} in passage 1 and entity 2 {title2} may have the '{types}' property in common. {common_ent}"
choices = [{"option":"Yes"}, {"option":"No"}]
else:
hint = f"Some information that can help you (feel free to ignore!) is that entity {title1} in passage 1 and entity 2 {title2} may have the '{types}' property in common. {common_ent}"
choices = [{"option":title1}, {"option":title2}]
else:
WikiCategory = random.choice(list(type2freq_filtered.keys()))
qids_lst = type2qids_filtered[WikiCategory].copy()
qid1 = random.choice(qids_lst)
qids_lst.remove(qid1)
title1 = qid2title[qid1]
if title1 not in USED_TITLES:
USED_TITLES.append(title1)
sents1 = wikipassages2sents[title1]
key1 = title1
boldspans_psg1, _ = get_bold_spans(title1, sents1=sents1)
qid2 = random.choice(qids_lst)
qids_lst.remove(qid2)
title2 = qid2title[qid2]
if title2 not in USED_TITLES:
USED_TITLES.append(title2)
sents2 = wikipassages2sents[title2]
key2 = title2
_, boldspans_psg2 = get_bold_spans(title2, sents2=sents2)
if r2:
hint = f"Both passages are about '{WikiCategory.upper()}' entities: '{title1} 'in paragraph 1, and '{title2}' in paragraph 2. Write a question that compares the two entities and can be answered with YES or NO."
choices = [{"option":"Yes"}, {"option":"No"}]
else:
hint = f"Both passages are about '{WikiCategory.upper()}' entities: '{title1}' in paragraph 1, and '{title2}' in paragraph 2. Write a question that compares the two entities."
choices = [{"option":title1}, {"option":title2}]
if sents1 and sents2:
comparison_passage_pairs[pair_unq_idx] = {
'sents1': sents1,
'title1': title1,
'entity1': key1,
'sents2': sents2,
'title2': title2,
'entity2': key2,
'bold_spans1': boldspans_psg1,
'bold_spans2': boldspans_psg2,
'multiple_choice': choices,
'hint': hint,
'domains': [hop1, hop2],
'type':'comparison',
'unq_idx': unq_idx
}
pair_unq_idx += 1
if pair_unq_idx % 1000 == 0:
print(f"Wrote {pair_unq_idx} questions.")
print("Done collecting comparison pairs.")
# format for the frontend interface
preprocessed = []
for key, pair in bridge_passage_pairs.items():
preprocessed.append(pair)
output_dict = {
'entries': preprocessed
}
with open(f"{MY_PATH}/Enron_{INBOX}/BRIDGE_PASSAGE_PAIRS_110121.json", "w") as f:
json.dump(output_dict, f)
preprocessed = []
for key, pair in comparison_passage_pairs.items():
preprocessed.append(pair)
output_dict = {
'entries': preprocessed
}
with open(f"{MY_PATH}/Enron_{INBOX}/COMPARISON_PASSAGE_PAIRS_01152022.json", "w") as f:
json.dump(output_dict, f)
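# convert_pairs_to_batches splits the saved pairs into JSON batches for the annotation
# interface, writing roughly BATCH_SIZE entries per file (the batch size grows to 100 after
# the first ten batches).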
def convert_pairs_to_batches():
BATCH_SIZE = 10
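# Note: ALL_PAIRS is assigned twice below, so the bridge pairs loaded first are immediately
# overwritten and only the comparison pairs are batched here; swap the file used below to
# batch the bridge pairs instead.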
with open(f"{MY_PATH}/Enron_{INBOX}/BRIDGE_PASSAGE_PAIRS_110121.json") as f:
ALL_PAIRS = json.load(f)
with open(f"{MY_PATH}/Enron_{INBOX}/COMPARISON_PASSAGE_PAIRS_01152022.json") as f:
ALL_PAIRS = json.load(f)
BATCH = {'entries': []}
BATCH_NUM = 0
for entry in tqdm(ALL_PAIRS['entries']):
BATCH['entries'].append(entry)
if len(BATCH['entries']) > BATCH_SIZE:
with open(f"{MY_PATH}/Enron_{INBOX}/ComparisonBatches01152022/BATCH_{BATCH_NUM}.json", "w") as f:
json.dump(BATCH, f)
BATCH = {'entries': []}
BATCH_NUM += 1
if BATCH_NUM > 10:
BATCH_SIZE = 100
# trailing pairs in the final batch
if len(BATCH['entries']) > 0:
with open(f"{MY_PATH}/Enron_{INBOX}/ComparisonBatches01152022/BATCH_{BATCH_NUM}.json", "w") as f:
json.dump(BATCH, f)
BATCH = {'entries': []}
BATCH_NUM += 1
print(BATCH_NUM)
if __name__ == "__main__":
# DETERMINE WHICH STEPS TO RUN
create_corpus = 1
prepare_entity_maps = 1
prepare_comparison_qs = 1
generate_pairs = 1
####### PRODUCE CORPUS ########
if create_corpus:
# 90 minutes
create_local_passages_wrapper()
# 4.5 minutes
identify_duplicates_by_text()
######## REFINE LISTS OF LOCAL AND GLOBAL ENTITIES ########
if prepare_entity_maps:
# 0.5 minutes
st = time.time()
filter_named_entities()
print(f"Filtered named entities in time: {time.time() - st}")
# 35 minutes
st = time.time()
create_named_entity_maps()
print(f"Created named entities map in time: {time.time() - st}")
if prepare_comparison_qs:
filter_ner_maps()
######## GENERATE PASSAGE PAIRS ########
if generate_pairs:
extra_cleaning()
# ~1 hour if the global/global anchors are pre-saved; multiple days if we need to query the KILT database
# 1 minute for just global/local and local/local
generate_passage_pairs()
generate_comparison_passage_pairs()
# 1 minute
save_final_passage_pairs()
convert_pairs_to_batches()
|
concurrentqa-main
|
dataset_construction/cleanEnron.py
|
import os
import sys
import argparse
import json
import pandas as pd
from collections import Counter, defaultdict
from importlib import reload
from email.parser import Parser
# recursively get the document body
def get_body(body):
if type(body) == str:
return [body]
else:
body_results = []
for b in body:
b_value = b.get_payload()
if type(b_value) != str:
body_results.append(get_body(b_value))
else:
body_results.append(b_value)
return body_results
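# parse_document reads one raw email file, parses it with email.parser.Parser, and returns a
# dict of the header fields plus the (possibly nested) list of body parts.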
def parse_document(f):
try:
doc = f.read()
except Exception as e:
print(f"Exception, bad email: {e}!")
doc = ""
email = Parser().parsestr(doc)
parse = defaultdict(list)
for key in email.keys():
parse[key] = email[key]
body = email.get_payload()
parse["Body"] = get_body(body)
return parse
# recurse because some sub-directories contain further sub-directories of emails
def inspect_sub_dir(email_filename):
if os.path.isfile(email_filename):
with open(email_filename, "r") as f:
entry = parse_document(f)
entry["EMAIL_ID"] = email_filename
assert type(entry["Body"]) == list
return [entry]
else:
emails = os.listdir(email_filename)
emails.sort()
database = []
for email in emails:
file_name = email_filename + "/" + email
database.extend(inspect_sub_dir(file_name))
return database
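# make_df walks every sub-directory of an inbox, parses each email file into a dict of
# headers and body, and returns the list of parsed emails (written out later as one CSV per
# inbox).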
def make_df(args, inbox):
database = []
sub_dirs = os.listdir(args.data_dir + inbox)
print(sub_dirs)
for sub_dir in sub_dirs:
emails_dir = args.data_dir + inbox + "/" + sub_dir
emails = os.listdir(emails_dir)
emails.sort()
for email in emails:
email_filename = emails_dir + "/" + email
database.extend(inspect_sub_dir(email_filename))
return database
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Load datasets for enron.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data_dir",
type=str,
default="/checkpoint/simarora/PersonalDatasets/Enron/maildir/",
help="Raw enron data.",
)
parser.add_argument(
"--db_dir",
type=str,
default="/checkpoint/simarora/PersonalDatasets/Enron/parsed_maildir",
help="Parsed emails directory.",
)
args = parser.parse_args()
inboxes = os.listdir(args.data_dir)
inboxes.sort()
for inbox in inboxes:
if os.path.exists(f"{args.db_dir}/{inbox}_09082021.csv"):
continue
print(f"STARTING FOR INBOX: {inbox}")
try:
database = make_df(args, inbox)
print(f"MADE INITIAL DB: {len(database)}")
email_keys = database[0].keys()
df = pd.DataFrame(database)
outfile = f"{args.db_dir}/{inbox}_09082021.csv"
df.to_csv(outfile)
except Exception as e:
print(f"FAILED ON INBOX: {inbox} ({e})")
|
concurrentqa-main
|
dataset_construction/EnronParser.py
|
import os
import csv
import ujson
import json
from tqdm import tqdm
import requests
import pandas as pd
import numpy as np
import time
import ast
import random
from collections import Counter, defaultdict, OrderedDict
INBOX = "skilling-j"
def add_entry(q="", idx="", answer=[], sp1={}, sp2={}, typ="", domain=[]):
entry = {
'question': q,
'_id': idx,
'answer': answer,
'sp': [sp1, sp2],
'type': typ, # comparison or bridge
'domain': domain, # 0, 1
}
original_entry = {
'_id':idx,
'answer':answer[0],
'question':q,
'supporting_facts':[[sp1['title'], 0], [sp2['title'], 0]],
'context':[],
'type':typ,
'level':'hard'
}
return entry, original_entry
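# Illustrative shape of a returned `entry` (values abbreviated, not real data):
#   {'question': '...', '_id': '01P', 'answer': ['...'],
#    'sp': [{'title': ..., 'sents': [...], 'sp_sent_ids': [...]}, {...}],
#    'type': 'bridge', 'domain': [0, 1]}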
local_global_queries = []
original_queries = []
with open("/checkpoint/simarora/mdr/wikipassages2sents.json") as f:
wikipassages2sents = json.load(f)
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/subject2sents.json") as f:
subject2sents = json.load(f)
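# Each map goes from a passage title (Wikipedia) or an email subject / message id (inbox)
# to its list of sentences; the 'sp_sent_ids' below index into these sentence lists.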
# question
entry, original_entry = add_entry(q="The company providing natural gas transmission between Western US states such as New Mexico and Texas is helping support credit lines worth how much money?",
idx="01P",
answer=["$1 billion"],
sp1={'title': 'Transwestern Pipeline',
"sents": wikipassages2sents['Transwestern Pipeline'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL daily update',
"sents":subject2sents['PERSONAL daily update'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Texas Pacific Group is known for investments in what motorcycle company?",
idx="02P",
answer=["Ducati"],
sp1={'title': 'TPG Capital',
"sents": wikipassages2sents['TPG Capital'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: jeff skilling for tpg ceo conference',
"sents":subject2sents['PERSONAL re: jeff skilling for tpg ceo conference'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What type of partnership does Enron want to form with the media entertainment conglomerate, which is based in Burbank, California?",
idx="03P",
answer=["broadband"],
sp1={'title':'The Walt Disney Company',
"sents":wikipassages2sents['The Walt Disney Company'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: broadband partnership with walt disney corp',
"sents":subject2sents['PERSONAL re: broadband partnership with walt disney corp'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many times per year can the exam that the Enron candidate from Princeton took be taken?",
idx="04P",
answer=["five", "5"],
sp1={'title': 'PERSONAL enron candidate',
"sents":subject2sents['PERSONAL enron candidate'],
'sp_sent_ids': [0, 1, 2]},
sp2={'title':'Graduate Management Admission Test',
"sents":wikipassages2sents['Graduate Management Admission Test'],
'sp_sent_ids': [0, 4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the current growth rate of the Fortune 500 company originally called Metropolitan Pathology Labs?",
idx="05",
answer=["50%", '50% per year'],
sp1={'title':'Quest Diagnostics',
"sents":wikipassages2sents['Quest Diagnostics'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL mischer-interfit health',
"sents":subject2sents['PERSONAL mischer-interfit health'],
'sp_sent_ids': [20]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How much is the Atlanta based contact technology company offering per newly referred customer?",
idx="06P",
answer=["$1,000.00", "$1,000.00 plus"],
sp1={'title':'Noble Systems Corporation',
"sents":wikipassages2sents['Noble Systems Corporation'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL noble systems executive update + opportunities for you',
"sents":subject2sents['PERSONAL noble systems executive update + opportunities for you'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Jim Kelly is the CEO participating in the Mastermind Keynote Interview. How many customers does the company Jim Kelly is from have?",
idx="07P",
answer=["7.9 million", "more than 7.9 million"],
sp1={'title':'United Parcel Service',
"sents":wikipassages2sents['United Parcel Service'],
'sp_sent_ids': [0, 3]},
sp2={'title':'PERSONAL re: invitation',
"sents":subject2sents['PERSONAL re: invitation'],
'sp_sent_ids': [5, 8]},
typ="bridge",
domain=[1,0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Does Enron earn money from projects conducted at the U.S. Navy facility located 100 km from Manila Bay?",
idx="08P",
answer=["yes"],
sp1={'title':'Subic Bay',
"sents":wikipassages2sents['Subic Bay'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL end of an era',
"sents":subject2sents['PERSONAL end of an era'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the dean just elected to the Enron Board of Directors born?",
idx="09P",
answer=["May 30, 1946", "1946"],
sp2={'title':'William Powers, Jr.',
"sents":wikipassages2sents['William Powers, Jr.'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL enron update0',
"sents":subject2sents['PERSONAL enron update0'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="After Enron announced the $1 billion credit line, was its S&P rating the same as Hyundai Haesang’s S&P rating?",
idx="10P",
answer=["yes"],
sp1={'title':'Hyundai Marine & Fire Insurance',
"sents":wikipassages2sents['Hyundai Marine & Fire Insurance'],
'sp_sent_ids': [0, 5]},
sp2={'title':'PERSONAL enron update',
"sents":subject2sents['PERSONAL enron update'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did any of the selected guests who will be at the Insight Capital May 15th dinner work for Goldman Sachs?",
idx="11P",
answer=["yes"],
sp2={'title':'Robert Rubin',
"sents":wikipassages2sents['Robert Rubin'],
'sp_sent_ids': [0, 2]},
sp1={'title':'PERSONAL re: telephone call with jerry murdock15',
"sents":subject2sents['PERSONAL re: telephone call with jerry murdock15'],
'sp_sent_ids': [7, 8, 10]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are any of my fellow invitees for the Insight Capital dinner chemical engineers?",
idx="12P",
answer=["yes"],
sp2={'title':'Jack Welch',
"sents":wikipassages2sents['Jack Welch'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL re: telephone call with jerry murdock15',
"sents":subject2sents['PERSONAL re: telephone call with jerry murdock15'],
'sp_sent_ids': [8, 10]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="On what day is the upcoming luncheon with the co-founder of Netscape, Hewlett-Packard, and Mosaic?",
idx="13P",
answer=["Friday, June 22nd"],
sp1={'title':'Marc Andreessen',
"sents":wikipassages2sents['Marc Andreessen'],
'sp_sent_ids': [0, 1, 2, 3]},
sp2={'title':'PERSONAL marc andreessen in dallas 6/22...0',
"sents":subject2sents['PERSONAL marc andreessen in dallas 6/22...0'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Will there be lunch at the event for the Cambridge Ivy League institution?",
idx="14P",
answer=["no"], # there will be dinner...
sp1={'title':'Harvard University',
"sents":wikipassages2sents['Harvard University'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: harvard forum 05/18/01 - second invite1',
"sents":subject2sents['PERSONAL re: harvard forum 05/18/01 - second invite1'],
'sp_sent_ids': [5,6]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Where is the headquarters for the association with an upcoming advertisement in 'On Scene' magazine?",
idx="15P",
answer=["Fairfax, Virginia"],
sp2={'title':"International Association of Fire Chiefs" ,
"sents":wikipassages2sents["International Association of Fire Chiefs"],
'sp_sent_ids': [0, 3]},
sp1={'title':'PERSONAL the list, legal opinion & other news',
"sents":subject2sents['PERSONAL the list, legal opinion & other news'],
'sp_sent_ids': [4, 5]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When visiting the affluent summer colony located south of Cape Cod, Jeff mentioned he wanted to walk through what?",
idx="16P",
answer=["house", "our house"],
sp1={'title':"Martha\'s Vineyard",
"sents":wikipassages2sents["Martha\'s Vineyard"],
'sp_sent_ids': [0]},
sp2={'title': 'PERSONAL re: christmas gathering2',
"sents":subject2sents['PERSONAL re: christmas gathering2'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the speaker for Commercial and Political Perspectives luncheon born?",
idx="17P",
answer=["1956"],
sp2={'title':'Bernard Harris (disambiguation)',
"sents":wikipassages2sents['Bernard Harris (disambiguation)'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL re: hbs april 25 luncheon - reminder1',
"sents":subject2sents['PERSONAL re: hbs april 25 luncheon - reminder1'],
'sp_sent_ids': [4,5,6,7]},
typ="bridge",
domain=[1,0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="At the golf tournament on Fazio Course, the New York eCommerce Association will dedicate proceeds to an organization affiliated with which International Federation?",
idx="18P",
answer=["International Federation of Red Cross and Red Crescent Societies"],
sp1={'title':'American Red Cross',
"sents":wikipassages2sents['American Red Cross'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL upcoming golf tournament and venture capital conference',
"sents":subject2sents['PERSONAL upcoming golf tournament and venture capital conference'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For how many days is Cheryl going to the city historically known as Constantinople and Byzantium?",
idx="19P",
answer=["3"],
sp1={'title':'Istanbul',
"sents":wikipassages2sents['Istanbul'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL a trip to turkey',
"sents":subject2sents['PERSONAL a trip to turkey'],
'sp_sent_ids': [2, 7]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The 2002 television film The Junction Boys was based on a book by an author who signed a book for who?",
idx="20P",
answer=["Jim Bavouset"],
sp1={'title':'The Junction Boys (film)',
"sents":wikipassages2sents['The Junction Boys (film)'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it3',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it3'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The popular author in Beta suggested which dinner location for Thursday of homecoming weekend?",
idx="21P",
answer=["The Double Tree and Central Expressway"],
sp1={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4'],
'sp_sent_ids': [3]},
sp2={'title':'PERSONAL -kai-3',
"sents":subject2sents['PERSONAL -kai-3'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Beta who elaborated on another dinner idea for Thursday of HC is also organizing an outing for which sport?",
idx="22P",
answer=["golf"],
sp1={'title':'PERSONAL -kai-16',
"sents":subject2sents['PERSONAL -kai-16'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL -kai-19',
"sents":subject2sents['PERSONAL -kai-19'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the guy who Chuck Paul added to the Beta list arriving to HC weekend with his son?",
idx="23P",
answer=["no"],
sp1={'title':'PERSONAL -kai-3',
"sents":subject2sents['PERSONAL -kai-3'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL -kai-5',
"sents":subject2sents['PERSONAL -kai-5'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the newspaper which blasted Mayor Brown the largest in the United States by Sunday circulation?",
idx="24P",
answer=["no"],
sp1={'title':'PERSONAL chronicle article on hfd',
"sents":subject2sents['PERSONAL chronicle article on hfd'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Houston Chronicle',
"sents":wikipassages2sents['Houston Chronicle'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The substituted CEO of Mexican Petroleums is close to which politician?",
idx="25P",
answer=["Francisco Labastida"],
sp1={'title':'Pemex',
"sents":wikipassages2sents['Pemex'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL change at pemex',
"sents":subject2sents["PERSONAL change at pemex"],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Rogelio's close friend who was running for presidency win the 2000 presidential election?",
idx="26P",
answer=["no"],
sp2={'title':'Francisco Labastida',
"sents":wikipassages2sents['Francisco Labastida'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL change at pemex',
"sents":subject2sents['PERSONAL change at pemex'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The agency that \"manages pension and health benefits for millions of California employees\" owns how many Enron shares?",
idx="27P",
answer=["2.6 million"],
sp2={'title':'PERSONAL jedi ii',
"sents":subject2sents['PERSONAL jedi ii'],
'sp_sent_ids': [1]},
sp1={'title':'CalPERS',
"sents":wikipassages2sents['CalPERS'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0,1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Tonn Ostergard from YPO and Jim Dent live in the same state?",
idx="28P",
answer=["no"],
sp1={'title':'PERSONAL parent child mountain adventure, july 21-25, 2001',
"sents":subject2sents['PERSONAL parent child mountain adventure, july 21-25, 2001'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4',
"sents":subject2sents['PERSONAL re: [smu-betas] 76ers - it\'s time to hump it4'],
'sp_sent_ids': [3, 6]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who supposedly made up lies about the player who won 47 straight games in college football?",
idx="29P",
answer=["Dent"],
sp1={'title':'Bud Wilkinson',
"sents":wikipassages2sents['Bud Wilkinson'],
'sp_sent_ids': [0, 2]},
sp2={'title':'PERSONAL [smu-betas] dent pisses on bud wilkinson\'s grave',
"sents":subject2sents['PERSONAL [smu-betas] dent pisses on bud wilkinson\'s grave'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="In what year was the athletic director of the Big 12 Conference Sooners born?",
idx="30P",
answer=["1957"],
sp1={'title':"Oklahoma Sooners",
"sents":wikipassages2sents["Oklahoma Sooners"],
'sp_sent_ids': [0, 2, 3]},
sp2={'title':'Joe Castiglione (athletic director)',
"sents":wikipassages2sents['Joe Castiglione (athletic director)'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who wrote the song that Bill Miller will need to sing in pink skivvies?",
idx="31P",
answer=["Arthur M Alden"],
sp2={'title':'Boomer Sooner',
"sents":wikipassages2sents['Boomer Sooner'],
'sp_sent_ids': [0, 1, 2]},
sp1={'title':'PERSONAL re: [smu-betas] dent\'s wrath',
"sents":subject2sents['PERSONAL re: [smu-betas] dent\'s wrath'],
'sp_sent_ids': [1, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The fight song of the New Haven based Ivy League University is borrowed from a song written in which year?",
idx="32P",
answer=["1898"],
sp1={'title':'Yale University',
"sents":wikipassages2sents['Yale University'],
'sp_sent_ids': [0]},
sp2={'title':"Boomer Sooner",
"sents":wikipassages2sents["Boomer Sooner"],
'sp_sent_ids': [3]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For the Astros vs. Giants game at Enron Field, the Enron sign will feature the logo of a nonprofit organization that has how many offices throughout the country?",
idx="33P",
answer=["1,200"],
sp2={'title':'United Way of America',
"sents":wikipassages2sents['United Way of America'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL enron and united way\'s continued partnership',
"sents":subject2sents['PERSONAL enron and united way\'s continued partnership'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="All communications between Enron and LJM must be preserved under an Act created in what year?",
idx="34P",
answer=["1995"],
sp2={'title':'Private Securities Litigation Reform Act',
"sents":wikipassages2sents['Private Securities Litigation Reform Act'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL important announcement regarding document preservation',
"sents":subject2sents['PERSONAL important announcement regarding document preservation'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the approximate population of the city Mark visited in Georgia?",
idx="35P",
answer=["1.5 million people"],
sp2={'title':'Tbilisi',
"sents":wikipassages2sents['Tbilisi'],
'sp_sent_ids': [0]},
sp1={'title':'<6289674.1075845512831.JavaMail.evans@thyme>',
"sents":subject2sents['<6289674.1075845512831.JavaMail.evans@thyme>'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Was the singer of Baby One More Time requested as a speaker for the Enron eSpeak event?",
idx="36P",
answer=["yes"],
sp1={'title':"Britney Spears",
"sents":wikipassages2sents["Britney Spears"],
'sp_sent_ids': [0, 2, 4]},
sp2={'title':'PERSONAL espeak survey: the results are in!0',
"sents":subject2sents['PERSONAL espeak survey: the results are in!0'],
'sp_sent_ids': [0, 2, 3]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Steve Spar of ESX Engineering and Bruce Wrobel of EnCom hold the same position at their companies?",
idx="37P",
answer=["yes"], # CEO
sp1={'title':'PERSONAL status report on enron\'s investment in encom0',
"sents":subject2sents['PERSONAL status report on enron\'s investment in encom0'],
'sp_sent_ids': [0, 2]},
sp2={'title':'PERSONAL referred by jeff spar (mck - ny)',
"sents":subject2sents['PERSONAL referred by jeff spar (mck - ny)'],
'sp_sent_ids': [10]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the nearest hospital to the area where Reliance is developing a liquid fuel fired power plant?",
idx="38P",
answer=["Dhirubhai Ambani Hospital"],
sp2={'title':'Patalganga',
"sents":wikipassages2sents['Patalganga'],
'sp_sent_ids': [3]},
sp1={'title':'PERSONAL re: maharashtra plants',
"sents":subject2sents['PERSONAL re: maharashtra plants'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Dabhol and Patalganga located in the same state in India?",
idx="39P",
answer=["yes"],
sp1={'title':'Patalganga',
"sents":wikipassages2sents['Patalganga'],
'sp_sent_ids': [0]},
sp2={'title':'Dabhol',
"sents":wikipassages2sents['Dabhol'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="What is the name of the online magazine of the Oil Patch District Federal Reserve Bank?",
idx="40P",
answer=["e-Perspectives"],
sp1={'title':'Federal Reserve Bank of Dallas',
"sents":wikipassages2sents['Federal Reserve Bank of Dallas'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL welcome to the federal reserve bank of dallas community affairs\n announcement list',
"sents":subject2sents['PERSONAL welcome to the federal reserve bank of dallas community affairs\n announcement list'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The meeting with Merrill Lynch about the Houston based water services company is on what date?",
idx="41P",
answer=["Monday February 28"],
sp1={'title':"Azurix",
"sents":wikipassages2sents["Azurix"],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: azurix investment banking meeting0',
"sents":subject2sents['PERSONAL re: azurix investment banking meeting0'],
'sp_sent_ids': [3, 6]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Azurix and Enron headquartered in the same city?",
idx="42P",
answer=["yes"],
sp1={'title':'Azurix',
"sents":wikipassages2sents['Azurix'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Enron',
"sents":wikipassages2sents['Enron'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Skilling booked which suite to watch the MLB game for the team based in Houston?",
idx="43P",
answer=["Drayton McLane's"],
sp1={'title':"Houston Astros",
"sents":wikipassages2sents["Houston Astros"],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL astros game',
"sents":subject2sents['PERSONAL astros game'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="A professor at the Pasadena based university co-founded which corporation that's selling a software framework to Enron?",
idx="44P",
answer=["iSpheres"],
sp1={'title':"California Institute of Technology",
"sents":wikipassages2sents["California Institute of Technology"],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: advanced arbitrage enabling technology out of caltech',
"sents":subject2sents['PERSONAL re: advanced arbitrage enabling technology out of caltech'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="To support the longest-serving Republican senator in Montana history, to whom should the checks be made payable?",
idx="45P",
answer=["Friends of Conrad Burns"],
sp1={'title':"Conrad Burns",
"sents":wikipassages2sents["Conrad Burns"],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL senator conrad burns contribution',
"sents":subject2sents['PERSONAL senator conrad burns contribution'],
'sp_sent_ids': [0, 1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The partner Enron is in litigation with in Federal court proposed to acquire what Corp from Enron?",
idx="46P",
answer=["Enron Renewable Energy Corp"],
sp1={'title':'PERSONAL important announcement regarding document preservation',
"sents":subject2sents['PERSONAL important announcement regarding document preservation'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL project aura; draft disclosures re ljm2',
"sents":subject2sents['PERSONAL project aura; draft disclosures re ljm2'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Rebecca Carter's replacement for corporate secretary receive her bachelor's degree at a university located in Houston?",
idx="47P",
answer=["no"], # College Station
sp2={'title':"Texas A&M University",
"sents":wikipassages2sents["Texas A&M University"],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL enron board elects new corporate secretary',
"sents":subject2sents['PERSONAL enron board elects new corporate secretary'],
'sp_sent_ids': [0, 1, 6]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Austin based conservative think tank that's part of the State Policy Network is similar to which foundation in D.C.?",
idx="48P",
answer=["Heritage Foundation"],
sp1={'title':'Texas Public Policy Foundation',
"sents":wikipassages2sents['Texas Public Policy Foundation'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL texas public policy foundation dinner - thursday, february 15',
"sents":subject2sents['PERSONAL texas public policy foundation dinner - thursday, february 15'],
'sp_sent_ids': [0, 1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man who said Broadband held the future for delivery of Disney's entertainment product was born in what year?",
idx="49P",
answer=["1942"],
sp2={'title':'Michael Eisner',
"sents":wikipassages2sents['Michael Eisner'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL re: broadband partnership with walt disney corp',
"sents":subject2sents['PERSONAL re: broadband partnership with walt disney corp'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the two businessmen Jack Welch and Michael Eisner both of the same nationality?",
idx="50P",
answer=["yes"],
sp1={'title':'Jack Welch',
"sents":wikipassages2sents['Jack Welch'],
'sp_sent_ids': [0]},
sp2={'title':'Michael Eisner',
"sents":wikipassages2sents['Michael Eisner'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are employees able to access the web portal, which spun off from Time Warner in 2009, on Enron computers?",
idx="51P",
answer=["no"],
sp1={'title':'AOL',
"sents":wikipassages2sents['AOL'],
'sp_sent_ids': [0, 7]},
sp2={'title':'PERSONAL external e-mail sites',
"sents":subject2sents['PERSONAL external e-mail sites'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Why couldn't Wade attend the meeting with the Indian politician who serves as president of the Nationalist Congress Party?",
idx="52P",
answer=["he fell sick"],
sp1={'title':'Sharad Pawar',
"sents":wikipassages2sents['Sharad Pawar'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL re: meetings with sharad pawar',
"sents":subject2sents['PERSONAL re: meetings with sharad pawar'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which was founded first, the party founded by Sharad Pawar or the Indian National Congress?",
idx="53P",
answer=["Indian National Congress"],
sp2={'title':'Indian National Congress',
"sents":wikipassages2sents['Indian National Congress'],
'sp_sent_ids': [0, 1]},
sp1={'title':'Sharad Pawar',
"sents":wikipassages2sents['Sharad Pawar'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Enron signed the manufacturer founded by Sidney and Bernard in 1953 to manufacture which offering?",
idx="54P",
answer=["CD/DVD"],
sp1={'title':'Harman Kardon',
"sents":wikipassages2sents['Harman Kardon'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL congrats & zapmedia...',
"sents":subject2sents['PERSONAL congrats & zapmedia...'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The Houston Astros have played as members of the Junior and \"Senior Circuit\"?",
idx="55P",
answer=["yes"],
sp1={'title':'Houston Astros',
"sents":wikipassages2sents['Houston Astros'],
'sp_sent_ids': [0, 1]},
sp2={'title':'American League',
"sents":wikipassages2sents['American League'],
'sp_sent_ids': [0, 2]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Azurix Corp and Wessex Water both operate in North America?",
idx="56P",
answer=["no"],
sp1={'title':'Wessex Water',
"sents":wikipassages2sents['Wessex Water'],
'sp_sent_ids': [0]},
sp2={'title':'Azurix',
"sents":wikipassages2sents['Azurix'],
'sp_sent_ids': [0, 2]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Will the sewerage utility company that serves 1.3 million people in England be part of the Enron Global Assets and Services unit?",
idx="57P",
answer=["no"],
sp1={'title':'Wessex Water',
"sents":wikipassages2sents['Wessex Water'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL organizational changes3',
"sents":subject2sents['PERSONAL organizational changes3'],
'sp_sent_ids': [0, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who chose to donate to the company that's \"the equivalent of off-Broadway in Houston\" in the Enron Matching Gift Program?",
idx="58P",
answer=[" Rebecca Skupin"],
sp1={'title':'Stages Repertory Theatre',
"sents":wikipassages2sents['Stages Repertory Theatre'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL enron matching gift program winners',
"sents":subject2sents['PERSONAL enron matching gift program winners'],
'sp_sent_ids': [0, 6]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The employee focusing on distressed counterparties in RAC will represent Enron in which case?",
idx="59P",
answer=["Pacific Gas and Electric Company bankruptcy case"],
sp2={'title':'PERSONAL pg&e bankruptcy case-- important',
"sents":subject2sents['PERSONAL pg&e bankruptcy case-- important'],
'sp_sent_ids': [1, 2]},
sp1={'title':'PERSONAL new legal team to assist rac',
"sents":subject2sents['PERSONAL new legal team to assist rac'],
'sp_sent_ids': [5]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Kevin Hannon and Lisa Mellencamp part of the same Enron business unit?",
idx="60P",
answer=["no"],
sp1={'title':'PERSONAL organizational changes3',
"sents":subject2sents['PERSONAL organizational changes3'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL new legal team to assist rac',
"sents":subject2sents['PERSONAL new legal team to assist rac'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did any of the investors in the KnowledgeCube venture capital firm get caught for insider trading?",
idx="61P",
answer=["yes"],
sp1={'title':'PERSONAL mckinsey alums/energy fund',
"sents":subject2sents['PERSONAL mckinsey alums/energy fund'],
'sp_sent_ids': [3, 5]},
sp2={'title':'Rajat Gupta',
"sents":wikipassages2sents['Rajat Gupta'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many Grand Prix wins does Luca Baldisserri's driver at the Monaco Grand Prix have?",
idx="62P",
answer=["91"],
sp2={'title':'Michael Schumacher',
"sents":wikipassages2sents['Michael Schumacher'],
'sp_sent_ids': [2]},
sp1={'title':'PERSONAL monaco grand prix',
"sents":subject2sents['PERSONAL monaco grand prix'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The championship Michael Schumacher has won seven times is sanctioned by which Federation?",
idx="63P",
answer=["Fédération Internationale de l'Automobile"],
sp1={'title':'Michael Schumacher',
"sents":wikipassages2sents['Michael Schumacher'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Formula One',
"sents":wikipassages2sents['Formula One'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The conference which has been held annually at Reliant Park since 1969 has drawn close to how many participants?",
idx="64P",
answer=["50,000"],
sp1={'title':'Offshore Technology Conference',
"sents":wikipassages2sents['Offshore Technology Conference'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL speaking opportunity, otc - may2001',
"sents":subject2sents['PERSONAL speaking opportunity, otc - may2001'],
'sp_sent_ids': [8]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="For which event did Jeff Skilling invite the man who developed the market for \"Junk Bonds\" to speak?",
idx="65P",
answer=["Key Executive breakfast"],
sp1={'title':'Michael Milken',
"sents":wikipassages2sents['Michael Milken'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL re: michael milken',
"sents":subject2sents['PERSONAL re: michael milken'],
'sp_sent_ids': [0, 8]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When did the ISO that works with the Texas Reliability Entity begin processing switch requests according to PUCT?",
idx="66P",
answer=["July 31, 2001"],
sp1={'title':'Electric Reliability Council of Texas',
"sents":wikipassages2sents['Electric Reliability Council of Texas'],
'sp_sent_ids': [0, 1, 2]},
sp2={'title':'PERSONAL important update on your newpower service',
"sents":subject2sents['PERSONAL important update on your newpower service'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The November Rally Against Terrorism will be held at a Hotel which is across the street from which subway station?",
idx="67P",
answer=["Pennsylvania Station"],
sp2={'title':'Hotel Pennsylvania',
"sents":wikipassages2sents['Hotel Pennsylvania'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL rally againt terrorism',
"sents":subject2sents['PERSONAL rally againt terrorism'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the Mastermind Keynote Interview in May and Creating Value with Internet Technologies conference in October located in the same city?",
idx="68P",
answer=["no"],
sp1={'title':'PERSONAL re: invitation',
"sents":subject2sents['PERSONAL re: invitation'],
'sp_sent_ids': [5]},
sp2={'title':'PERSONAL speaker invitation to economist conference 24-25 october',
"sents":subject2sents['PERSONAL speaker invitation to economist conference 24-25 october'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are Southern California Edison, Pacific Gas & Electric, and San Diego Gas & Electric based in the same city?",
idx="69P",
answer=["no"],
sp1={'title':'San Diego Gas & Electric',
"sents":wikipassages2sents['San Diego Gas & Electric'],
'sp_sent_ids': [0, 1]},
sp2={'title':'Pacific Gas and Electric Company',
"sents":wikipassages2sents['Pacific Gas and Electric Company'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Enron was referred to as Williams Companies in the information request for an Act administered by which U.S. Agency?",
idx="70P",
answer=["Environmental Protection Agency"],
sp2={'title':'Clean Water Act',
"sents":wikipassages2sents['Clean Water Act'],
'sp_sent_ids': [0, 3, 4]},
sp1={'title':'PERSONAL fw: 308 information request',
"sents":subject2sents['PERSONAL fw: 308 information request'],
'sp_sent_ids': [0, 2, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many mutual fund offerings does the firm Philippe Bibi is resigning from Enron to join have?",
idx="71P",
answer=["79"],
sp2={'title':'Putnam Investments',
"sents":wikipassages2sents['Putnam Investments'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL philippe bibi',
"sents":subject2sents['PERSONAL philippe bibi'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which firm manages more assets, Galleon Group or Putnam Investments?",
idx="72P",
answer=["Putnam Investments"],
sp1={'title':'Galleon Group',
"sents":wikipassages2sents['Galleon Group'],
'sp_sent_ids': [0]},
sp2={'title':'Putnam Investments',
"sents":wikipassages2sents['Putnam Investments'],
'sp_sent_ids': [0, 1]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The 37th Governor of California has aggressively blamed companies from which state for California\'s energy meltdown?",
idx="73P",
answer=["Texas"],
sp1={'title':'Gray Davis',
"sents":wikipassages2sents['Gray Davis'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL la times article',
"sents":subject2sents['PERSONAL la times article'],
'sp_sent_ids': [3, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man whose leadership leads CA to purchase power at $135 per megawatt hour appeared on which late-night talk show?",
idx="74P",
answer=["The Tonight Show with Jay Leno"],
sp1={'title':'PERSONAL encouraging poll results',
"sents":subject2sents['PERSONAL encouraging poll results'],
'sp_sent_ids': [3]},
sp2={'title':'PERSONAL the "dark" side of popular culture',
"sents":subject2sents['PERSONAL the "dark" side of popular culture'],
'sp_sent_ids': [1,4, 7, 8]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="The man who sent Wes Carberry an email about Jeff's departure has what street address?",
idx="75P",
answer=["1440 Smith Street"],
sp1={'title':'PERSONAL hope all is well...',
"sents":subject2sents['PERSONAL hope all is well...'],
'sp_sent_ids': [0, 8]},
sp2={'title':'PERSONAL fw: business development opportunity',
"sents":subject2sents['PERSONAL fw: business development opportunity'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the Texas Venture Capital Conference and Southwest Venture Capital Conference supported by any of the same organizations?",
idx="76P",
answer=["yes"], # Houston Technology Center
sp1={'title':'PERSONAL texas venture capital conference - 5.16.01',
"sents":subject2sents['PERSONAL texas venture capital conference - 5.16.01'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL upcoming golf tournament and venture capital conference',
"sents":subject2sents['PERSONAL upcoming golf tournament and venture capital conference'],
'sp_sent_ids': [4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Frank Ianna reports to a leader from AT&T who is interested in Enron's value propositions from which team specifically?",
idx="77P",
answer=["Enron-Adventis team"],
sp1={'title':'PERSONAL talking points - at&t',
"sents":subject2sents['PERSONAL talking points - at&t'],
'sp_sent_ids': [2, 5]},
sp2={'title':'PERSONAL moving forward: urgent, urgent.',
"sents":subject2sents['PERSONAL moving forward: urgent, urgent.'],
'sp_sent_ids': [1, 6]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="In which year was the individual on the Cogent Communications Advisory Committee from Coca Cola born?",
idx="78P",
answer=["1945"],
sp1={'title':'PERSONAL cogent communications',
"sents":subject2sents['PERSONAL cogent communications'],
'sp_sent_ids': [2, 3, 6]},
sp2={'title':'Sergio Zyman',
"sents":wikipassages2sents['Sergio Zyman'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="Sergio Zyman is known for his failure to launch a product which was later renamed to what?",
idx="79P",
answer=["Coke II"],
sp1={'title':'Sergio Zyman',
"sents":wikipassages2sents['Sergio Zyman'],
'sp_sent_ids': [0]},
sp2={'title':'New Coke',
"sents":wikipassages2sents['New Coke'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which state is larger by area, California or Texas?",
idx="80P",
answer=["Texas"],
sp1={'title':'California',
"sents":wikipassages2sents['California'],
'sp_sent_ids': [0]},
sp2={'title':'Texas',
"sents":wikipassages2sents['Texas'],
'sp_sent_ids': [0]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did the organization which Nasim spoke to McMahon about exceed its income targets during this first half?",
idx="81P",
answer=["yes"],
sp1={'title':'PERSONAL follow-up on my conversation in november',
"sents":subject2sents['PERSONAL follow-up on my conversation in november'],
'sp_sent_ids': [2, 11]},
sp2={'title':'PERSONAL accomplishments-june 2001',
"sents":subject2sents['PERSONAL accomplishments-june 2001'],
'sp_sent_ids': [2]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Who is the assistant to the man responsible for Enron's e-commerce systems development including ClickPaper.com?",
idx="82P",
answer=["Tina Spiller"],
sp1={'title':'PERSONAL re: fw: eworldtradex',
"sents":subject2sents['PERSONAL re: fw: eworldtradex'],
'sp_sent_ids': [12]},
sp2={'title':'PERSONAL your correspondence',
"sents":subject2sents['PERSONAL your correspondence'],
'sp_sent_ids': [3, 4, 5]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Do Jeff Skilling and Greg Piper have the same assistant at Enron?",
idx="83P",
answer=["no"],
sp1={'title':'PERSONAL your correspondence',
"sents":subject2sents['PERSONAL your correspondence'],
'sp_sent_ids': [8]},
sp2={'title':'PERSONAL re: fw: eworldtradex',
"sents":subject2sents['PERSONAL re: fw: eworldtradex'],
'sp_sent_ids': [12]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="The location for Eyeforenergy Asia 2001 is how many degrees north of the equator?",
idx="84P",
answer=["one"],
sp2={'title':'Singapore',
"sents":wikipassages2sents['Singapore'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL t,h: eyeforenergy briefing',
"sents":subject2sents['PERSONAL t,h: eyeforenergy briefing'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="Where did the organizer of EEO Europe: Energy Trading in the New Economy hold its Asia 2001 conference?",
idx="85P",
answer=["Singapore"],
sp1={'title':'PERSONAL eeo europe: energy trading in the new economy',
"sents":subject2sents['PERSONAL eeo europe: energy trading in the new economy'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL t,h: eyeforenergy briefing',
"sents":subject2sents['PERSONAL t,h: eyeforenergy briefing'],
'sp_sent_ids': [7]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="The country that had a population of 14 million at its birth in 1923 is bordered by how many countries?",
idx="86P",
answer=["eight"],
sp2={'title':'Turkey',
"sents":wikipassages2sents['Turkey'],
'sp_sent_ids': [0, 1]},
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="After the earthquake in Turkey, Mark suggested sending contributions to an organization with national headquarters built between which years?",
idx="87P",
answer=["1915 and 1917"],
sp2={'title':'American Red Cross National Headquarters',
"sents":wikipassages2sents['American Red Cross National Headquarters'],
'sp_sent_ids': [0, 1]},
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 3, 6]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Mark compared the Bosphorus Bridge to a silver version of a bridge that spans a strait how many miles long?",
idx="88P",
answer=["1 mi"],
sp1={'title':'Golden Gate Bridge',
"sents":wikipassages2sents['Golden Gate Bridge'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL fwd: picture!',
"sents":subject2sents['PERSONAL fwd: picture!'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many people visit the 1545000 sqft retail space in Buckhead Atlanta annually?",
idx="89P",
answer=["25 million"],
sp1={'title':'Lenox Square',
"sents":wikipassages2sents['Lenox Square'],
'sp_sent_ids': [0, 1]},
sp2={'title':'PERSONAL lenox title sponsorship',
"sents":subject2sents['PERSONAL lenox title sponsorship'],
'sp_sent_ids': [0, 1, 2, 4]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which has a larger revenue, Genscape Inc., the two year old energy info provider, or Midcoast Energy Resources with its 4,100 miles of pipe?",
idx="90P",
answer=["Midcoast Energy Resources"],
sp1={'title':'PERSONAL david doctor & genscape, inc.',
"sents":subject2sents['PERSONAL david doctor & genscape, inc.'],
'sp_sent_ids': [6]},
sp2={'title':'PERSONAL acg october 9 lunch - reminder',
"sents":subject2sents['PERSONAL acg october 9 lunch - reminder'],
'sp_sent_ids': [7, 8]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Is the trustworthy organization Mark suggested for Turkey's recovery one of the organizations Enron contributed to for September 11 relief efforts?",
idx="91P",
answer=["yes"],
sp1={'title':'<32040359.1075840066357.JavaMail.evans@thyme>',
"sents":subject2sents['<32040359.1075840066357.JavaMail.evans@thyme>'],
'sp_sent_ids': [0, 6]},
sp2={'title':'PERSONAL our response to the u.s. tragedy',
"sents":subject2sents['PERSONAL our response to the u.s. tragedy'],
'sp_sent_ids': [0, 4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many people were killed on the plane with Nick Humber, the Enron employee who was traveling to Los Angeles?",
idx="92P",
answer=["92"],
sp2={'title':'American Airlines Flight 11',
"sents":wikipassages2sents['American Airlines Flight 11'],
'sp_sent_ids': [0, 1]},
sp1={'title':'PERSONAL tragedy claims life of enron employee',
"sents":subject2sents['PERSONAL tragedy claims life of enron employee'],
'sp_sent_ids': [0, 1, 2, 3]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="Who is the new Chief Financial Officer of the Enron group where Nick Humber was a director?",
idx="93P",
answer=["Tod Lindholm"],
sp1={'title':'PERSONAL tragedy claims life of enron employee',
"sents":subject2sents['PERSONAL tragedy claims life of enron employee'],
'sp_sent_ids': [2]},
sp2={'title':'PERSONAL enron wind',
"sents":subject2sents['PERSONAL enron wind'],
'sp_sent_ids': [4]},
typ="bridge",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Which has more miles, the GST transmission lines in the Carolinas or Midcoast's pipelines in 10 states?",
idx="94P",
answer=["transmission lines"],
sp1={'title':'PERSONAL gridsouth appointment',
"sents":subject2sents['PERSONAL gridsouth appointment'],
'sp_sent_ids': [4]},
sp2={'title':'PERSONAL acg october 9 lunch - reminder',
"sents":subject2sents['PERSONAL acg october 9 lunch - reminder'],
'sp_sent_ids': [8]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="Votenet Solutions, Inc released a Spanish language version of its software in celebration of an event which starts when?",
idx="95P",
answer=["September 15"],
sp2={'title':'National Hispanic Heritage Month',
"sents":wikipassages2sents['National Hispanic Heritage Month'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL votenet announces online voter registration software in spanish',
"sents":subject2sents['PERSONAL votenet announces online voter registration software in spanish'],
'sp_sent_ids': [1, 2]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="When was the company that acquired the EBS portfolio company, Amber Networks, founded?",
idx="96P",
answer=["1865"],
sp2={'title':'Nokia',
"sents":wikipassages2sents['Nokia'],
'sp_sent_ids': [0]},
sp1={'title':'PERSONAL amber and storageapps acquired',
"sents":subject2sents['PERSONAL amber and storageapps acquired'],
'sp_sent_ids': [1]},
typ="bridge",
domain=[1, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="How many million dollars is the relationship between the largest U.S. newspaper publisher by daily circulation and Eric's company?",
idx="97P",
answer=["$270 million"],
sp1={'title':'Gannett Company',
"sents":wikipassages2sents['Gannett Company'],
'sp_sent_ids': [0, 2, 3]},
sp2={'title':'PERSONAL congrats & zapmedia...0',
"sents":subject2sents['PERSONAL congrats & zapmedia...0'],
'sp_sent_ids': [1, 2]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Did Enron raise more money for United Way Scholars last year than the contribution amount for Enron's September 11 relief efforts?",
idx="98P",
answer=["yes"],
sp1={'title':'PERSONAL alexis de tocqueville breakfast and solicitation',
"sents":subject2sents['PERSONAL alexis de tocqueville breakfast and solicitation'],
'sp_sent_ids': [4, 5, 6]},
sp2={'title':'PERSONAL our response to the u.s. tragedy',
"sents":subject2sents['PERSONAL our response to the u.s. tragedy'],
'sp_sent_ids': [0, 4]},
typ="comparison",
domain=[1, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# # question
entry, original_entry = add_entry(q="The team owned by Bruce McCaw partnered with Enron in which year?",
idx="99P",
answer=["2001"],
sp1={'title':'PacWest Racing',
"sents":wikipassages2sents['PacWest Racing'],
'sp_sent_ids': [0]},
sp2={'title':'PERSONAL 2001 texaco/havoline grand prix',
"sents":subject2sents['PERSONAL 2001 texaco/havoline grand prix'],
'sp_sent_ids': [0]},
typ="bridge",
domain=[0, 1])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# question
entry, original_entry = add_entry(q="Are the New York City Subway and Washington Metro administered by the same Transit Authority agency?",
idx="100P",
answer=["no"],
sp1={'title':'New York City Subway',
"sents":wikipassages2sents['New York City Subway'],
'sp_sent_ids': [0]},
sp2={'title':'Washington Metro',
"sents":wikipassages2sents['Washington Metro'],
'sp_sent_ids': [0, 1]},
typ="comparison",
domain=[0, 0])
local_global_queries.append(entry.copy())
original_queries.append(original_entry.copy())
# save the queries
all_queries = []
all_queries.extend(local_global_queries.copy())
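# The queries are saved in two formats below: one JSON object per line, and a single
# JSON list in the HotpotQA-style format.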
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/enron_wiki_qas_val_all.json", "w") as f:
for query in all_queries:
json.dump(query, f)
f.write("\n")
with open(f"/checkpoint/simarora/PersonalDatasets/Enron/qa_runs/{INBOX}/enron_wiki_qas_original.json", "w") as f:
json.dump(original_queries, f)
print(f"Saved all {len(all_queries)} queries!")
|
concurrentqa-main
|
dataset_construction/Enron_skilling-j/make_queries.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for ConvNeXt", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=72, type=int, help="Duration of the job, in hours")
parser.add_argument("--job_name", default="convnext", type=str, help="Job name")
parser.add_argument("--job_dir", default="", type=str, help="Job directory; leave empty for default")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', default=True, help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/convnext")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
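# Trainer wraps classification.main in a picklable callable so submitit can ship
# it to the cluster. checkpoint() is invoked by submitit on preemption/timeout:
# it refreshes the rendezvous init file, turns on auto_resume, and requeues the job.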
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
self.args.auto_resume = True
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(self.args.job_dir)
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout * 60
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster-dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name=args.job_name)
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
|
ConvNeXt-main
|
run_with_submitit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import create_transform
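# build_dataset returns (dataset, nb_classes) for one of three settings:
# 'CIFAR' -> torchvision CIFAR-100, 'IMNET' -> ImageFolder over
# {data_path}/train or {data_path}/val, and 'image_folder' -> ImageFolder over
# an arbitrary root with args.nb_classes classes.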
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 100
elif args.data_set == 'IMNET':
print("reading from datapath", args.data_path)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
print("Number of the class = %d" % nb_classes)
return dataset, nb_classes
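# build_transform: training uses timm's create_transform (random resized crop,
# color jitter, AutoAugment/RandAugment per args.aa, random erasing); for small
# inputs (<= 32 px) the initial resize is swapped for RandomCrop with padding.
# Evaluation warps to (input_size, input_size) at >= 384 px, otherwise resizes
# by 1/crop_pct and center-crops, then converts to tensor and normalizes.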
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
# warping (no cropping) when evaluated at 384 or larger
if args.input_size >= 384:
t.append(
transforms.Resize((args.input_size, args.input_size),
interpolation=transforms.InterpolationMode.BICUBIC),
)
print(f"Warping {args.input_size} size input images...")
else:
if args.crop_pct is None:
args.crop_pct = 224 / 256
size = int(args.input_size / args.crop_pct)
t.append(
# to maintain same ratio w.r.t. 224 images
transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
|
ConvNeXt-main
|
datasets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
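# train_one_epoch runs gradient accumulation with update_freq micro-batches per
# optimizer step. At the first micro-step of each cycle it reads the global
# iteration's learning rate / weight decay from the precomputed schedules, then
# forwards (optionally under torch.cuda.amp.autocast), scales the loss by
# 1/update_freq, and either calls the loss_scaler (AMP) or loss.backward() plus
# optimizer.step() (fp32). Per-iteration stats go to the MetricLogger and, when
# enabled, to TensorBoard and Weights & Biases.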
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None, use_amp=False):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD for the first acc
if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if use_amp:
with torch.cuda.amp.autocast():
output = model(samples)
loss = criterion(output, targets)
else: # full precision
output = model(samples)
loss = criterion(output, targets)
loss_value = loss.item()
if not math.isfinite(loss_value): # this could trigger if using AMP
print("Loss is {}, stopping training".format(loss_value))
assert math.isfinite(loss_value)
if use_amp:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
else: # full precision
loss /= update_freq
loss.backward()
if (data_iter_step + 1) % update_freq == 0:
optimizer.step()
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
if use_amp:
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
if use_amp:
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if wandb_logger:
wandb_logger._wandb.log({
'Rank-0 Batch Wise/train_loss': loss_value,
'Rank-0 Batch Wise/train_max_lr': max_lr,
'Rank-0 Batch Wise/train_min_lr': min_lr
}, commit=False)
if class_acc:
wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False)
if use_amp:
wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False)
wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it})
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
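# evaluate runs the model in eval mode under torch.no_grad(), computes
# cross-entropy loss and top-1/top-5 accuracy per batch, and returns the
# globally averaged metrics after synchronizing across processes.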
@torch.no_grad()
def evaluate(data_loader, model, device, use_amp=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if use_amp:
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
else:
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
ConvNeXt-main
|
engine.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import math
import time
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
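# MetricLogger keeps one SmoothedValue per metric name, exposes them as
# attributes, can all-reduce counts/totals across processes, and log_every()
# wraps an iterable to print ETA, per-iteration timings, and (on CUDA) peak
# memory every print_freq steps.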
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
class WandbLogger(object):
def __init__(self, args):
self.args = args
try:
import wandb
self._wandb = wandb
except ImportError:
raise ImportError(
"To use the Weights and Biases Logger please install wandb."
"Run `pip install wandb` to install it."
)
# Initialize a W&B run
if self._wandb.run is None:
self._wandb.init(
project=args.project,
config=args
)
def log_epoch_metrics(self, metrics, commit=True):
"""
Log train/test metrics onto W&B.
"""
# Log number of model parameters as W&B summary
self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None)
metrics.pop('n_parameters', None)
# Log current epoch
self._wandb.log({'epoch': metrics.get('epoch')}, commit=False)
metrics.pop('epoch')
for k, v in metrics.items():
if 'train' in k:
self._wandb.log({f'Global Train/{k}': v}, commit=False)
elif 'test' in k:
self._wandb.log({f'Global Test/{k}': v}, commit=False)
self._wandb.log({})
def log_checkpoints(self):
output_dir = self.args.output_dir
model_artifact = self._wandb.Artifact(
self._wandb.run.id + "_model", type="model"
)
model_artifact.add_dir(output_dir)
self._wandb.log_artifact(model_artifact, aliases=["latest", "best"])
def set_steps(self):
# Set global training step
self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step')
# Set epoch-wise step
self._wandb.define_metric('Global Train/*', step_metric='epoch')
self._wandb.define_metric('Global Test/*', step_metric='epoch')
def setup_for_distributed(is_master):
"""
This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
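# init_distributed_mode picks up rank / world size from whichever launcher set
# them: OMPI_COMM_WORLD_* when --dist_on_itp is given, the standard
# RANK/WORLD_SIZE/LOCAL_RANK variables (torchrun / submitit), or SLURM_PROCID;
# otherwise it falls back to single-process mode. It then initializes the NCCL
# process group and silences print() on non-master ranks.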
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
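# NativeScalerWithGradNormCount wraps torch.cuda.amp.GradScaler: it scales the
# loss, runs backward, and only when update_grad is True unscales the
# gradients, (optionally) clips them, returns their norm, and steps/updates the
# scaler -- which is what lets train_one_epoch accumulate gradients under AMP.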
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
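# cosine_scheduler returns one value per training iteration: a linear warmup
# from start_warmup_value to base_value over warmup_epochs * niter_per_ep (or
# warmup_steps) iterations, followed by a cosine decay to final_value, for a
# total length of epochs * niter_per_ep.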
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
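# save_model writes checkpoint-{epoch}.pth containing the unwrapped model,
# optimizer, loss-scaler state, args and (optionally) the EMA weights; on the
# main process it also deletes the checkpoint that has aged past
# save_ckpt_num * save_ckpt_freq epochs.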
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
if is_main_process() and isinstance(epoch, int):
to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq
old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del)
if os.path.exists(old_ckpt):
os.remove(old_ckpt)
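# auto_load_model: with --auto_resume and no explicit --resume it scans
# output_dir for the highest-numbered checkpoint-*.pth, then restores the
# model, optimizer, start epoch, EMA weights and AMP scaler from it (or from a
# URL / path given via --resume).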
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema'
args.start_epoch = checkpoint['epoch'] + 1
else:
assert args.eval, 'Does not support resuming with checkpoint-best'
if hasattr(args, 'model_ema') and args.model_ema:
if 'model_ema' in checkpoint.keys():
model_ema.ema.load_state_dict(checkpoint['model_ema'])
else:
model_ema.ema.load_state_dict(checkpoint['model'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
|
ConvNeXt-main
|
utils.py
|