from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='PG_OP',
ext_modules=[
CUDAExtension('PG_OP', [
'src/bfs_cluster.cpp',
'src/bfs_cluster_kernel.cu',
])
],
cmdclass={'build_ext': BuildExtension}
)
| ContrastiveSceneContexts-main | downstream/insseg/lib/bfs/ops/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import torch
import hydra
import numpy as np
from lib.ddp_trainer import DetectionTrainer
from lib.distributed import multi_proc_run
def single_proc_run(config):
if not torch.cuda.is_available():
raise Exception('No GPUs FOUND.')
trainer = DetectionTrainer(config)
if config.net.is_train:
trainer.train()
else:
trainer.test()
@hydra.main(config_path='config', config_name='default.yaml')
def main(config):
# fix seed
np.random.seed(config.misc.seed)
torch.manual_seed(config.misc.seed)
torch.cuda.manual_seed(config.misc.seed)
port = random.randint(10001, 20001)
if config.misc.num_gpus > 1:
multi_proc_run(config.misc.num_gpus, port, fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
if __name__ == '__main__':
__spec__ = None
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
| ContrastiveSceneContexts-main | downstream/votenet/ddp_main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
class SunrgbdDatasetConfig(object):
def __init__(self):
self.num_class = 10
self.num_heading_bin = 12
self.num_size_cluster = 10
self.type2class={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.type2onehotclass={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
self.type_mean_size = {'bathtub': np.array([0.765840,1.398258,0.472728]),
'bed': np.array([2.114256,1.620300,0.927272]),
'bookshelf': np.array([0.404671,1.071108,1.688889]),
'chair': np.array([0.591958,0.552978,0.827272]),
'desk': np.array([0.695190,1.346299,0.736364]),
'dresser': np.array([0.528526,1.002642,1.172878]),
'night_stand': np.array([0.500618,0.632163,0.683424]),
'sofa': np.array([0.923508,1.867419,0.845495]),
'table': np.array([0.791118,1.279516,0.718182]),
'toilet': np.array([0.699104,0.454178,0.756250])}
self.mean_size_arr = np.zeros((self.num_size_cluster, 3))
for i in range(self.num_size_cluster):
self.mean_size_arr[i,:] = self.type_mean_size[self.class2type[i]]
def size2class(self, size, type_name):
''' Convert 3D box size (l,w,h) to size class and size residual '''
size_class = self.type2class[type_name]
size_residual = size - self.type_mean_size[type_name]
return size_class, size_residual
def class2size(self, pred_cls, residual):
''' Inverse function to size2class '''
mean_size = self.type_mean_size[self.class2type[pred_cls]]
return mean_size + residual
def angle2class(self, angle):
''' Convert continuous angle to discrete class
[optional] also returns a small regression number (residual) from
the class center angle to the current angle.
angle is in 0~2pi (or -pi~pi); class centers are at 0, 1*(2pi/N), 2*(2pi/N), ..., (N-1)*(2pi/N)
returns an int class in 0,1,...,N-1 and a residual such that
class*(2pi/N) + residual = angle
'''
num_class = self.num_heading_bin
angle = angle%(2*np.pi)
assert(angle>=0 and angle<=2*np.pi)
angle_per_class = 2*np.pi/float(num_class)
shifted_angle = (angle+angle_per_class/2)%(2*np.pi)
class_id = int(shifted_angle/angle_per_class)
residual_angle = shifted_angle - (class_id*angle_per_class+angle_per_class/2)
return class_id, residual_angle
def class2angle(self, pred_cls, residual, to_label_format=True):
''' Inverse function to angle2class '''
num_class = self.num_heading_bin
angle_per_class = 2*np.pi/float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle>np.pi:
angle = angle - 2*np.pi
return angle
def param2obb(self, center, heading_class, heading_residual, size_class, size_residual):
heading_angle = self.class2angle(heading_class, heading_residual)
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle*-1
return obb
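# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A minimal round-trip check of the encodings above, assuming the
# SunrgbdDatasetConfig defined in this module; the test values are hypothetical.
def _check_label_encoding_roundtrip():
    cfg = SunrgbdDatasetConfig()
    # heading: encode an angle into (bin, residual) and decode it back
    angle = 1.3  # radians, arbitrary
    cls_id, res = cfg.angle2class(angle)
    assert abs(cfg.class2angle(cls_id, res, to_label_format=False) - angle) < 1e-6
    # size: encode a box size relative to its class mean and decode it back
    size = np.array([2.0, 1.5, 0.9])
    size_cls, size_res = cfg.size2class(size, 'bed')
    assert np.allclose(cfg.class2size(size_cls, size_res), size)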
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/model_util_sunrgbd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Provides Python helper functions to read My SUNRGBD dataset.
Author: Charles R. Qi
Date: October, 2017
Updated by Charles R. Qi
Date: December, 2018
Note: removed basis loading.
'''
import numpy as np
import cv2
import os
import scipy.io as sio # to load .mat files for depth points
type2class={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
class2type = {type2class[t]:t for t in type2class}
def flip_axis_to_camera(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[:,1] *= -1
return pc2
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
pc2[:,2] *= -1
return pc2
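# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed sanity check: the two axis flips above are inverses, so converting
# depth -> camera -> depth returns the original points. The test points are hypothetical.
def _check_axis_flip_roundtrip():
    pts = np.array([[1.0, 2.0, 3.0], [-0.5, 0.0, 4.2]])
    assert np.allclose(flip_axis_to_depth(flip_axis_to_camera(pts)), pts)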
class SUNObject3d(object):
def __init__(self, line):
data = line.split(' ')
data[1:] = [float(x) for x in data[1:]]
self.classname = data[0]
self.xmin = data[1]
self.ymin = data[2]
self.xmax = data[1]+data[3]
self.ymax = data[2]+data[4]
self.box2d = np.array([self.xmin,self.ymin,self.xmax,self.ymax])
self.centroid = np.array([data[5],data[6],data[7]])
self.unused_dimension = np.array([data[8],data[9],data[10]])
self.w = data[8]
self.l = data[9]
self.h = data[10]
self.orientation = np.zeros((3,))
self.orientation[0] = data[11]
self.orientation[1] = data[12]
self.heading_angle = -1 * np.arctan2(self.orientation[1], self.orientation[0])
class SUNRGBD_Calibration(object):
''' Calibration matrices and utils
We define five coordinate systems for the SUN RGB-D dataset
camera coordinate:
Z is forward, Y is downward, X is rightward
depth coordinate:
Just change axis order and flip up-down axis from camera coord
upright depth coordinate: depth coordinate tilted by Rtilt such that Z is the gravity direction;
Z is the up-axis, Y is forward, X is rightward
upright camera coordinate:
Just change axis order and flip up-down axis from upright depth coordinate
image coordinate:
----> x-axis (u)
|
v
y-axis (v)
depth points are stored in upright depth coordinate.
labels for 3d box (basis, centroid, size) are in upright depth coordinate.
2d boxes are in image coordinate
We generate frustum point cloud and 3d box in upright camera coordinate
'''
def __init__(self, calib_filepath):
lines = [line.rstrip() for line in open(calib_filepath)]
Rtilt = np.array([float(x) for x in lines[0].split(' ')])
self.Rtilt = np.reshape(Rtilt, (3,3), order='F')
K = np.array([float(x) for x in lines[1].split(' ')])
self.K = np.reshape(K, (3,3), order='F')
self.f_u = self.K[0,0]
self.f_v = self.K[1,1]
self.c_u = self.K[0,2]
self.c_v = self.K[1,2]
def project_upright_depth_to_camera(self, pc):
''' project point cloud from depth coord to camera coordinate
Input: (N,3) Output: (N,3)
'''
# Project upright depth to depth coordinate
pc2 = np.dot(np.transpose(self.Rtilt), np.transpose(pc[:,0:3])) # (3,n)
return flip_axis_to_camera(np.transpose(pc2))
def project_upright_depth_to_image(self, pc):
''' Input: (N,3) Output: (N,2) UV and (N,) depth '''
pc2 = self.project_upright_depth_to_camera(pc)
uv = np.dot(pc2, np.transpose(self.K)) # (n,3)
uv[:,0] /= uv[:,2]
uv[:,1] /= uv[:,2]
return uv[:,0:2], pc2[:,2]
def project_upright_depth_to_upright_camera(self, pc):
return flip_axis_to_camera(pc)
def project_upright_camera_to_upright_depth(self, pc):
return flip_axis_to_depth(pc)
def project_image_to_camera(self, uv_depth):
n = uv_depth.shape[0]
x = ((uv_depth[:,0]-self.c_u)*uv_depth[:,2])/self.f_u
y = ((uv_depth[:,1]-self.c_v)*uv_depth[:,2])/self.f_v
pts_3d_camera = np.zeros((n,3))
pts_3d_camera[:,0] = x
pts_3d_camera[:,1] = y
pts_3d_camera[:,2] = uv_depth[:,2]
return pts_3d_camera
def project_image_to_upright_camerea(self, uv_depth):
pts_3d_camera = self.project_image_to_camera(uv_depth)
pts_3d_depth = flip_axis_to_depth(pts_3d_camera)
pts_3d_upright_depth = np.transpose(np.dot(self.Rtilt, np.transpose(pts_3d_depth)))
return self.project_upright_depth_to_upright_camera(pts_3d_upright_depth)
def rotx(t):
"""Rotation about the x-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0],
[0, c, -s],
[0, s, c]])
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def transform_from_rot_trans(R, t):
"""Transforation matrix from rotation matrix and translation vector."""
R = R.reshape(3, 3)
t = t.reshape(3, 1)
return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
def inverse_rigid_trans(Tr):
"""Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
"""
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])
inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])
return inv_Tr
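# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of the helpers above: build a 4x4 rigid transform from a z-rotation
# and a translation, invert its 3x4 [R|t] part, and verify the round trip on a few
# hypothetical points.
def _check_rigid_transform_roundtrip():
    R = rotz(0.4)
    t = np.array([1.0, -2.0, 0.5])
    T = transform_from_rot_trans(R, t)         # 4x4 homogeneous transform
    T_inv = inverse_rigid_trans(T[0:3, :])     # 3x4 inverse of [R|t]
    pts = np.random.rand(5, 3)
    pts_h = np.hstack([pts, np.ones((5, 1))])  # homogeneous coordinates
    moved = (T @ pts_h.T).T[:, 0:3]
    moved_h = np.hstack([moved, np.ones((5, 1))])
    assert np.allclose((T_inv @ moved_h.T).T, pts)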
def read_sunrgbd_label(label_filename):
lines = [line.rstrip() for line in open(label_filename)]
objects = [SUNObject3d(line) for line in lines]
return objects
def load_image(img_filename):
return cv2.imread(img_filename)
def load_depth_points(depth_filename):
depth = np.loadtxt(depth_filename)
return depth
def load_depth_points_mat(depth_filename):
depth = sio.loadmat(depth_filename)['instance']
return depth
def random_shift_box2d(box2d, shift_ratio=0.1):
''' Randomly shift box center, randomly scale width and height
'''
r = shift_ratio
xmin,ymin,xmax,ymax = box2d
h = ymax-ymin
w = xmax-xmin
cx = (xmin+xmax)/2.0
cy = (ymin+ymax)/2.0
cx2 = cx + w*r*(np.random.random()*2-1)
cy2 = cy + h*r*(np.random.random()*2-1)
h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1
w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1
return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])
def in_hull(p, hull):
from scipy.spatial import Delaunay
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p)>=0
def extract_pc_in_box3d(pc, box3d):
''' pc: (N,3), box3d: (8,3) '''
box3d_roi_inds = in_hull(pc[:,0:3], box3d)
return pc[box3d_roi_inds,:], box3d_roi_inds
def my_compute_box_3d(center, size, heading_angle):
R = rotz(-1*heading_angle)
l,w,h = size
x_corners = [-l,l,l,-l,-l,l,l,-l]
y_corners = [w,w,-w,-w,w,w,-w,-w]
z_corners = [h,h,h,h,-h,-h,-h,-h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0,:] += center[0]
corners_3d[1,:] += center[1]
corners_3d[2,:] += center[2]
return np.transpose(corners_3d)
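# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of my_compute_box_3d with extract_pc_in_box3d: build the eight
# corners of a rotated box (size is given as half-lengths l,w,h, matching the
# corner formulas above) and keep only the points that fall inside it. All values
# below are hypothetical.
def _demo_points_in_box():
    corners = my_compute_box_3d(center=np.array([0.0, 0.0, 0.0]),
                                size=np.array([1.0, 0.5, 0.5]),
                                heading_angle=0.3)
    pc = np.random.uniform(-2, 2, size=(1000, 3))
    pts_in_box, mask = extract_pc_in_box3d(pc, corners)
    return pts_in_box, mask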
def compute_box_3d(obj, calib):
''' Takes an object and a projection matrix (P) and projects the 3d
bounding box into the image plane.
Returns:
corners_2d: (8,2) array in image coord.
corners_3d: (8,3) array in upright depth coord.
'''
center = obj.centroid
# compute rotational matrix around yaw axis
R = rotz(-1*obj.heading_angle)
#b,a,c = dimension
#print R, a,b,c
# 3d bounding box dimensions
l = obj.l # along heading arrow
w = obj.w # perpendicular to heading arrow
h = obj.h
# rotate and translate 3d bounding box
x_corners = [-l,l,l,-l,-l,l,l,-l]
y_corners = [w,w,-w,-w,w,w,-w,-w]
z_corners = [h,h,h,h,-h,-h,-h,-h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0,:] += center[0]
corners_3d[1,:] += center[1]
corners_3d[2,:] += center[2]
# project the 3d bounding box into the image plane
corners_2d,_ = calib.project_upright_depth_to_image(np.transpose(corners_3d))
#print 'corners_2d: ', corners_2d
return corners_2d, np.transpose(corners_3d)
def compute_orientation_3d(obj, calib):
''' Takes an object and a projection matrix (P) and projects the 3d
object orientation vector into the image plane.
Returns:
orientation_2d: (2,2) array in image coord.
orientation_3d: (2,3) array in depth coord.
'''
# orientation in object coordinate system
ori = obj.orientation
orientation_3d = np.array([[0, ori[0]],[0, ori[1]],[0,0]])
center = obj.centroid
orientation_3d[0,:] = orientation_3d[0,:] + center[0]
orientation_3d[1,:] = orientation_3d[1,:] + center[1]
orientation_3d[2,:] = orientation_3d[2,:] + center[2]
# project orientation into the image plane
orientation_2d,_ = calib.project_upright_depth_to_image(np.transpose(orientation_3d))
return orientation_2d, np.transpose(orientation_3d)
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
''' Draw 3d bounding box in image
qs: (8,2) array of vertices for the 3d box in following order:
  1 -------- 0
 /|         /|
2 -------- 3 .
| |        | |
. 5 -------- 4
|/         |/
6 -------- 7
'''
qs = qs.astype(np.int32)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j=k,(k+1)%4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3
i,j=k+4,(k+1)%4 + 4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
i,j=k,k+4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
return image
import pickle
import gzip
def save_zipped_pickle(obj, filename, protocol=-1):
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f, protocol)
def load_zipped_pickle(filename):
with gzip.open(filename, 'rb') as f:
loaded_object = pickle.load(f)
return loaded_object
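# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed round-trip usage of the gzip+pickle helpers above; the file name is
# hypothetical and the stored object is arbitrary.
def _demo_zipped_pickle_roundtrip(path='example_obj.pkl.gz'):
    save_zipped_pickle({'corners': np.zeros((8, 3))}, path)
    return load_zipped_pickle(path)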
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Helper class and functions for loading SUN RGB-D objects
Author: Charles R. Qi
Date: December, 2018
Note: removed unused code for frustum preparation.
Changed the way data is visualized (removed dependency on mayavi).
Load depth with scipy.io
'''
import os
import sys
import numpy as np
import cv2
import argparse
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils/'))
import pc_util
import sunrgbd_utils
DEFAULT_TYPE_WHITELIST = ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
class sunrgbd_object(object):
''' Load and parse object data '''
def __init__(self, root_dir, split='training', use_v1=False):
self.root_dir = root_dir
self.split = split
assert(self.split=='training')
self.split_dir = os.path.join(root_dir)
if split == 'training':
self.num_samples = 10335
elif split == 'testing':
self.num_samples = 2860
else:
print('Unknown split: %s' % (split))
exit(-1)
self.image_dir = os.path.join(self.split_dir, 'image')
self.calib_dir = os.path.join(self.split_dir, 'calib')
self.depth_dir = os.path.join(self.split_dir, 'depth')
if use_v1:
self.label_dir = os.path.join(self.split_dir, 'label_v1')
else:
self.label_dir = os.path.join(self.split_dir, 'label')
def __len__(self):
return self.num_samples
def get_image(self, idx):
img_filename = os.path.join(self.image_dir, '%06d.jpg'%(idx))
return sunrgbd_utils.load_image(img_filename)
def get_depth(self, idx):
depth_filename = os.path.join(self.depth_dir, '%06d.mat'%(idx))
return sunrgbd_utils.load_depth_points_mat(depth_filename)
def get_calibration(self, idx):
calib_filename = os.path.join(self.calib_dir, '%06d.txt'%(idx))
return sunrgbd_utils.SUNRGBD_Calibration(calib_filename)
def get_label_objects(self, idx):
label_filename = os.path.join(self.label_dir, '%06d.txt'%(idx))
return sunrgbd_utils.read_sunrgbd_label(label_filename)
def data_viz(data_dir, dump_dir=os.path.join(BASE_DIR, 'data_viz_dump')):
''' Examine and visualize SUN RGB-D data. '''
sunrgbd = sunrgbd_object(data_dir)
idxs = np.array(range(1,len(sunrgbd)+1))
np.random.seed(0)
np.random.shuffle(idxs)
for idx in range(len(sunrgbd)):
data_idx = idxs[idx]
print('-'*10, 'data index: ', data_idx)
pc = sunrgbd.get_depth(data_idx)
print('Point cloud shape:', pc.shape)
# Project points to image
calib = sunrgbd.get_calibration(data_idx)
uv,d = calib.project_upright_depth_to_image(pc[:,0:3])
print('Point UV:', uv)
print('Point depth:', d)
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap('hsv', 256)
cmap = np.array([cmap(i) for i in range(256)])[:,:3]*255
img = sunrgbd.get_image(data_idx)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(uv.shape[0]):
depth = d[i]
color = cmap[int(120.0/depth),:]
cv2.circle(img, (int(np.round(uv[i,0])), int(np.round(uv[i,1]))), 2,
color=tuple(color), thickness=-1)
if not os.path.exists(dump_dir):
os.mkdir(dump_dir)
Image.fromarray(img).save(os.path.join(dump_dir,'img_depth.jpg'))
# Load box labels
objects = sunrgbd.get_label_objects(data_idx)
print('Objects:', objects)
# Draw 2D boxes on image
img = sunrgbd.get_image(data_idx)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i,obj in enumerate(objects):
cv2.rectangle(img, (int(obj.xmin),int(obj.ymin)),
(int(obj.xmax),int(obj.ymax)), (0,255,0), 2)
cv2.putText(img, '%d %s'%(i,obj.classname), (max(int(obj.xmin),15),
max(int(obj.ymin),15)), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(255,0,0), 2)
Image.fromarray(img).save(os.path.join(dump_dir, 'img_box2d.jpg'))
# Dump OBJ files for the colored point cloud
for num_point in [10000,20000,40000,80000]:
sampled_pcrgb = pc_util.random_sampling(pc, num_point)
pc_util.write_ply_rgb(sampled_pcrgb[:,0:3],
(sampled_pcrgb[:,3:]*256).astype(np.int8),
os.path.join(dump_dir, 'pcrgb_%dk.obj'%(num_point//1000)))
# Dump OBJ files for 3D bounding boxes
# l,w,h correspond to dx,dy,dz
# heading angle is from +X rotating towards -Y
# (+X is 0 degrees, -Y is 90 degrees)
oriented_boxes = []
for obj in objects:
obb = np.zeros((7))
obb[0:3] = obj.centroid
# Some conversion to map with default setting of w,l,h
# and angle in box dumping
obb[3:6] = np.array([obj.l,obj.w,obj.h])*2
obb[6] = -1 * obj.heading_angle
print('Object cls, heading, l, w, h:',\
obj.classname, obj.heading_angle, obj.l, obj.w, obj.h)
oriented_boxes.append(obb)
if len(oriented_boxes)>0:
oriented_boxes = np.vstack(tuple(oriented_boxes))
pc_util.write_oriented_bbox(oriented_boxes,
os.path.join(dump_dir, 'obbs.ply'))
else:
print('-'*30)
continue
# Draw 3D boxes on depth points
box3d = []
ori3d = []
for obj in objects:
corners_3d_image, corners_3d = sunrgbd_utils.compute_box_3d(obj, calib)
ori_3d_image, ori_3d = sunrgbd_utils.compute_orientation_3d(obj, calib)
print('Corners 3D: ', corners_3d)
box3d.append(corners_3d)
ori3d.append(ori_3d)
pc_box3d = np.concatenate(box3d, 0)
pc_ori3d = np.concatenate(ori3d, 0)
print(pc_box3d.shape)
print(pc_ori3d.shape)
pc_util.write_ply(pc_box3d, os.path.join(dump_dir, 'box3d_corners.ply'))
pc_util.write_ply(pc_ori3d, os.path.join(dump_dir, 'box3d_ori.ply'))
print('-'*30)
print('Point clouds and bounding boxes saved to PLY files under %s'%(dump_dir))
print('Type anything to continue to the next sample...')
input()
def extract_sunrgbd_data(idx_filename, split, output_folder, num_point=20000,
type_whitelist=DEFAULT_TYPE_WHITELIST,
save_votes=False, use_v1=False, skip_empty_scene=True):
""" Extract scene point clouds and
bounding boxes (centroids, box sizes, heading angles, semantic classes).
Dumped point clouds and boxes are in upright depth coord.
Args:
idx_filename: a TXT file where each line is an int number (index)
split: training or testing
save_votes: whether to compute and save Ground truth votes.
use_v1: use the SUN RGB-D V1 data
skip_empty_scene: if True, skip scenes that contain no object (no object in whitelist)
Dumps:
<id>_pc.npz of (N,6) where N is for number of subsampled points and 6 is
for XYZ and RGB (in 0~1) in upright depth coord
<id>_bbox.npy of (K,8) where K is the number of objects, 8 is for
centroids (cx,cy,cz), dimension (l,w,h), heading_angle and semantic_class
<id>_votes.npz of (N,10) with 0/1 indicating whether the point belongs to an object,
then three sets of GT votes for up to three objects. If the point is only in one
object's OBB, then the three GT votes are the same.
"""
dataset = sunrgbd_object('./sunrgbd_trainval', split, use_v1=use_v1)
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for data_idx in data_idx_list:
print('------------- ', data_idx)
objects = dataset.get_label_objects(data_idx)
# Skip scenes with 0 object
if skip_empty_scene and (len(objects)==0 or \
len([obj for obj in objects if obj.classname in type_whitelist])==0):
continue
object_list = []
for obj in objects:
if obj.classname not in type_whitelist: continue
obb = np.zeros((8))
obb[0:3] = obj.centroid
# Note that, unlike in data_viz, we do not multiply l,w,h by 2,
# nor do we flip the heading angle
obb[3:6] = np.array([obj.l,obj.w,obj.h])
obb[6] = obj.heading_angle
obb[7] = sunrgbd_utils.type2class[obj.classname]
object_list.append(obb)
if len(object_list)==0:
obbs = np.zeros((0,8))
else:
obbs = np.vstack(object_list) # (K,8)
pc_upright_depth = dataset.get_depth(data_idx)
pc_upright_depth_subsampled = pc_util.random_sampling(pc_upright_depth, num_point)
np.savez_compressed(os.path.join(output_folder,'%06d_pc.npz'%(data_idx)),
pc=pc_upright_depth_subsampled)
np.save(os.path.join(output_folder, '%06d_bbox.npy'%(data_idx)), obbs)
if save_votes:
N = pc_upright_depth_subsampled.shape[0]
point_votes = np.zeros((N,10)) # 3 votes and 1 vote mask
point_vote_idx = np.zeros((N)).astype(np.int32) # in the range of [0,2]
indices = np.arange(N)
for obj in objects:
if obj.classname not in type_whitelist: continue
try:
# Find all points in this object's OBB
box3d_pts_3d = sunrgbd_utils.my_compute_box_3d(obj.centroid,
np.array([obj.l,obj.w,obj.h]), obj.heading_angle)
pc_in_box3d,inds = sunrgbd_utils.extract_pc_in_box3d(\
pc_upright_depth_subsampled, box3d_pts_3d)
# Assign first dimension to indicate it is in an object box
point_votes[inds,0] = 1
# Add the votes (all 0 if the point is not in any object's OBB)
votes = np.expand_dims(obj.centroid,0) - pc_in_box3d[:,0:3]
sparse_inds = indices[inds] # turn dense True,False inds to sparse number-wise inds
for i in range(len(sparse_inds)):
j = sparse_inds[i]
point_votes[j, int(point_vote_idx[j]*3+1):int((point_vote_idx[j]+1)*3+1)] = votes[i,:]
# Populate votes with the first vote
if point_vote_idx[j] == 0:
point_votes[j,4:7] = votes[i,:]
point_votes[j,7:10] = votes[i,:]
point_vote_idx[inds] = np.minimum(2, point_vote_idx[inds]+1)
except:
print('ERROR ----', data_idx, obj.classname)
np.savez_compressed(os.path.join(output_folder, '%06d_votes.npz'%(data_idx)),
point_votes = point_votes)
def get_box3d_dim_statistics(idx_filename,
type_whitelist=DEFAULT_TYPE_WHITELIST,
save_path=None):
""" Collect 3D bounding box statistics.
Used for computing mean box sizes. """
dataset = sunrgbd_object('./sunrgbd_trainval')
dimension_list = []
type_list = []
ry_list = []
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
for data_idx in data_idx_list:
print('------------- ', data_idx)
calib = dataset.get_calibration(data_idx) # 3 by 4 matrix
objects = dataset.get_label_objects(data_idx)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist: continue
heading_angle = -1 * np.arctan2(obj.orientation[1], obj.orientation[0])
dimension_list.append(np.array([obj.l,obj.w,obj.h]))
type_list.append(obj.classname)
ry_list.append(heading_angle)
import pickle
if save_path is not None:
with open(save_path,'wb') as fp:
pickle.dump(type_list, fp)
pickle.dump(dimension_list, fp)
pickle.dump(ry_list, fp)
# Get average box size for different categories
box3d_pts = np.vstack(dimension_list)
for class_type in sorted(set(type_list)):
cnt = 0
box3d_list = []
for i in range(len(dimension_list)):
if type_list[i]==class_type:
cnt += 1
box3d_list.append(dimension_list[i])
median_box3d = np.median(box3d_list,0)
print("\'%s\': np.array([%f,%f,%f])," % \
(class_type, median_box3d[0]*2, median_box3d[1]*2, median_box3d[2]*2))
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--viz', action='store_true', help='Run data visualization.')
parser.add_argument('--compute_median_size', action='store_true', help='Compute median 3D bounding box sizes for each class.')
parser.add_argument('--gen_v1_data', action='store_true', help='Generate V1 dataset.')
parser.add_argument('--gen_v2_data', action='store_true', help='Generate V2 dataset.')
args = parser.parse_args()
if args.viz:
data_viz(os.path.join(BASE_DIR, 'sunrgbd_trainval'))
exit()
if args.compute_median_size:
get_box3d_dim_statistics(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'))
exit()
if args.gen_v1_data:
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v1_train'),
save_votes=True, num_point=50000, use_v1=True, skip_empty_scene=False)
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/val_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v1_val'),
save_votes=True, num_point=50000, use_v1=True, skip_empty_scene=False)
if args.gen_v2_data:
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v2_train'),
save_votes=True, num_point=50000, use_v1=False, skip_empty_scene=False)
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/val_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v2_val'),
save_votes=True, num_point=50000, use_v1=False, skip_empty_scene=False)
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_data.py |
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is rightward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,w,h) are *half length* of the object sizes
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: Charles R. Qi
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(ROOT_DIR,
'sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_%s'%(split_set))
else:
self.data_path = os.path.join(ROOT_DIR,
'sunrgbd/sunrgbd_pc_bbox_votes_50k_v2_%s'%(split_set))
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.scan_names = self.scan_names[:int(len(self.scan_names))]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER-1
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote then X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_cloud = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
point_cloud = point_cloud[:,0:6]
point_cloud[:,3:] = (point_cloud[:,3:]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean sizes used by size2class are full box edge lengths,
# while sunrgbd_data.py dumped *half* lengths l,w,h, so we have to multiply by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)[:,:3]
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:bboxes.shape[0]] = bboxes[:,-1] # from 0 to 9
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
ret_dict['scan_idx'] = np.array(idx).astype(np.int64)
ret_dict['max_gt_bboxes'] = max_bboxes
ret_dict['pcl_color'] = point_cloud.astype(np.float32)[:,3:6]
return ret_dict
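# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed way to wrap the dataset above in a standard PyTorch DataLoader for
# training; the batch size and worker count are hypothetical.
def _make_loader(split_set='train', batch_size=8, num_workers=4):
    from torch.utils.data import DataLoader
    dataset = SunrgbdDetectionVotesDataset(split_set=split_set, num_points=20000,
                                           use_color=False, use_height=True,
                                           augment=(split_set == 'train'))
    return DataLoader(dataset, batch_size=batch_size, shuffle=(split_set == 'train'),
                      num_workers=num_workers)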
def viz_votes(pc, point_votes, point_votes_mask):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_obj_voted2 = pc_obj + point_votes[inds,3:6]
pc_obj_voted3 = pc_obj + point_votes[inds,6:9]
pc_util.write_ply(pc_obj, 'pc_obj.ply')
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1.ply')
pc_util.write_ply(pc_obj_voted2, 'pc_obj_voted2.ply')
pc_util.write_ply(pc_obj_voted3, 'pc_obj_voted3.ply')
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = DC.class2angle(angle_classes[i], angle_residuals[i])
box_size = DC.class2size(size_classes[i], size_residuals[i])
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs.ply')
pc_util.write_ply(label[mask==1,:], 'gt_centroids.ply')
def get_sem_cls_statistics():
""" Compute number of objects for each semantic class """
d = SunrgbdDetectionVotesDataset(use_height=True, use_color=True, use_v1=True, augment=True)
sem_cls_cnt = {}
for i in range(len(d)):
if i%10==0: print(i)
sample = d[i]
pc = sample['point_clouds']
sem_cls = sample['sem_cls_label']
mask = sample['box_label_mask']
for j in range(len(sem_cls)):
if mask[j] == 0: continue
if sem_cls[j] not in sem_cls_cnt:
sem_cls_cnt[sem_cls[j]] = 0
sem_cls_cnt[sem_cls[j]] += 1
print(sem_cls_cnt)
if __name__=='__main__':
d = SunrgbdDetectionVotesDataset(use_height=True, use_color=True, use_v1=True, augment=True)
sample = d[200]
print(sample['vote_label'].shape, sample['vote_label_mask'].shape)
pc_util.write_ply(sample['point_clouds'], 'pc.ply')
viz_votes(sample['point_clouds'], sample['vote_label'], sample['vote_label_mask'])
viz_obb(sample['point_clouds'], sample['center_label'], sample['box_label_mask'],
sample['heading_class_label'], sample['heading_residual_label'],
sample['size_class_label'], sample['size_residual_label'])
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_detection_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for metric evaluation.
Author: Or Litany and Charles R. Qi
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
def calc_iou(box_a, box_b):
"""Computes IoU of two axis aligned bboxes.
Args:
box_a, box_b: 6 values each, [xmin, ymin, zmin, xmax, ymax, zmax]
Returns:
iou
"""
max_a = box_a[3:6]
max_b = box_b[3:6]
min_max = np.array([max_a, max_b]).min(0)
min_a = box_a[0:3]
min_b = box_b[0:3]
max_min = np.array([min_a, min_b]).max(0)
if not ((min_max > max_min).all()):
return 0.0
intersection = (min_max - max_min).prod()
vol_a = (box_a[3:6] - box_a[0:3]).prod()
vol_b = (box_b[3:6] - box_b[0:3]).prod()
union = vol_a + vol_b - intersection
return 1.0*intersection / union
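# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of calc_iou with boxes given as [xmin, ymin, zmin, xmax, ymax, zmax];
# the two boxes below are hypothetical and overlap in a 1x1x1 corner.
def _demo_calc_iou():
    box_a = np.array([0.0, 0.0, 0.0, 2.0, 2.0, 2.0])
    box_b = np.array([1.0, 1.0, 1.0, 3.0, 3.0, 3.0])
    return calc_iou(box_a, box_b)  # 1 / (8 + 8 - 1) = 1/15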
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/metric_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os, sys, argparse
import inspect
from copy import deepcopy
from evaluate_object_detection_helper import eval_det
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import util
import util_3d
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='', help='output file [default: pred_path/object_detection_evaluation.txt]')
opt = parser.parse_args()
if opt.output_file == '':
opt.output_file = os.path.join(opt.pred_path, 'object_detection_evaluation.txt')
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
ID_TO_LABEL = {}
LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
opt.overlaps = np.array([0.5,0.25])
# minimum region size for evaluation [verts]
opt.min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
opt.distance_threshes = np.array( [ float('inf') ] )
# distance confidences
opt.distance_confs = np.array( [ -float('inf') ] )
def compute_averages(aps):
d_inf = 0
o50 = np.where(np.isclose(opt.overlaps,0.5))
o25 = np.where(np.isclose(opt.overlaps,0.25))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def print_results(avgs):
sep = ""
col1 = ":"
lineLen = 64
print("")
print("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
print(line)
print("#"*lineLen)
for (li,label_name) in enumerate(CLASS_LABELS):
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
print(line)
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
print("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
print(line)
print("")
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap50, ap25]]) + '\n')
def evaluate(pred_files, gt_files, pred_path, output_file):
print('evaluating', len(pred_files), 'scans...')
overlaps = opt.overlaps
ap_scores = np.zeros((1, len(CLASS_LABELS), len(overlaps)), float)
pred_all = {}
gt_all = {}
for i in range(len(pred_files)):
matches_key = os.path.abspath(gt_files[i])
image_id = os.path.basename(matches_key)
# assign gt to predictions
pred_all[image_id] = []
gt_all[image_id] = []
#read prediction file
lines = open(pred_files[i]).read().splitlines()
for line in lines:
parts = line.split(' ')
if len(parts) != 8:
util.print_error('invalid object detection prediction file. Expected (per line): [minx] [miny] [minz] [maxx] [maxy] [maxz] [label_id] [score]', user_fault=True)
bbox = np.array([float(parts[0]), float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4]), float(parts[5])])
class_id =int(float(parts[6]))
if not class_id in VALID_CLASS_IDS:
continue
classname = ID_TO_LABEL[class_id]
score = float(parts[7])
pred_all[image_id].append((classname, bbox, score))
#read ground truth file
lines = open(gt_files[i]).read().splitlines()
for line in lines:
parts = line.split(' ')
if len(parts) != 7:
util.print_error('invalid object detection ground truth file. Expected (per line): [minx] [miny] [minz] [maxx] [maxy] [maxz] [label_id]', user_fault=True)
bbox = np.array([float(parts[0]), float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4]), float(parts[5])])
class_id =int(float(parts[6]))
if not class_id in VALID_CLASS_IDS:
continue
classname = ID_TO_LABEL[class_id]
gt_all[image_id].append((classname, bbox))
for oi, overlap_th in enumerate(overlaps):
_,_,ap_dict = eval_det(pred_all, gt_all, ovthresh=overlap_th)
for label in ap_dict:
id = CLASS_LABELS.index(label)
ap_scores[0,id, oi] = ap_dict[label]
#print(ap_scores)
avgs = compute_averages(ap_scores)
# print
print_results(avgs)
write_result_file(avgs, output_file)
def main():
pred_files = [f for f in os.listdir(opt.pred_path) if f.endswith('.txt') and f != 'object_detection_evaluation.txt']
gt_files = []
if len(pred_files) == 0:
util.print_error('No result files found.', user_fault=True)
for i in range(len(pred_files)):
gt_file = os.path.join(opt.gt_path, pred_files[i])
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_files[i]), user_fault=True)
gt_files.append(gt_file)
pred_files[i] = os.path.join(opt.pred_path, pred_files[i])
# evaluate
evaluate(pred_files, gt_files, opt.pred_path, opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/evaluate_object_detection.py |
import os, sys
import csv
import numpy as np
import imageio
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if the keys represent ints, convert them
if represents_int(list(mapping.keys())[0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144)
]
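# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of visualize_label_image: color a small random nyu40-style label
# map with the palette above and write it to a hypothetical output path.
def _demo_visualize_labels(out_path='labels_vis.png'):
    labels = np.random.randint(0, 41, size=(64, 64))
    visualize_label_image(out_path, labels)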
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/util.py |
import os, sys
import json
import numpy as np
from plyfile import PlyData, PlyElement
import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
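# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of transform_points with a simple 4x4 translation matrix;
# the points and offset are hypothetical.
def _demo_transform_points():
    T = np.eye(4)
    T[0:3, 3] = [1.0, 0.0, -2.0]                         # translate by (1, 0, -2)
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
    return transform_points(T, pts)                      # [[1, 0, -2], [2, 2, 1]]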
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'pred_mask'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
output_mask_file = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
f.write('%s %d %f\n' % (output_mask_file, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
export_ids(output_mask_file, mask)
def export_detection_ids_for_eval(filename, mesh_vertices, label_ids, instance_ids):
'''export the prediction file for object detection task
'''
assert label_ids.shape[0] == instance_ids.shape[0]
insts = np.unique(instance_ids)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
inst_coord = mesh_vertices[loc[0]]
max_coord = np.amax(inst_coord, axis = 0)
min_coord = np.amin(inst_coord, axis = 0)
maxx, maxy, maxz = max_coord[0], max_coord[1], max_coord[2]
minx, miny, minz = min_coord[0], min_coord[1], min_coord[2]
label_id = label_ids[loc[0][0]]
f.write('%.2f %.2f %.2f %.2f %.2f %.2f %d\n' % (minx, miny, minz, maxx, maxy, maxz, label_id))
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]', user_fault=True)
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path', user_fault=True)
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename), user_fault=True)
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
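# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of get_instances on a toy per-vertex instance-id array, using the
# label_id = instance_id // 1000 encoding from the Instance class above. The ids,
# class list, and mapping below are hypothetical.
def _demo_get_instances():
    ids = np.array([0, 0, 4001, 4001, 4001, 5002, 5002])  # two instances, labels 4 and 5
    class_ids = [4, 5]
    class_labels = ['bed', 'chair']
    id2label = {4: 'bed', 5: 'chair'}
    return get_instances(ids, class_ids, class_labels, id2label)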
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/util_3d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
precision-recall and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
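# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Assumed usage of voc_ap on a tiny precision/recall curve; the values are
# hypothetical and only meant to exercise both metric variants.
def _demo_voc_ap():
    rec = np.array([0.1, 0.1, 0.2, 0.4, 0.4])
    prec = np.array([1.0, 0.5, 0.66, 0.5, 0.4])
    return voc_ap(rec, prec), voc_ap(rec, prec, use_07_metric=True)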
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from metric_util import calc_iou # axis-aligned 3D box IoU
def get_iou(bb1, bb2):
""" Compute IoU of two bounding boxes.
** Define your box IoU function HERE **
"""
#pass
iou3d = calc_iou(bb1, bb2)
return iou3d
#from lib.utils.box_util import box3d_iou
#def get_iou_obb(bb1,bb2):
# iou3d, iou2d = box3d_iou(bb1,bb2)
# return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array of size 6
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id]) # bbox: (n,6). n = number of GT boxes for this img_id; each row holds the 6 box values
det = [False] * len(bbox) # length = n
npos += len(bbox) # sum of GT bounding boxes in all scenes
class_recs[img_id] = {'bbox': bbox, 'det': det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box,score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence) # sort in descending order. Meaning: largest confidence first
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids) #nd: number of bounding boxes in all scenes
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
#if d%100==0: print(d)
R = class_recs[image_ids[d]] # GT record (boxes + matched flags) for the image of this detection
bb = BB[d,...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
if iou > ovmax:
ovmax = iou # ovmax is the largest iou between BB and all ground truth boxes in BBGT
jmax = j
#print d, ovmax
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1. # TP: this detection overlaps a GT box above ovthresh and that GT box has no detection assigned to it yet
R['det'][jmax] = 1
else:
fp[d] = 1. # FP: the best-matching GT box was already claimed by a higher-confidence detection
else:
fp[d] = 1. # FP: this detection does not overlap any GT box above ovthresh
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
#print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
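# Illustrative sketch (not part of the original file): the per-class input
# layout eval_det_cls expects. Boxes are size-6 arrays in whatever layout
# get_iou/calc_iou expects; the values below are placeholders only.
#
#   pred = {'scene0000_00': [(np.array([0., 0., 0., 1., 1., 1.]), 0.9)]}
#   gt   = {'scene0000_00': [np.array([0., 0., 0., 1., 1., 1.])]}
#   rec, prec, ap = eval_det_cls(pred, gt, ovthresh=0.25)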
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]} where
_img_id: anything, can be integer or string
_classname: can be string or integer
_bbox: numpy array of size 6
_score: float
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
return rec, prec, ap
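# Illustrative sketch (not part of the original file): eval_det takes per-scene
# lists of (classname, bbox, score) predictions and (classname, bbox) labels
# and returns per-class recall/precision/AP dicts. Placeholder values only.
#
#   pred_all = {'scene0000_00': [('chair', np.zeros(6), 0.8)]}
#   gt_all   = {'scene0000_00': [('chair', np.zeros(6))]}
#   rec, prec, ap = eval_det(pred_all, gt_all, ovthresh=0.25)
#   # ap['chair'] is the average precision for the 'chair' class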
from multiprocessing import Pool
def eval_det_multiprocessing(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(eval_det_cls_wrapper, [(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func) for classname in gt.keys() if classname in pred])
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
print(classname, ap[classname])
return rec, prec, ap
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/evaluation/evaluate_object_detection_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Load Scannet scenes with vertices and ground truth labels
for semantic and instance segmentations
"""
# python imports
import math
import os, sys, argparse
import inspect
import json
import pdb
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
import scannet_utils
def read_aggregation(filename):
assert os.path.isfile(filename)
object_id_to_segs = {}
label_to_segs = {}
with open(filename) as f:
data = json.load(f)
num_objects = len(data['segGroups'])
for i in range(num_objects):
object_id = data['segGroups'][i]['objectId'] + 1 # instance ids should be 1-indexed
label = data['segGroups'][i]['label']
segs = data['segGroups'][i]['segments']
object_id_to_segs[object_id] = segs
if label in label_to_segs:
label_to_segs[label].extend(segs)
else:
label_to_segs[label] = segs
return object_id_to_segs, label_to_segs
def read_segmentation(filename):
assert os.path.isfile(filename)
seg_to_verts = {}
with open(filename) as f:
data = json.load(f)
num_verts = len(data['segIndices'])
for i in range(num_verts):
seg_id = data['segIndices'][i]
if seg_id in seg_to_verts:
seg_to_verts[seg_id].append(i)
else:
seg_to_verts[seg_id] = [i]
return seg_to_verts, num_verts
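# Rough shape of the two ScanNet annotation files parsed above (sketch only;
# see the official ScanNet documentation for the authoritative format):
#   *.aggregation.json:              {"segGroups": [{"objectId": 0,
#                                      "label": "chair", "segments": [...]}]}
#   *_vh_clean_2.0.010000.segs.json: {"segIndices": [seg_id_of_vertex_0, ...]}
# read_aggregation shifts objectId by +1 so instance ids start at 1, and
# read_segmentation inverts segIndices into a seg_id -> vertex-index list map.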
def export(mesh_file, agg_file, seg_file, meta_file, label_map_file, output_file=None):
""" points are XYZ RGB (RGB in 0-255),
semantic label as nyu40 ids,
instance label as 1-#instance,
box as (cx,cy,cz,dx,dy,dz,semantic_label)
"""
label_map = scannet_utils.read_label_mapping(label_map_file,
label_from='raw_category', label_to='nyu40id')
mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file)
# Load scene axis alignment matrix
lines = open(meta_file).readlines()
for line in lines:
if 'axisAlignment' in line:
axis_align_matrix = [float(x) \
for x in line.rstrip().strip('axisAlignment = ').split(' ')]
break
axis_align_matrix = np.array(axis_align_matrix).reshape((4,4))
pts = np.ones((mesh_vertices.shape[0], 4))
pts[:,0:3] = mesh_vertices[:,0:3]
pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4
mesh_vertices[:,0:3] = pts[:,0:3]
# Load semantic and instance labels
object_id_to_segs, label_to_segs = read_aggregation(agg_file)
seg_to_verts, num_verts = read_segmentation(seg_file)
label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
object_id_to_label_id = {}
for label, segs in label_to_segs.items():
label_id = label_map[label]
for seg in segs:
verts = seg_to_verts[seg]
label_ids[verts] = label_id
instance_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
num_instances = len(np.unique(list(object_id_to_segs.keys())))
for object_id, segs in object_id_to_segs.items():
for seg in segs:
verts = seg_to_verts[seg]
instance_ids[verts] = object_id
if object_id not in object_id_to_label_id:
object_id_to_label_id[object_id] = label_ids[verts][0]
instance_bboxes = np.zeros((num_instances,7))
for obj_id in object_id_to_segs:
label_id = object_id_to_label_id[obj_id]
obj_pc = mesh_vertices[instance_ids==obj_id, 0:3]
if len(obj_pc) == 0: continue
# Compute axis aligned box
# An axis aligned bounding box is parameterized by
# (cx,cy,cz) and (dx,dy,dz) and label id
# where (cx,cy,cz) is the center point of the box,
# dx is the x-axis length of the box.
xmin = np.min(obj_pc[:,0])
ymin = np.min(obj_pc[:,1])
zmin = np.min(obj_pc[:,2])
xmax = np.max(obj_pc[:,0])
ymax = np.max(obj_pc[:,1])
zmax = np.max(obj_pc[:,2])
bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2,
xmax-xmin, ymax-ymin, zmax-zmin, label_id])
# NOTE: this assumes obj_id is in 1,2,3,...,NUM_INSTANCES
instance_bboxes[obj_id-1,:] = bbox
if output_file is not None:
np.save(output_file+'_vert.npy', mesh_vertices)
np.save(output_file+'_sem_label.npy', label_ids)
np.save(output_file+'_ins_label.npy', instance_ids)
np.save(output_file+'_bbox.npy', instance_bboxes)
return mesh_vertices, label_ids, instance_ids,\
instance_bboxes, object_id_to_label_id
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--scan_path', required=True, help='path to scannet scene (e.g., data/ScanNet/v2/scene0000_00)')
parser.add_argument('--output_file', required=True, help='output file')
parser.add_argument('--label_map_file', required=True, help='path to scannetv2-labels.combined.tsv')
opt = parser.parse_args()
scan_name = os.path.split(opt.scan_path)[-1]
mesh_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.ply')
agg_file = os.path.join(opt.scan_path, scan_name + '.aggregation.json')
seg_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.0.010000.segs.json')
meta_file = os.path.join(opt.scan_path, scan_name + '.txt') # includes axisAlignment info for the train set scans.
export(mesh_file, agg_file, seg_file, meta_file, opt.label_map_file, opt.output_file)
if __name__ == '__main__':
main()
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/load_scannet_data.py
|
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for object bounding box regression.
An axis aligned bounding box is parameterized by (cx,cy,cz) and (dx,dy,dz)
where (cx,cy,cz) is the center point of the box, dx is the x-axis length of the box.
"""
import os
import sys
import torch
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
from lib.utils import pc_util
from datasets.scannet.model_util_scannet import rotate_aligned_boxes, ScannetDatasetConfig
DC = ScannetDatasetConfig()
MAX_NUM_OBJ = 64
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
class ScannetDetectionDataset(Dataset):
def __init__(self, split_set='train', num_points=20000,
use_color=False, use_height=False, augment=False, by_scenes=None, by_points=None):
self.data_path = os.path.join(BASE_DIR, 'scannet_train_detection_data')
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
#if split_set=='all':
# self.scan_names = all_scan_names
if split_set in ['train', 'val', 'test']:
split_filenames = os.path.join(ROOT_DIR, 'scannet/meta_data',
'scannetv2_{}.txt'.format(split_set))
if by_scenes is not None and split_set == 'train':
split_filenames = by_scenes
self.sampled_bbox = {}
if by_points is not None and split_set == 'train':
self.sampled_bbox = torch.load(by_points)
with open(split_filenames, 'r') as f:
self.scan_names = f.read().splitlines()
# remove unavailable scans
num_scans = len(self.scan_names)
self.scan_names = [sname for sname in self.scan_names if sname in all_scan_names]
print('kept {} scans out of {}'.format(len(self.scan_names), num_scans))
num_scans = len(self.scan_names)
else:
print('illegal split name')
return
self.num_points = num_points
self.use_color = use_color
self.use_height = use_height
self.augment = augment
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
angle_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
angle_residual_label: (MAX_NUM_OBJ,)
size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER-1
size_residual_label: (MAX_NUM_OBJ,3)
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
point_votes: (N,3) with votes XYZ
point_votes_mask: (N,) with 0/1 with 1 indicating the point is in one of the object's OBB.
scan_idx: int scan index in scan_names list
pcl_color: unused
"""
scan_name = self.scan_names[idx]
mesh_vertices = np.load(os.path.join(self.data_path, scan_name)+'_vert.npy')
if os.path.exists(os.path.join(self.data_path, scan_name)+'_ins_label.npy'):
instance_labels = np.load(os.path.join(self.data_path, scan_name)+'_ins_label.npy')
semantic_labels = np.load(os.path.join(self.data_path, scan_name)+'_sem_label.npy')
instance_bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy')
else:
instance_labels = np.ones(mesh_vertices.shape[0])
semantic_labels = np.ones(mesh_vertices.shape[0])
instance_bboxes = np.ones((12,7)) + 2
#from lib.utils.io3d import generate_bbox_mesh, write_triangle_mesh
#new_instance_box = np.zeros_like(instance_bboxes)
#new_instance_box[:, 0] = instance_bboxes[:, 0] - instance_bboxes[:, 3] / 2.0
#new_instance_box[:, 1] = instance_bboxes[:, 1] - instance_bboxes[:, 4] / 2.0
#new_instance_box[:, 2] = instance_bboxes[:, 2] - instance_bboxes[:, 5] / 2.0
#new_instance_box[:, 3] = instance_bboxes[:, 0] + instance_bboxes[:, 3] / 2.0
#new_instance_box[:, 4] = instance_bboxes[:, 1] + instance_bboxes[:, 4] / 2.0
#new_instance_box[:, 5] = instance_bboxes[:, 2] + instance_bboxes[:, 5] / 2.0
#import ipdb
#ipdb.set_trace()
#vertices, _, faces = generate_bbox_mesh(new_instance_box)
#write_triangle_mesh(vertices, None, faces, 'test1.ply')
if self.sampled_bbox and scan_name in self.sampled_bbox:
sampled_bbox = self.sampled_bbox[scan_name][0]
sampled_instances = self.sampled_bbox[scan_name][1]
mask_valid = np.zeros_like(instance_labels).astype(bool)
for sampled_instance in sampled_instances:
mask_valid = mask_valid | (instance_labels == sampled_instance)
mask_nonvalid = ~mask_valid
semantic_labels[mask_nonvalid] = -1
instance_labels[mask_nonvalid] = -1
if len(instance_bboxes) != 0:
instance_bboxes = instance_bboxes[sampled_bbox]
# subsampling happens here
point_cloud = mesh_vertices[:,0:3] # do not use color for now
pcl_color = (mesh_vertices[:,3:6]-MEAN_COLOR_RGB)/256.0
#pcl_color = np.ones_like(mesh_vertices[:,3:6])
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1)
# ------------------------------- LABELS ------------------------------
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
target_bboxes_mask = np.zeros((MAX_NUM_OBJ))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
point_cloud, choices = pc_util.random_sampling(point_cloud,
self.num_points, return_choices=True)
instance_labels = instance_labels[choices]
semantic_labels = semantic_labels[choices]
pcl_color = pcl_color[choices]
target_bboxes_mask[0:instance_bboxes.shape[0]] = 1
target_bboxes[0:instance_bboxes.shape[0],:] = instance_bboxes[:,0:6]
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
target_bboxes[:,0] = -1 * target_bboxes[:,0]
if np.random.random() > 0.5:
# Flipping along the XZ plane
point_cloud[:,1] = -1 * point_cloud[:,1]
target_bboxes[:,1] = -1 * target_bboxes[:,1]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/18) - np.pi/36 # -5 ~ +5 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
target_bboxes = rotate_aligned_boxes(target_bboxes, rot_mat)
# compute votes *AFTER* augmentation
# generate votes
# Note: since there's no map between bbox instance labels and
# pc instance_labels (it had been filtered
# in the data preparation step) we'll compute the instance bbox
# from the points sharing the same instance label.
point_votes = np.zeros([self.num_points, 3])
point_votes_mask = np.zeros(self.num_points)
for i_instance in np.unique(instance_labels):
# find all points belong to that instance
ind = np.where(instance_labels == i_instance)[0]
# find the semantic label
if semantic_labels[ind[0]] in DC.nyu40ids:
x = point_cloud[ind,:3]
center = 0.5*(x.min(0) + x.max(0))
point_votes[ind, :] = center - x
point_votes_mask[ind] = 1.0
point_votes = np.tile(point_votes, (1, 3)) # make 3 votes identical
class_ind = [np.where(DC.nyu40ids == x)[0][0] for x in instance_bboxes[:,-1]]
# NOTE: set size class as semantic class. Consider using size2class.
size_classes[0:instance_bboxes.shape[0]] = class_ind
size_residuals[0:instance_bboxes.shape[0], :] = \
target_bboxes[0:instance_bboxes.shape[0], 3:6] - DC.mean_size_arr[class_ind,:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:instance_bboxes.shape[0]] = \
[DC.nyu40id2class[x] for x in instance_bboxes[:,-1][0:instance_bboxes.shape[0]]]
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
ret_dict['scan_idx'] = np.array(idx).astype(np.int64)
ret_dict['pcl_color'] = pcl_color
ret_dict['scan_name'] = scan_name
return ret_dict
############# Visualization ########
def viz_votes(pc, point_votes, point_votes_mask, name=''):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_util.write_ply(pc_obj, 'pc_obj{}.ply'.format(name))
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1{}.ply'.format(name))
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals, name=''):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = 0 # hard code to 0
box_size = DC.mean_size_arr[size_classes[i], :] + size_residuals[i, :]
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs{}.ply'.format(name))
pc_util.write_ply(label[mask==1,:], 'gt_centroids{}.ply'.format(name))
if __name__=='__main__':
dset = ScannetDetectionDataset(use_height=True, num_points=40000)
for i_example in range(4):
example = dset.__getitem__(1)
pc_util.write_ply(example['point_clouds'], 'pc_{}.ply'.format(i_example))
viz_votes(example['point_clouds'], example['vote_label'],
example['vote_label_mask'],name=i_example)
viz_obb(pc=example['point_clouds'], label=example['center_label'],
mask=example['box_label_mask'],
angle_classes=None, angle_residuals=None,
size_classes=example['size_class_label'], size_residuals=example['size_residual_label'],
name=i_example)
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/scannet_detection_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Batch mode in loading Scannet scenes with vertices and ground truth labels
for semantic and instance segmentations
Usage example: python ./batch_load_scannet_data.py
"""
import os
import sys
import datetime
import numpy as np
from load_scannet_data import export
import pdb
SCANNET_DIR = 'scans'
TRAIN_SCAN_NAMES = [line.rstrip() for line in open('meta_data/scannet_train.txt')]
LABEL_MAP_FILE = 'meta_data/scannetv2-labels.combined.tsv'
DONOTCARE_CLASS_IDS = np.array([])
OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
MAX_NUM_POINT = 50000
OUTPUT_FOLDER = './scannet_train_detection_data'
def export_one_scan(scan_name, output_filename_prefix):
mesh_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '_vh_clean_2.ply')
agg_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '.aggregation.json')
seg_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '_vh_clean_2.0.010000.segs.json')
meta_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '.txt') # includes axisAlignment info for the train set scans.
mesh_vertices, semantic_labels, instance_labels, instance_bboxes, instance2semantic = \
export(mesh_file, agg_file, seg_file, meta_file, LABEL_MAP_FILE, None)
mask = np.logical_not(np.in1d(semantic_labels, DONOTCARE_CLASS_IDS))
mesh_vertices = mesh_vertices[mask,:]
semantic_labels = semantic_labels[mask]
instance_labels = instance_labels[mask]
num_instances = len(np.unique(instance_labels))
print('Num of instances: ', num_instances)
bbox_mask = np.in1d(instance_bboxes[:,-1], OBJ_CLASS_IDS)
instance_bboxes = instance_bboxes[bbox_mask,:]
print('Num of care instances: ', instance_bboxes.shape[0])
N = mesh_vertices.shape[0]
if N > MAX_NUM_POINT:
choices = np.random.choice(N, MAX_NUM_POINT, replace=False)
mesh_vertices = mesh_vertices[choices, :]
semantic_labels = semantic_labels[choices]
instance_labels = instance_labels[choices]
np.save(output_filename_prefix+'_vert.npy', mesh_vertices)
np.save(output_filename_prefix+'_sem_label.npy', semantic_labels)
np.save(output_filename_prefix+'_ins_label.npy', instance_labels)
np.save(output_filename_prefix+'_bbox.npy', instance_bboxes)
def batch_export():
if not os.path.exists(OUTPUT_FOLDER):
print('Creating new data folder: {}'.format(OUTPUT_FOLDER))
os.mkdir(OUTPUT_FOLDER)
for scan_name in TRAIN_SCAN_NAMES:
print('-'*20+'begin')
print(datetime.datetime.now())
print(scan_name)
output_filename_prefix = os.path.join(OUTPUT_FOLDER, scan_name)
if os.path.isfile(output_filename_prefix+'_vert.npy'):
print('File already exists. skipping.')
print('-'*20+'done')
continue
try:
export_one_scan(scan_name, output_filename_prefix)
except Exception:
print('Failed to export scan: %s'%(scan_name))
print('-'*20+'done')
if __name__=='__main__':
batch_export()
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/batch_load_scannet_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from box_util import get_3d_box
class ScannetDatasetConfig(object):
def __init__(self):
self.num_class = 18
self.num_heading_bin = 1
self.num_size_cluster = 18
self.type2class = {'cabinet':0, 'bed':1, 'chair':2, 'sofa':3, 'table':4, 'door':5,
'window':6,'bookshelf':7,'picture':8, 'counter':9, 'desk':10, 'curtain':11,
'refrigerator':12, 'showercurtrain':13, 'toilet':14, 'sink':15, 'bathtub':16, 'garbagebin':17}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.nyu40ids = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
self.nyu40id2class = {nyu40id: i for i,nyu40id in enumerate(list(self.nyu40ids))}
self.mean_size_arr = np.load(os.path.join(ROOT_DIR,'scannet/meta_data/scannet_means.npz'))['arr_0']
self.type_mean_size = {}
for i in range(self.num_size_cluster):
self.type_mean_size[self.class2type[i]] = self.mean_size_arr[i,:]
def angle2class(self, angle):
''' Convert continuous angle to discrete class
[optional] also small regression number from
class center angle to current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
return is class of int32 of 0,1,...,N-1 and a number such that
class*(2pi/N) + number = angle
NOT USED.
'''
assert(False)
def class2angle(self, pred_cls, residual, to_label_format=True):
''' Inverse function to angle2class.
As ScanNet only has axis-aligned boxes, angles are always 0. '''
return 0
def size2class(self, size, type_name):
''' Convert 3D box size (l,w,h) to size class and size residual '''
size_class = self.type2class[type_name]
size_residual = size - self.type_mean_size[type_name]
return size_class, size_residual
def class2size(self, pred_cls, residual):
''' Inverse function to size2class '''
return self.mean_size_arr[pred_cls, :] + residual
def param2obb(self, center, heading_class, heading_residual, size_class, size_residual):
heading_angle = self.class2angle(heading_class, heading_residual)
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle*-1
return obb
def rotate_aligned_boxes(input_boxes, rot_mat):
centers, lengths = input_boxes[:,0:3], input_boxes[:,3:6]
new_centers = np.dot(centers, np.transpose(rot_mat))
dx, dy = lengths[:,0]/2.0, lengths[:,1]/2.0
new_x = np.zeros((dx.shape[0], 4))
new_y = np.zeros((dx.shape[0], 4))
for i, crnr in enumerate([(-1,-1), (1, -1), (1, 1), (-1, 1)]):
crnrs = np.zeros((dx.shape[0], 3))
crnrs[:,0] = crnr[0]*dx
crnrs[:,1] = crnr[1]*dy
crnrs = np.dot(crnrs, np.transpose(rot_mat))
new_x[:,i] = crnrs[:,0]
new_y[:,i] = crnrs[:,1]
new_dx = 2.0*np.max(new_x, 1)
new_dy = 2.0*np.max(new_y, 1)
new_lengths = np.stack((new_dx, new_dy, lengths[:,2]), axis=1)
return np.concatenate([new_centers, new_lengths], axis=1)
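# Illustrative sketch (not part of the original file): rotating axis-aligned
# boxes together with a point cloud, mirroring the augmentation in the
# detection dataset. The 10-degree angle is an arbitrary example and
# pc_util.rotz is assumed to be importable from lib.utils.
#
#   rot_mat = pc_util.rotz(np.deg2rad(10.0))
#   boxes = np.array([[1., 2., 0.5, 0.6, 0.8, 1.0]])  # (cx,cy,cz,dx,dy,dz)
#   rotated = rotate_aligned_boxes(boxes, rot_mat)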
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/model_util_scannet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
import numpy as np
import pc_util
scene_name = 'scannet_train_detection_data/scene0002_00'
output_folder = 'data_viz_dump'
data = np.load(scene_name+'_vert.npy')
scene_points = data[:,0:3]
colors = data[:,3:]
instance_labels = np.load(scene_name+'_ins_label.npy')
semantic_labels = np.load(scene_name+'_sem_label.npy')
instance_bboxes = np.load(scene_name+'_bbox.npy')
print(np.unique(instance_labels))
print(np.unique(semantic_labels))
input()
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Write scene as OBJ file for visualization
pc_util.write_ply_rgb(scene_points, colors, os.path.join(output_folder, 'scene.obj'))
pc_util.write_ply_color(scene_points, instance_labels, os.path.join(output_folder, 'scene_instance.obj'))
pc_util.write_ply_color(scene_points, semantic_labels, os.path.join(output_folder, 'scene_semantic.obj'))
from model_util_scannet import ScannetDatasetConfig
DC = ScannetDatasetConfig()
print(instance_bboxes.shape)
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/data_viz.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Ref: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts '''
import os
import sys
import json
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
def represents_int(s):
''' if string s represents an int. '''
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
if represents_int(list(mapping.keys())[0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
def read_mesh_vertices(filename):
""" read XYZ for each vertex.
"""
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
def read_mesh_vertices_rgb(filename):
""" read XYZ RGB for each vertex.
Note: RGB values are in 0-255
"""
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
vertices[:,3] = plydata['vertex'].data['red']
vertices[:,4] = plydata['vertex'].data['green']
vertices[:,5] = plydata['vertex'].data['blue']
return vertices
|
ContrastiveSceneContexts-main
|
downstream/votenet/datasets/scannet/scannet_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
from lib.utils.nn_distance import nn_distance, huber_loss
FAR_THRESHOLD = 0.6
NEAR_THRESHOLD = 0.3
GT_VOTE_FACTOR = 3 # number of GT votes per point
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
def compute_vote_loss(end_points):
""" Compute vote loss: Match predicted votes to GT votes.
Args:
end_points: dict (read-only)
Returns:
vote_loss: scalar Tensor
Overall idea:
If the seed point belongs to an object (vote_label_mask == 1),
then we require it to vote for the object center.
Each seed point may vote for multiple translations v1,v2,v3
A seed point may also be in the boxes of multiple objects:
o1,o2,o3 with corresponding GT votes c1,c2,c3
Then the loss for this seed point is:
min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
"""
# Load ground truth votes and assign them to seed points
batch_size = end_points['seed_xyz'].shape[0]
num_seed = end_points['seed_xyz'].shape[1] # B,num_seed,3
vote_xyz = end_points['vote_xyz'] # B,num_seed*vote_factor,3
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
# Get groundtruth votes for the seed points
# vote_label_mask: Use gather to select B,num_seed from B,num_point
# non-object point has no GT vote mask = 0, object point has mask = 1
# vote_label: Use gather to select B,num_seed,9 from B,num_point,9
# with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
seed_inds_expand = seed_inds.view(batch_size,num_seed,1).repeat(1,1,3*GT_VOTE_FACTOR)
seed_gt_votes = torch.gather(end_points['vote_label'], 1, seed_inds_expand)
seed_gt_votes += end_points['seed_xyz'].repeat(1,1,3)
# Compute the min of min of distance
vote_xyz_reshape = vote_xyz.view(batch_size*num_seed, -1, 3) # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3
seed_gt_votes_reshape = seed_gt_votes.view(batch_size*num_seed, GT_VOTE_FACTOR, 3) # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3
# A predicted vote to nowhere is not penalized as long as there is a good vote near the GT vote.
dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, seed_gt_votes_reshape, l1=True)
votes_dist, _ = torch.min(dist2, dim=1) # (B*num_seed,vote_factor) to (B*num_seed,)
votes_dist = votes_dist.view(batch_size, num_seed)
vote_loss = torch.sum(votes_dist*seed_gt_votes_mask.float())/(torch.sum(seed_gt_votes_mask.float())+1e-6)
return vote_loss
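# Sketch of the reduction above (numbers are illustrative only): dist2 holds,
# for each of the GT_VOTE_FACTOR ground-truth votes of a seed, the distance to
# its nearest predicted vote; the min over dim=1 keeps the single best match,
# so per-GT-vote distances of (0.4, 0.1, 0.3) contribute 0.1 to that seed.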
def compute_objectness_loss(end_points):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = end_points['aggregated_vote_xyz']
gt_center = end_points['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# objectness_label: 1 if pred object center is within NEAR_THRESHOLD of any GT object
# objectness_mask: 0 if pred object center is in gray zone (DONOTCARE), 1 otherwise
euclidean_dist1 = torch.sqrt(dist1+1e-6)
objectness_label = torch.zeros((B,K), dtype=torch.long).cuda()
objectness_mask = torch.zeros((B,K)).cuda()
objectness_label[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1>FAR_THRESHOLD] = 1
# Compute objectness loss
objectness_scores = end_points['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
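# Sketch of the labelling rule above with the default thresholds: a proposal
# whose aggregated vote lies 0.2m from the closest GT center is a positive
# (label=1, mask=1), one at 0.45m falls in the ignored gray zone (mask=0),
# and one at 0.8m is a supervised negative (label=0, mask=1).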
def compute_box_and_sem_cls_loss(end_points, config):
""" Compute 3D bounding box and semantic classification loss.
Args:
end_points: dict (read-only)
Returns:
center_loss
heading_cls_loss
heading_reg_loss
size_cls_loss
size_reg_loss
sem_cls_loss
"""
num_heading_bin = config.num_heading_bin
num_size_cluster = config.num_size_cluster
num_class = config.num_class
mean_size_arr = config.mean_size_arr
object_assignment = end_points['object_assignment']
batch_size = object_assignment.shape[0]
# Compute center loss
pred_center = end_points['center']
gt_center = end_points['center_label'][:,:,0:3]
dist1, ind1, dist2, _ = nn_distance(pred_center, gt_center) # dist1: BxK, dist2: BxK2
box_label_mask = end_points['box_label_mask']
objectness_label = end_points['objectness_label'].float()
centroid_reg_loss1 = \
torch.sum(dist1*objectness_label)/(torch.sum(objectness_label)+1e-6)
centroid_reg_loss2 = \
torch.sum(dist2*box_label_mask)/(torch.sum(box_label_mask)+1e-6)
center_loss = centroid_reg_loss1 + centroid_reg_loss2
# Compute heading loss
heading_class_label = torch.gather(end_points['heading_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
heading_class_loss = criterion_heading_class(end_points['heading_scores'].transpose(2,1), heading_class_label) # (B,K)
heading_class_loss = torch.sum(heading_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
heading_residual_label = torch.gather(end_points['heading_residual_label'], 1, object_assignment) # select (B,K) from (B,K2)
heading_residual_normalized_label = heading_residual_label / (np.pi/num_heading_bin)
# Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1], num_heading_bin).zero_()
heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_heading_bin)
heading_residual_normalized_loss = huber_loss(torch.sum(end_points['heading_residuals_normalized']*heading_label_one_hot, -1) - heading_residual_normalized_label, delta=1.0) # (B,K)
heading_residual_normalized_loss = torch.sum(heading_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# Compute size loss
size_class_label = torch.gather(end_points['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_size_class = nn.CrossEntropyLoss(reduction='none')
size_class_loss = criterion_size_class(end_points['size_scores'].transpose(2,1), size_class_label) # (B,K)
size_class_loss = torch.sum(size_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
size_residual_label = torch.gather(end_points['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
size_label_one_hot = torch.cuda.FloatTensor(batch_size, size_class_label.shape[1], num_size_cluster).zero_()
size_label_one_hot.scatter_(2, size_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_size_cluster)
size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat(1,1,1,3) # (B,K,num_size_cluster,3)
predicted_size_residual_normalized = torch.sum(end_points['size_residuals_normalized']*size_label_one_hot_tiled, 2) # (B,K,3)
mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0) # (1,1,num_size_cluster,3)
mean_size_label = torch.sum(size_label_one_hot_tiled * mean_size_arr_expanded, 2) # (B,K,3)
size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3)
size_residual_normalized_loss = torch.mean(huber_loss(predicted_size_residual_normalized - size_residual_label_normalized, delta=1.0), -1) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(size_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# 3.4 Semantic cls loss
sem_cls_label = torch.gather(end_points['sem_cls_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
sem_cls_loss = criterion_sem_cls(end_points['sem_cls_scores'].transpose(2,1), sem_cls_label) # (B,K)
sem_cls_loss = torch.sum(sem_cls_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
return center_loss, heading_class_loss, heading_residual_normalized_loss, size_class_loss, size_residual_normalized_loss, sem_cls_loss
def get_loss(end_points, config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds, vote_xyz,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Vote loss
vote_loss = compute_vote_loss(end_points)
end_points['vote_loss'] = vote_loss
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(end_points)
end_points['objectness_loss'] = objectness_loss
end_points['objectness_label'] = objectness_label
end_points['objectness_mask'] = objectness_mask
end_points['object_assignment'] = object_assignment
total_num_proposal = objectness_label.shape[0]*objectness_label.shape[1]
end_points['pos_ratio'] = \
torch.sum(objectness_label.float().cuda())/float(total_num_proposal)
end_points['neg_ratio'] = \
torch.sum(objectness_mask.float())/float(total_num_proposal) - end_points['pos_ratio']
# Box loss and sem cls loss
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(end_points, config)
end_points['center_loss'] = center_loss
end_points['heading_cls_loss'] = heading_cls_loss
end_points['heading_reg_loss'] = heading_reg_loss
end_points['size_cls_loss'] = size_cls_loss
end_points['size_reg_loss'] = size_reg_loss
end_points['sem_cls_loss'] = sem_cls_loss
box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss + 0.1*size_cls_loss + size_reg_loss
end_points['box_loss'] = box_loss
# Final loss function
loss = vote_loss + 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss
loss *= 10
end_points['loss'] = loss
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(end_points['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val==objectness_label.long()).float()*objectness_mask)/(torch.sum(objectness_mask)+1e-6)
end_points['obj_acc'] = obj_acc
return loss, end_points
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/loss_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import os
import sys
from lib.utils import pc_util
DUMP_CONF_THRESH = 0.5 # Dump boxes with obj prob larger than that.
def softmax(x):
''' Numpy function for softmax'''
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))
probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)
return probs
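# Illustrative sketch (not part of the original file): softmax acts over the
# last axis, e.g. turning (B,K,2) objectness logits into probabilities.
#
#   scores = np.array([[[0.2, 1.5], [2.0, -1.0]]])  # arbitrary logits
#   obj_prob = softmax(scores)[:, :, 1]             # P(object) per proposal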
def dump_results(end_points, dump_dir, config, inference_switch=False):
''' Dump results.
Args:
end_points: dict
{..., pred_mask}
pred_mask is a binary mask array of size (batch_size, num_proposal) computed by running NMS and empty box removal
Returns:
None
'''
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
# INPUT
point_clouds = end_points['point_clouds'].cpu().numpy()
batch_size = point_clouds.shape[0]
# NETWORK OUTPUTS
seed_xyz = end_points['seed_xyz'].detach().cpu().numpy() # (B,num_seed,3)
if 'vote_xyz' in end_points:
aggregated_vote_xyz = end_points['aggregated_vote_xyz'].detach().cpu().numpy()
vote_xyz = end_points['vote_xyz'].detach().cpu().numpy() # (B,num_seed,3)
aggregated_vote_xyz = end_points['aggregated_vote_xyz'].detach().cpu().numpy()
objectness_scores = end_points['objectness_scores'].detach().cpu().numpy() # (B,K,2)
pred_center = end_points['center'].detach().cpu().numpy() # (B,K,3)
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_class = pred_heading_class.detach().cpu().numpy() # B,num_proposal
pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2, pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal,3
# OTHERS
pred_mask = end_points['pred_mask'] # B,num_proposal
idx_beg = 0
for i in range(batch_size):
pc = point_clouds[i,:,:]
objectness_prob = softmax(objectness_scores[i,:,:])[:,1] # (K,)
# Dump various point clouds
pc_util.write_ply(pc, os.path.join(dump_dir, '%06d_pc.ply'%(idx_beg+i)))
pc_util.write_ply(seed_xyz[i,:,:], os.path.join(dump_dir, '%06d_seed_pc.ply'%(idx_beg+i)))
if 'vote_xyz' in end_points:
pc_util.write_ply(end_points['vote_xyz'][i,:,:], os.path.join(dump_dir, '%06d_vgen_pc.ply'%(idx_beg+i)))
pc_util.write_ply(aggregated_vote_xyz[i,:,:], os.path.join(dump_dir, '%06d_aggregated_vote_pc.ply'%(idx_beg+i)))
pc_util.write_ply(pred_center[i,:,0:3], os.path.join(dump_dir, '%06d_proposal_pc.ply'%(idx_beg+i)))
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
pc_util.write_ply(pred_center[i,objectness_prob>DUMP_CONF_THRESH,0:3], os.path.join(dump_dir, '%06d_confident_proposal_pc.ply'%(idx_beg+i)))
# Dump predicted bounding boxes
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
num_proposal = pred_center.shape[1]
obbs = []
for j in range(num_proposal):
obb = config.param2obb(pred_center[i,j,0:3], pred_heading_class[i,j], pred_heading_residual[i,j],
pred_size_class[i,j], pred_size_residual[i,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_proposal, 7)
pc_util.write_oriented_bbox(obbs[objectness_prob>DUMP_CONF_THRESH,:], os.path.join(dump_dir, '%06d_pred_confident_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs[np.logical_and(objectness_prob>DUMP_CONF_THRESH, pred_mask[i,:]==1),:], os.path.join(dump_dir, '%06d_pred_confident_nms_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs[pred_mask[i,:]==1,:], os.path.join(dump_dir, '%06d_pred_nms_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_pred_bbox.ply'%(idx_beg+i)))
# Return if it is at inference time. No dumping of groundtruths
if inference_switch:
return
# LABELS
gt_center = end_points['center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3)
gt_mask = end_points['box_label_mask'].cpu().numpy() # B,K2
gt_heading_class = end_points['heading_class_label'].cpu().numpy() # B,K2
gt_heading_residual = end_points['heading_residual_label'].cpu().numpy() # B,K2
gt_size_class = end_points['size_class_label'].cpu().numpy() # B,K2
gt_size_residual = end_points['size_residual_label'].cpu().numpy() # B,K2,3
objectness_label = end_points['objectness_label'].detach().cpu().numpy() # (B,K,)
objectness_mask = end_points['objectness_mask'].detach().cpu().numpy() # (B,K,)
for i in range(batch_size):
if np.sum(objectness_label[i,:])>0:
pc_util.write_ply(pred_center[i,objectness_label[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_positive_proposal_pc.ply'%(idx_beg+i)))
if np.sum(objectness_mask[i,:])>0:
pc_util.write_ply(pred_center[i,objectness_mask[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_mask_proposal_pc.ply'%(idx_beg+i)))
pc_util.write_ply(gt_center[i,:,0:3], os.path.join(dump_dir, '%06d_gt_centroid_pc.ply'%(idx_beg+i)))
pc_util.write_ply_color(pred_center[i,:,0:3], objectness_label[i,:], os.path.join(dump_dir, '%06d_proposal_pc_objectness_label.obj'%(idx_beg+i)))
# Dump GT bounding boxes
obbs = []
for j in range(gt_center.shape[1]):
if gt_mask[i,j] == 0: continue
obb = config.param2obb(gt_center[i,j,0:3], gt_heading_class[i,j], gt_heading_residual[i,j],
gt_size_class[i,j], gt_size_residual[i,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_gt_objects, 7)
pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_gt_bbox.ply'%(idx_beg+i)))
# OPTIONALLY, also dump prediction and gt details
if 'batch_pred_map_cls' in end_points:
for ii in range(batch_size):
fout = open(os.path.join(dump_dir, '%06d_pred_map_cls.txt'%(ii)), 'w')
for t in end_points['batch_pred_map_cls'][ii]:
fout.write(str(t[0])+' ')
fout.write(",".join([str(x) for x in list(t[1].flatten())]))
fout.write(' '+str(t[2]))
fout.write('\n')
fout.close()
if 'batch_gt_map_cls' in end_points:
for ii in range(batch_size):
fout = open(os.path.join(dump_dir, '%06d_gt_map_cls.txt'%(ii)), 'w')
for t in end_points['batch_gt_map_cls'][ii]:
fout.write(str(t[0])+' ')
fout.write(",".join([str(x) for x in list(t[1].flatten())]))
fout.write('\n')
fout.close()
def dump_results_(end_points, dump_dir, config):
''' Dump results.
Args:
end_points: dict
{..., pred_mask}
pred_mask is a binary mask array of size (batch_size, num_proposal) computed by running NMS and empty box removal
Returns:
None
'''
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
# INPUT
point_clouds = end_points['point_clouds'].cpu().numpy()
batch_size = point_clouds.shape[0]
# NETWORK OUTPUTS
objectness_scores = end_points['objectness_scores'].detach().cpu().numpy() # (B,K,2)
pred_center = end_points['center'].detach().cpu().numpy() # (B,K,3)
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_class = pred_heading_class.detach().cpu().numpy() # B,num_proposal
pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2, pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal,3
# OTHERS
pred_mask = end_points['pred_mask'] # B,num_proposal
pc = point_clouds[0,:,:]
objectness_prob = softmax(objectness_scores[0,:,:])[:,1] # (K,)
# Dump various point clouds
scan_idx = end_points['scan_idx']
scan_idx = str(scan_idx.cpu().numpy()[0])
os.makedirs(os.path.join(dump_dir, scan_idx))
pc_util.write_ply(pc, os.path.join(dump_dir, scan_idx, 'pc.ply'))
# Dump predicted bounding boxes
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
num_proposal = pred_center.shape[1]
obbs = []
for j in range(num_proposal):
obb = config.param2obb(pred_center[0,j,0:3], pred_heading_class[0,j], pred_heading_residual[0,j],
pred_size_class[0,j], pred_size_residual[0,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_proposal, 7)
obbs = obbs[np.logical_and(objectness_prob>DUMP_CONF_THRESH, pred_mask[0,:]==1),:]
for idx, obb in enumerate(obbs):
pc_util.write_oriented_bbox_(obb, os.path.join(dump_dir, scan_idx, '{}.ply'.format(idx)))
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/dump_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from nn_distance import nn_distance, huber_loss
sys.path.append(BASE_DIR)
from loss_helper import compute_box_and_sem_cls_loss
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
def compute_objectness_loss(end_points):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = end_points['aggregated_vote_xyz']
gt_center = end_points['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# NOTE: Different from VoteNet, here we use seed label as objectness label.
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
end_points['seed_labels'] = seed_gt_votes_mask
aggregated_vote_inds = end_points['aggregated_vote_inds']
objectness_label = torch.gather(end_points['seed_labels'], 1, aggregated_vote_inds.long()) # select (B,K) from (B,1024)
objectness_mask = torch.ones((objectness_label.shape[0], objectness_label.shape[1])).cuda() # no ignore zone anymore
# Compute objectness loss
objectness_scores = end_points['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
def get_loss(end_points, config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(end_points)
end_points['objectness_loss'] = objectness_loss
end_points['objectness_label'] = objectness_label
end_points['objectness_mask'] = objectness_mask
end_points['object_assignment'] = object_assignment
total_num_proposal = objectness_label.shape[0]*objectness_label.shape[1]
end_points['pos_ratio'] = \
torch.sum(objectness_label.float().cuda())/float(total_num_proposal)
end_points['neg_ratio'] = \
torch.sum(objectness_mask.float())/float(total_num_proposal) - end_points['pos_ratio']
# Box loss and sem cls loss
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(end_points, config)
end_points['center_loss'] = center_loss
end_points['heading_cls_loss'] = heading_cls_loss
end_points['heading_reg_loss'] = heading_reg_loss
end_points['size_cls_loss'] = size_cls_loss
end_points['size_reg_loss'] = size_reg_loss
end_points['sem_cls_loss'] = sem_cls_loss
box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss + 0.1*size_cls_loss + size_reg_loss
end_points['box_loss'] = box_loss
# Final loss function
loss = 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss
loss *= 10
end_points['loss'] = loss
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(end_points['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val==objectness_label.long()).float()*objectness_mask)/(torch.sum(objectness_mask)+1e-6)
end_points['obj_acc'] = obj_acc
return loss, end_points
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/loss_helper_boxnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions and class to calculate Average Precisions for 3D object detection.
"""
import os
import sys
import numpy as np
import torch
from lib.utils.eval_det import eval_det_cls, eval_det_multiprocessing
from lib.utils.eval_det import get_iou_obb
from lib.utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls
from lib.utils.box_util import get_3d_box
from datasets.sunrgbd.sunrgbd_utils import extract_pc_in_box3d
def flip_axis_back_camera(pc):
''' Flip X-right,Y-down,Z-forward (camera) back to X-right,Y-forward,Z-up (depth)
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[...,1] *= -1
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
return pc2
def flip_axis_to_camera(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[...,1] *= -1
return pc2
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
pc2[...,2] *= -1
return pc2
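# Sketch of the coordinate convention handled by the flips above (illustrative
# values): a depth/upright point (x, y, z) = (1, 2, 3) maps to camera
# coordinates (1, -3, 2) via flip_axis_to_camera, and flip_axis_to_depth maps
# it back to (1, 2, 3).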
def softmax(x):
''' Numpy function for softmax'''
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))
probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)
return probs
def parse_predictions(end_points, config_dict):
""" Parse predictions to OBB parameters and suppress overlapping boxes
Args:
end_points: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
pred_center = end_points['center'] # B,num_proposal,3
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2,
pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_residual.squeeze_(2)
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
pred_sem_cls = torch.argmax(end_points['sem_cls_scores'], -1) # B,num_proposal
sem_cls_probs = softmax(end_points['sem_cls_scores'].detach().cpu().numpy()) # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs,-1) # B,num_proposal
num_proposal = pred_center.shape[1]
# Since we operate in upright_depth coords for points while the util functions
# assume upright_camera coords, convert the predicted centers first.
bsize = pred_center.shape[0]
pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3))
pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy())
for i in range(bsize):
for j in range(num_proposal):
heading_angle = config_dict['dataset_config'].class2angle(\
pred_heading_class[i,j].detach().cpu().numpy(), pred_heading_residual[i,j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size(\
int(pred_size_class[i,j].detach().cpu().numpy()), pred_size_residual[i,j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, heading_angle, pred_center_upright_camera[i,j,:])
pred_corners_3d_upright_camera[i,j] = corners_3d_upright_camera
K = pred_center.shape[1] # K==num_proposal
nonempty_box_mask = np.ones((bsize, K))
if config_dict['remove_empty_box']:
# -------------------------------------
# Remove predicted boxes without any point within them.
batch_pc = end_points['point_clouds'].cpu().numpy()[:,:,0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i,:,:] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i,j,:,:] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box,inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i,j] = 0
# -------------------------------------
obj_logits = end_points['objectness_scores'].detach().cpu().numpy()
obj_prob = softmax(obj_logits)[:,:,1] # (B,K)
if not config_dict['use_3d_nms']:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K,5))
for j in range(K):
boxes_2d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_2d_with_prob[j,2] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_2d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_2d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_2d_with_prob[j,4] = obj_prob[i,j]
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_2d_faster(boxes_2d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K,7))
for j in range(K):
boxes_3d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,2] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,4] = np.max(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,5] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,6] = obj_prob[i,j]
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_3d_faster(boxes_3d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K,8))
for j in range(K):
boxes_3d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,2] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,4] = np.max(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,5] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,6] = obj_prob[i,j]
boxes_3d_with_prob[j,7] = pred_sem_cls[i,j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
batch_pred_map_cls = [] # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)
for i in range(bsize):
if config_dict['per_class_proposal']:
cur_list = []
for ii in range(config_dict['dataset_config'].num_class):
cur_list += [(ii, pred_corners_3d_upright_camera[i,j], sem_cls_probs[i,j,ii]*obj_prob[i,j]) \
for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']]
batch_pred_map_cls.append(cur_list)
else:
batch_pred_map_cls.append([(pred_sem_cls[i,j].item(), pred_corners_3d_upright_camera[i,j], obj_prob[i,j]) \
for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']])
end_points['batch_pred_map_cls'] = batch_pred_map_cls
return batch_pred_map_cls
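# A minimal usage sketch (the threshold values below are assumptions; the real
# config_dict is assembled by the training/evaluation scripts):
# config_dict = {
#     'dataset_config': DC,        # the dataset's config object, e.g. DC from sunrgbd_detection_dataset
#     'remove_empty_box': False,
#     'use_3d_nms': True,
#     'cls_nms': True,
#     'use_old_type_nms': False,
#     'nms_iou': 0.25,
#     'conf_thresh': 0.05,
#     'per_class_proposal': True,
# }
# batch_pred_map_cls = parse_predictions(end_points, config_dict)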
def parse_groundtruths(end_points, config_dict):
""" Parse groundtruth labels to OBB parameters.
Args:
end_points: dict
{center_label, heading_class_label, heading_residual_label,
size_class_label, size_residual_label, sem_cls_label,
box_label_mask}
config_dict: dict
{dataset_config}
Returns:
batch_gt_map_cls: a list of len == batch_size (BS)
[gt_list_i], i = 0, 1, ..., BS-1
where gt_list_i = [(gt_sem_cls, gt_box_params)_j]
where j = 0, ..., num of objects - 1 at sample input i
"""
center_label = end_points['center_label']
heading_class_label = end_points['heading_class_label']
heading_residual_label = end_points['heading_residual_label']
size_class_label = end_points['size_class_label']
size_residual_label = end_points['size_residual_label']
box_label_mask = end_points['box_label_mask']
sem_cls_label = end_points['sem_cls_label']
bsize = center_label.shape[0]
K2 = center_label.shape[1] # K2==MAX_NUM_OBJ
gt_corners_3d_upright_camera = np.zeros((bsize, K2, 8, 3))
gt_center_upright_camera = flip_axis_to_camera(center_label[:,:,0:3].detach().cpu().numpy())
for i in range(bsize):
for j in range(K2):
if box_label_mask[i,j] == 0: continue
heading_angle = config_dict['dataset_config'].class2angle(heading_class_label[i,j].detach().cpu().numpy(), heading_residual_label[i,j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size(int(size_class_label[i,j].detach().cpu().numpy()), size_residual_label[i,j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, heading_angle, gt_center_upright_camera[i,j,:])
gt_corners_3d_upright_camera[i,j] = corners_3d_upright_camera
batch_gt_map_cls = []
for i in range(bsize):
batch_gt_map_cls.append([(sem_cls_label[i,j].item(), gt_corners_3d_upright_camera[i,j]) for j in range(gt_corners_3d_upright_camera.shape[1]) if box_label_mask[i,j]==1])
end_points['batch_gt_map_cls'] = batch_gt_map_cls
return batch_gt_map_cls
class APCalculator(object):
''' Calculating Average Precision '''
def __init__(self, ap_iou_thresh=0.25, class2type_map=None):
"""
Args:
ap_iou_thresh: float between 0 and 1.0
IoU threshold to judge whether a prediction is positive.
class2type_map: [optional] dict {class_int:class_name}
"""
self.ap_iou_thresh = ap_iou_thresh
self.class2type_map = class2type_map
self.reset()
def step(self, batch_pred_map_cls, batch_gt_map_cls):
""" Accumulate one batch of prediction and groundtruth.
Args:
batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...]
batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]
should have the same length with batch_pred_map_cls (batch_size)
"""
bsize = len(batch_pred_map_cls)
assert(bsize == len(batch_gt_map_cls))
for i in range(bsize):
self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]
self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]
self.scan_cnt += 1
def compute_metrics(self):
""" Use accumulated predictions and groundtruths to compute Average Precision.
"""
rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=self.ap_iou_thresh, get_iou_func=get_iou_obb)
ret_dict = {}
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
ret_dict['%s Average Precision'%(clsname)] = ap[key]
ret_dict['mAP'] = np.mean(list(ap.values()))
rec_list = []
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
try:
ret_dict['%s Recall'%(clsname)] = rec[key][-1]
rec_list.append(rec[key][-1])
except:
ret_dict['%s Recall'%(clsname)] = 0
rec_list.append(0)
ret_dict['AR'] = np.mean(rec_list)
return ret_dict
def reset(self):
self.gt_map_cls = {} # {scan_id: [(classname, bbox)]}
self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]}
self.scan_cnt = 0
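# A minimal usage sketch of the evaluation loop (loader and config names are assumptions):
# ap_calculator = APCalculator(ap_iou_thresh=0.25, class2type_map=DC.class2type)
# for batch in eval_loader:
#     end_points = model(batch)
#     batch_pred_map_cls = parse_predictions(end_points, config_dict)
#     batch_gt_map_cls = parse_groundtruths(end_points, config_dict)
#     ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# metrics = ap_calculator.compute_metrics()  # per-class AP/Recall plus 'mAP' and 'AR'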
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/ap_helper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Voting module: generate votes from XYZ and features of seed points.
Date: July, 2019
Author: Charles R. Qi and Or Litany
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class VotingModule(nn.Module):
def __init__(self, vote_factor, seed_feature_dim):
""" Votes generation from seed point features.
Args:
vote_factor: int
number of votes generated from each seed point
seed_feature_dim: int
number of channels of seed point features
vote_feature_dim: int
number of channels of vote features
"""
super().__init__()
self.vote_factor = vote_factor
self.in_dim = seed_feature_dim
self.out_dim = self.in_dim # due to residual feature, in_dim has to be == out_dim
self.conv1 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.conv2 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.conv3 = torch.nn.Conv1d(self.in_dim, (3+self.out_dim) * self.vote_factor, 1)
self.bn1 = torch.nn.BatchNorm1d(self.in_dim)
self.bn2 = torch.nn.BatchNorm1d(self.in_dim)
def forward(self, seed_xyz, seed_features):
""" Forward pass.
Arguments:
seed_xyz: (batch_size, num_seed, 3) Pytorch tensor
seed_features: (batch_size, feature_dim, num_seed) Pytorch tensor
Returns:
vote_xyz: (batch_size, num_seed*vote_factor, 3)
vote_features: (batch_size, vote_feature_dim, num_seed*vote_factor)
"""
batch_size = seed_xyz.shape[0]
num_seed = seed_xyz.shape[1]
num_vote = num_seed*self.vote_factor
net = F.relu(self.bn1(self.conv1(seed_features)))
net = F.relu(self.bn2(self.conv2(net)))
net = self.conv3(net) # (batch_size, (3+out_dim)*vote_factor, num_seed)
net = net.transpose(2,1).view(batch_size, num_seed, self.vote_factor, 3+self.out_dim)
offset = net[:,:,:,0:3]
vote_xyz = seed_xyz.unsqueeze(2) + offset
vote_xyz = vote_xyz.contiguous().view(batch_size, num_vote, 3)
residual_features = net[:,:,:,3:] # (batch_size, num_seed, vote_factor, out_dim)
vote_features = seed_features.transpose(2,1).unsqueeze(2) + residual_features
vote_features = vote_features.contiguous().view(batch_size, num_vote, self.out_dim)
vote_features = vote_features.transpose(2,1).contiguous()
return vote_xyz, vote_features
if __name__=='__main__':
net = VotingModule(2, 256).cuda()
xyz, features = net(torch.rand(8,1024,3).cuda(), torch.rand(8,256,1024).cuda())
print('xyz', xyz.shape)
print('features', features.shape)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/voting_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'pointnet2'))
from pointnet2_modules import PointnetSAModuleVotes
import pointnet2_utils
def decode_scores(net, end_points, num_class, num_heading_bin, num_size_cluster, mean_size_arr):
net_transposed = net.transpose(2,1) # (batch_size, 1024, ..)
batch_size = net_transposed.shape[0]
num_proposal = net_transposed.shape[1]
objectness_scores = net_transposed[:,:,0:2]
end_points['objectness_scores'] = objectness_scores
base_xyz = end_points['aggregated_vote_xyz'] # (batch_size, num_proposal, 3)
center = base_xyz + net_transposed[:,:,2:5] # (batch_size, num_proposal, 3)
end_points['center'] = center
heading_scores = net_transposed[:,:,5:5+num_heading_bin]
heading_residuals_normalized = net_transposed[:,:,5+num_heading_bin:5+num_heading_bin*2]
end_points['heading_scores'] = heading_scores # Bxnum_proposalxnum_heading_bin
end_points['heading_residuals_normalized'] = heading_residuals_normalized # Bxnum_proposalxnum_heading_bin (should be -1 to 1)
end_points['heading_residuals'] = heading_residuals_normalized * (np.pi/num_heading_bin) # Bxnum_proposalxnum_heading_bin
size_scores = net_transposed[:,:,5+num_heading_bin*2:5+num_heading_bin*2+num_size_cluster]
size_residuals_normalized = net_transposed[:,:,5+num_heading_bin*2+num_size_cluster:5+num_heading_bin*2+num_size_cluster*4].view([batch_size, num_proposal, num_size_cluster, 3]) # Bxnum_proposalxnum_size_clusterx3
end_points['size_scores'] = size_scores
end_points['size_residuals_normalized'] = size_residuals_normalized
end_points['size_residuals'] = size_residuals_normalized * torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
sem_cls_scores = net_transposed[:,:,5+num_heading_bin*2+num_size_cluster*4:] # Bxnum_proposalx10
end_points['sem_cls_scores'] = sem_cls_scores
return end_points
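# Channel layout decoded above, as a worked example with the SUN RGB-D defaults
# (num_heading_bin=12, num_size_cluster=10, num_class=10):
#   [0:2]   objectness scores              (2)
#   [2:5]   center residual                (3)
#   [5:17]  heading class scores           (num_heading_bin)
#   [17:29] heading residuals, normalized  (num_heading_bin)
#   [29:39] size class scores              (num_size_cluster)
#   [39:69] size residuals, normalized     (num_size_cluster*3)
#   [69:79] semantic class scores          (num_class)
# Total 2+3+12*2+10*4+10 = 79 channels, matching conv3 in ProposalModule below.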
class ProposalModule(nn.Module):
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, sampling, seed_feat_dim=256):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
self.num_proposal = num_proposal
self.sampling = sampling
self.seed_feat_dim = seed_feat_dim
# Vote clustering
self.vote_aggregation = PointnetSAModuleVotes(
npoint=self.num_proposal,
radius=0.3,
nsample=16,
mlp=[self.seed_feat_dim, 128, 128, 128],
use_xyz=True,
normalize_xyz=True
)
# Object proposal/detection
# Objectness scores (2), center residual (3),
# heading class+residual (num_heading_bin*2), size class+residual(num_size_cluster*4)
self.conv1 = torch.nn.Conv1d(128,128,1)
self.conv2 = torch.nn.Conv1d(128,128,1)
self.conv3 = torch.nn.Conv1d(128,2+3+num_heading_bin*2+num_size_cluster*4+self.num_class,1)
self.bn1 = torch.nn.BatchNorm1d(128)
self.bn2 = torch.nn.BatchNorm1d(128)
def forward(self, xyz, features, end_points):
"""
Args:
xyz: (B,K,3)
features: (B,C,K)
Returns:
scores: (B,num_proposal,2+3+NH*2+NS*4)
"""
if self.sampling == 'vote_fps':
# Farthest point sampling (FPS) on votes
xyz, features, fps_inds = self.vote_aggregation(xyz, features)
sample_inds = fps_inds
elif self.sampling == 'seed_fps':
# FPS on seed and choose the votes corresponding to the seeds
# This gets us a slightly better coverage of *object* votes than vote_fps (which tends to get more cluster votes)
sample_inds = pointnet2_utils.furthest_point_sample(end_points['seed_xyz'], self.num_proposal)
xyz, features, _ = self.vote_aggregation(xyz, features, sample_inds)
elif self.sampling == 'random':
# Random sampling from the votes
num_seed = end_points['seed_xyz'].shape[1]
batch_size = end_points['seed_xyz'].shape[0]
sample_inds = torch.randint(0, num_seed, (batch_size, self.num_proposal), dtype=torch.int).cuda()
xyz, features, _ = self.vote_aggregation(xyz, features, sample_inds)
else:
raise ValueError('Unknown sampling strategy: %s.' % (self.sampling))
end_points['aggregated_vote_xyz'] = xyz # (batch_size, num_proposal, 3)
end_points['aggregated_vote_inds'] = sample_inds # (batch_size, num_proposal,) # should be 0,1,2,...,num_proposal
# --------- PROPOSAL GENERATION ---------
net = F.relu(self.bn1(self.conv1(features)))
net = F.relu(self.bn2(self.conv2(net)))
net = self.conv3(net) # (batch_size, 2+3+num_heading_bin*2+num_size_cluster*4, num_proposal)
end_points = decode_scores(net, end_points, self.num_class, self.num_heading_bin, self.num_size_cluster, self.mean_size_arr)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
net = ProposalModule(DC.num_class, DC.num_heading_bin,
DC.num_size_cluster, DC.mean_size_arr,
128, 'seed_fps').cuda()
end_points = {'seed_xyz': torch.rand(8,1024,3).cuda()}
out = net(torch.rand(8,1024,3).cuda(), torch.rand(8,256,1024).cuda(), end_points)
for key in out:
print(key, out[key].shape)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/proposal_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from backbone_module import Pointnet2Backbone
from proposal_module import ProposalModule
from dump_helper import dump_results
from loss_helper_boxnet import get_loss
class BoxNet(nn.Module):
r"""
A baseline deep network for 3D object detection that proposes boxes directly from backbone features (no Hough voting).
Parameters
----------
num_class: int
Number of semantics classes to predict over -- size of softmax classifier
num_heading_bin: int
num_size_cluster: int
input_feature_dim: (default: 0)
Input dim in the feature descriptor for each point. If the point cloud is Nx9,
this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are feature descriptors
num_proposal: int (default: 128)
Number of proposals/detections generated from the network. Each proposal is a 3D OBB with a semantic class.
vote_factor: (default: 1)
Number of votes generated from each seed point.
"""
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr,
input_feature_dim=0, num_proposal=128, vote_factor=1, sampling='vote_fps', backbone=None):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
assert(mean_size_arr.shape[0] == self.num_size_cluster)
self.input_feature_dim = input_feature_dim
self.num_proposal = num_proposal
self.vote_factor = vote_factor
self.sampling=sampling
# Backbone point feature learning
self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)
# Box proposal, aggregation and detection
self.pnet = ProposalModule(num_class, num_heading_bin, num_size_cluster,
mean_size_arr, num_proposal, sampling)
def forward(self, inputs):
""" Forward pass of the network
Args:
inputs: dict
{point_clouds}
point_clouds: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns:
end_points: dict
"""
end_points = {}
batch_size = inputs['point_clouds'].shape[0]
end_points = self.backbone_net(inputs['point_clouds'], end_points)
xyz = end_points['fp2_xyz']
features = end_points['fp2_features']
end_points['seed_inds'] = end_points['fp2_inds']
end_points['seed_xyz'] = xyz
end_points['seed_features'] = features
# Directly predict bounding boxes (skips voting)
end_points = self.pnet(xyz, features, end_points)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
# Define dataset
TRAIN_DATASET = SunrgbdDetectionVotesDataset('train', num_points=20000, use_v1=True)
# Define model
model = BoxNet(10,12,10,np.random.random((10,3))).cuda()
# Model forward pass
sample = TRAIN_DATASET[5]
inputs = {'point_clouds': torch.from_numpy(sample['point_clouds']).unsqueeze(0).cuda()}
end_points = model(inputs)
for key in end_points:
print(key, end_points[key])
# Compute loss
for key in sample:
end_points[key] = torch.from_numpy(sample[key]).unsqueeze(0).cuda()
loss, end_points = get_loss(end_points, DC)
print('loss', loss)
end_points['point_clouds'] = inputs['point_clouds']
end_points['pred_mask'] = np.ones((1,128))
dump_results(end_points, 'tmp', DC)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/boxnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
from models.backbone.pointnet2.pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
from models.backbone.pointnet2.pointnet2_utils import furthest_point_sample
from models.backbone.sparseconv.config import get_config
from models.backbone.sparseconv.models_sparseconv import load_model
import MinkowskiEngine as ME
class Pointnet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, input_feature_dim=0):
super().__init__()
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX_inds: int64 Tensor of shape (B,K) with values in [0,N-1]
"""
if not end_points: end_points = {}
batch_size = pointcloud.shape[0]
xyz, features = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz, features)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 2 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
end_points['fp2_features'] = features
end_points['fp2_xyz'] = end_points['sa2_xyz']
num_seed = end_points['fp2_xyz'].shape[1]
end_points['fp2_inds'] = end_points['sa1_inds'][:,0:num_seed] # indices among the entire input point clouds
return end_points
class SparseConvBackbone(nn.Module):
def __init__(self,
input_feature_dim=3,
output_feature_dim=256,
num_seed=1024,
model='Res16UNet34C',
config=None):
super().__init__()
config = get_config(["--conv1_kernel_size", "3", "--model", model])
# from pdb import set_trace; set_trace()
self.net = load_model(model)(
input_feature_dim, output_feature_dim, config)
self.num_seed = num_seed
def forward(self, points, coords, feats, inds, end_points=None):
inputs = ME.SparseTensor(feats.cpu(), coords=coords.cpu().int()).to(coords.device)
outputs = self.net(inputs)
features = outputs.F
# randomly down-sample to num_seed points & create batches
bsz, num_points, _ = points.size()
points = points.view(-1, 3)
batch_ids = coords[:, 0]
voxel_ids = inds + batch_ids * num_points
sampled_inds, sampled_features, sampled_points = [], [], []
for b in range(bsz):
sampled_id = furthest_point_sample(
points[voxel_ids[batch_ids == b]].unsqueeze(0),
self.num_seed).squeeze(0).long()
sampled_inds.append(inds[batch_ids == b][sampled_id])
sampled_features.append(features[batch_ids == b][sampled_id])
sampled_points.append(points[voxel_ids[batch_ids == b]][sampled_id])
end_points['fp2_features'] = torch.stack(sampled_features, 0).transpose(1, 2)
end_points['fp2_xyz'] = torch.stack(sampled_points, 0)
end_points['fp2_inds'] = torch.stack(sampled_inds, 0)
# from pdb import set_trace; set_trace()
return end_points
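# Note on the inputs to SparseConvBackbone.forward (see collate_fn in
# models/backbone/sparseconv/voxelized_dataset.py): `points` is the (B, N, 3)
# point cloud, `coords` the (M, 4) [batch_id, x, y, z] voxel coordinates,
# `feats` the per-voxel colors, and `inds` the voxel-to-point indices per sample.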
if __name__=='__main__':
backbone_net = Pointnet2Backbone(input_feature_dim=3).cuda()
print(backbone_net)
backbone_net.eval()
out = backbone_net(torch.rand(16,20000,6).cuda())
for key in sorted(out.keys()):
print(key, '\t', out[key].shape)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone_module.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Deep hough voting network for 3D object detection in point clouds.
Author: Charles R. Qi and Or Litany
"""
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from backbone_module import Pointnet2Backbone, SparseConvBackbone
from voting_module import VotingModule
from proposal_module import ProposalModule
from dump_helper import dump_results
from loss_helper import get_loss
class VoteNet(nn.Module):
r"""
A deep neural network for 3D object detection with end-to-end optimizable hough voting.
Parameters
----------
num_class: int
Number of semantics classes to predict over -- size of softmax classifier
num_heading_bin: int
num_size_cluster: int
input_feature_dim: (default: 0)
Input dim in the feature descriptor for each point. If the point cloud is Nx9,
this value should be 6: 3 of the 9 channels are xyz and the remaining 6 are feature descriptors
num_proposal: int (default: 128)
Number of proposals/detections generated from the network. Each proposal is a 3D OBB with a semantic class.
vote_factor: (default: 1)
Number of votes generated from each seed point.
"""
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr,
input_feature_dim=0, num_proposal=128, vote_factor=1, sampling='vote_fps',
backbone='pointnet2'):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
assert(mean_size_arr.shape[0] == self.num_size_cluster)
self.input_feature_dim = input_feature_dim
self.num_proposal = num_proposal
self.vote_factor = vote_factor
self.sampling=sampling
self.backbone = backbone
# Backbone point feature learning
if backbone == 'pointnet2':
self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)
else:
self.backbone_net = SparseConvBackbone(
input_feature_dim=self.input_feature_dim + 3,
output_feature_dim=256,
num_seed=1024)
# from pdb import set_trace; set_trace()
# Hough voting
self.vgen = VotingModule(self.vote_factor, 256)
# Vote aggregation and detection
self.pnet = ProposalModule(num_class, num_heading_bin, num_size_cluster,
mean_size_arr, num_proposal, sampling)
def forward(self, inputs):
""" Forward pass of the network
Args:
inputs: dict
{point_clouds}
point_clouds: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns:
end_points: dict
"""
end_points = {}
batch_size = inputs['point_clouds'].shape[0]
if self.backbone == 'pointnet2':
end_points = self.backbone_net(
inputs['point_clouds'], end_points)
else:
end_points = self.backbone_net(
inputs['point_clouds'],
inputs['voxel_coords'],
inputs['voxel_feats'],
inputs['voxel_inds'],
end_points)
# from pdb import set_trace; set_trace()
# --------- HOUGH VOTING ---------
xyz = end_points['fp2_xyz']
features = end_points['fp2_features']
end_points['seed_inds'] = end_points['fp2_inds']
end_points['seed_xyz'] = xyz
end_points['seed_features'] = features
xyz, features = self.vgen(xyz, features)
features_norm = torch.norm(features, p=2, dim=1)
features = features.div(features_norm.unsqueeze(1))
end_points['vote_xyz'] = xyz
end_points['vote_features'] = features
end_points = self.pnet(xyz, features, end_points)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
from loss_helper import get_loss
# Define model
model = VoteNet(10,12,10,np.random.random((10,3))).cuda()
try:
# Define dataset
TRAIN_DATASET = SunrgbdDetectionVotesDataset('train', num_points=20000, use_v1=True)
# Model forward pass
sample = TRAIN_DATASET[5]
inputs = {'point_clouds': torch.from_numpy(sample['point_clouds']).unsqueeze(0).cuda()}
except:
print('Dataset has not been prepared. Use a random sample.')
inputs = {'point_clouds': torch.rand((20000,3)).unsqueeze(0).cuda()}
end_points = model(inputs)
for key in end_points:
print(key, end_points[key])
try:
# Compute loss
for key in sample:
end_points[key] = torch.from_numpy(sample[key]).unsqueeze(0).cuda()
loss, end_points = get_loss(end_points, DC)
print('loss', loss)
end_points['point_clouds'] = inputs['point_clouds']
end_points['pred_mask'] = np.ones((1,128))
dump_results(end_points, 'tmp', DC)
except:
print('Dataset has not been prepared. Skip loss and dump.')
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/votenet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
def str2opt(arg):
assert arg in ['SGD', 'Adam']
return arg
def str2scheduler(arg):
assert arg in ['StepLR', 'PolyLR', 'ExpLR', 'SquaredLR']
return arg
def str2bool(v):
return v.lower() in ('true', '1')
def str2list(l):
return [int(i) for i in l.split(',')]
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
arg_lists = []
parser = argparse.ArgumentParser()
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--model', type=str,
default='ResUNet14', help='Model name')
net_arg.add_argument(
'--conv1_kernel_size', type=int, default=3, help='First layer conv kernel size')
net_arg.add_argument('--weights', type=str, default='None',
help='Saved weights to load')
net_arg.add_argument(
'--weights_for_inner_model',
type=str2bool,
default=False,
help='Weights for model inside a wrapper')
net_arg.add_argument(
'--dilations', type=str2list, default='1,1,1,1', help='Dilations used for ResNet or DenseNet')
# Wrappers
net_arg.add_argument('--wrapper_type', default='None',
type=str, help='Wrapper on the network')
net_arg.add_argument(
'--wrapper_region_type',
default=1,
type=int,
help='Wrapper connection types 0: hypercube, 1: hypercross, (default: 1)')
net_arg.add_argument('--wrapper_kernel_size', default=3,
type=int, help='Wrapper kernel size')
net_arg.add_argument(
'--wrapper_lr',
default=1e-1,
type=float,
help='Used for freezing or using small lr for the base model, freeze if negative')
# Meanfield arguments
net_arg.add_argument(
'--meanfield_iterations', type=int, default=10, help='Number of meanfield iterations')
net_arg.add_argument('--crf_spatial_sigma', default=1,
type=int, help='Trilateral spatial sigma')
net_arg.add_argument(
'--crf_chromatic_sigma', default=12, type=int, help='Trilateral chromatic sigma')
# Optimizer arguments
opt_arg = add_argument_group('Optimizer')
opt_arg.add_argument('--optimizer', type=str, default='SGD')
opt_arg.add_argument('--lr', type=float, default=1e-2)
opt_arg.add_argument('--sgd_momentum', type=float, default=0.9)
opt_arg.add_argument('--sgd_dampening', type=float, default=0.1)
opt_arg.add_argument('--adam_beta1', type=float, default=0.9)
opt_arg.add_argument('--adam_beta2', type=float, default=0.999)
opt_arg.add_argument('--weight_decay', type=float, default=1e-4)
opt_arg.add_argument('--param_histogram_freq', type=int, default=100)
opt_arg.add_argument('--save_param_histogram', type=str2bool, default=False)
opt_arg.add_argument('--iter_size', type=int, default=1,
help='accumulate gradient')
opt_arg.add_argument('--bn_momentum', type=float, default=0.02)
# Scheduler
opt_arg.add_argument('--scheduler', type=str2scheduler, default='StepLR')
opt_arg.add_argument('--max_iter', type=int, default=6e4)
opt_arg.add_argument('--step_size', type=int, default=2e4)
opt_arg.add_argument('--step_gamma', type=float, default=0.1)
opt_arg.add_argument('--poly_power', type=float, default=0.9)
opt_arg.add_argument('--exp_gamma', type=float, default=0.95)
opt_arg.add_argument('--exp_step_size', type=float, default=445)
# Directories
dir_arg = add_argument_group('Directories')
dir_arg.add_argument('--log_dir', type=str, default='outputs/default')
dir_arg.add_argument('--data_dir', type=str, default='data')
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--dataset', type=str,
default='ScannetVoxelization2cmDataset')
data_arg.add_argument('--temporal_dilation', type=int, default=30)
data_arg.add_argument('--temporal_numseq', type=int, default=3)
data_arg.add_argument('--point_lim', type=int, default=-1)
data_arg.add_argument('--pre_point_lim', type=int, default=-1)
data_arg.add_argument('--batch_size', type=int, default=16)
data_arg.add_argument('--val_batch_size', type=int, default=1)
data_arg.add_argument('--test_batch_size', type=int, default=1)
data_arg.add_argument('--cache_data', type=str2bool, default=False)
data_arg.add_argument(
'--num_workers', type=int, default=1, help='num workers for train/test dataloader')
data_arg.add_argument('--num_val_workers', type=int,
default=1, help='num workers for val dataloader')
data_arg.add_argument('--ignore_label', type=int, default=255)
data_arg.add_argument('--return_transformation', type=str2bool, default=False)
data_arg.add_argument('--ignore_duplicate_class', type=str2bool, default=False)
data_arg.add_argument('--partial_crop', type=float, default=0.)
data_arg.add_argument('--train_limit_numpoints', type=int, default=0)
# Point Cloud Dataset
data_arg.add_argument(
'--synthia_path',
type=str,
default='/home/chrischoy/datasets/Synthia/Synthia4D',
help='Point Cloud dataset root dir')
# For temporal sequences
data_arg.add_argument(
'--synthia_camera_path', type=str, default='/home/chrischoy/datasets/Synthia/%s/CameraParams/')
data_arg.add_argument('--synthia_camera_intrinsic_file',
type=str, default='intrinsics.txt')
data_arg.add_argument(
'--synthia_camera_extrinsics_file', type=str, default='Stereo_Right/Omni_F/%s.txt')
data_arg.add_argument('--temporal_rand_dilation', type=str2bool, default=False)
data_arg.add_argument('--temporal_rand_numseq', type=str2bool, default=False)
data_arg.add_argument(
'--scannet_path',
type=str,
default='/home/chrischoy/datasets/scannet/scannet_preprocessed',
help='Scannet online voxelization dataset root dir')
data_arg.add_argument(
'--stanford3d_path',
type=str,
default='/home/chrischoy/datasets/Stanford3D',
help='Stanford precropped dataset root dir')
# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--is_train', type=str2bool, default=True)
train_arg.add_argument('--stat_freq', type=int,
default=40, help='print frequency')
train_arg.add_argument('--test_stat_freq', type=int,
default=100, help='print frequency')
train_arg.add_argument('--save_freq', type=int,
default=1000, help='save frequency')
train_arg.add_argument('--val_freq', type=int,
default=1000, help='validation frequency')
train_arg.add_argument(
'--empty_cache_freq', type=int, default=1, help='Clear pytorch cache frequency')
train_arg.add_argument('--train_phase', type=str,
default='train', help='Dataset for training')
train_arg.add_argument('--val_phase', type=str,
default='val', help='Dataset for validation')
train_arg.add_argument(
'--overwrite_weights', type=str2bool, default=True, help='Overwrite checkpoint during training')
train_arg.add_argument(
'--resume', default=None, type=str, help='path to latest checkpoint (default: none)')
train_arg.add_argument(
'--resume_optimizer',
default=True,
type=str2bool,
help='Use checkpoint optimizer states when resume training')
train_arg.add_argument('--eval_upsample', type=str2bool, default=False)
train_arg.add_argument(
'--lenient_weight_loading',
type=str2bool,
default=False,
help='Weights with the same size will be loaded')
# Distributed Training configurations
ddp_arg = add_argument_group('Distributed')
ddp_arg.add_argument('--distributed-world-size', type=int, metavar='N',
default=max(1, torch.cuda.device_count()),
help='total number of GPUs across all nodes (default: all visible GPUs)')
ddp_arg.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
ddp_arg.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
ddp_arg.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
'establish initial connection')
ddp_arg.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
ddp_arg.add_argument('--device-id', '--local_rank', default=0, type=int,
help='which GPU to use (usually configured automatically)')
ddp_arg.add_argument('--distributed-no-spawn', action='store_true',
help='do not spawn multiple processes even if multiple GPUs are visible')
ddp_arg.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
ddp_arg.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
help='bucket size for reduction')
# Data augmentation
data_aug_arg = add_argument_group('DataAugmentation')
data_aug_arg.add_argument(
'--use_feat_aug', type=str2bool, default=True, help='Simple feat augmentation')
data_aug_arg.add_argument(
'--data_aug_color_trans_ratio', type=float, default=0.10, help='Color translation range')
data_aug_arg.add_argument(
'--data_aug_color_jitter_std', type=float, default=0.05, help='STD of color jitter')
data_aug_arg.add_argument('--normalize_color', type=str2bool, default=True)
data_aug_arg.add_argument('--data_aug_scale_min', type=float, default=0.9)
data_aug_arg.add_argument('--data_aug_scale_max', type=float, default=1.1)
data_aug_arg.add_argument(
'--data_aug_hue_max', type=float, default=0.5, help='Hue translation range. [0, 1]')
data_aug_arg.add_argument(
'--data_aug_saturation_max',
type=float,
default=0.20,
help='Saturation translation range, [0, 1]')
# Test
test_arg = add_argument_group('Test')
test_arg.add_argument('--visualize', type=str2bool, default=False)
test_arg.add_argument('--test_temporal_average', type=str2bool, default=False)
test_arg.add_argument('--visualize_path', type=str,
default='outputs/visualize')
test_arg.add_argument('--save_prediction', type=str2bool, default=False)
test_arg.add_argument('--save_pred_dir', type=str, default='outputs/pred')
test_arg.add_argument('--test_phase', type=str,
default='test', help='Dataset for test')
test_arg.add_argument(
'--evaluate_original_pointcloud',
type=str2bool,
default=False,
help='Test on the original pointcloud space during network evaluation using voxel projection.')
test_arg.add_argument(
'--test_original_pointcloud',
type=str2bool,
default=False,
help='Test on the original pointcloud space as given by the dataset using kd-tree.')
# Misc
misc_arg = add_argument_group('Misc')
misc_arg.add_argument('--is_cuda', type=str2bool, default=True)
misc_arg.add_argument('--load_path', type=str, default='')
misc_arg.add_argument('--log_step', type=int, default=50)
misc_arg.add_argument('--log_level', type=str,
default='INFO', choices=['INFO', 'DEBUG', 'WARN'])
misc_arg.add_argument('--num_gpu', type=int, default=1)
misc_arg.add_argument('--seed', type=int, default=123)
def get_config(args=None):
config = parser.parse_args(args=args)
return config
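# A minimal usage sketch: the detection pipeline builds this config programmatically
# instead of reading sys.argv, e.g. in SparseConvBackbone:
# config = get_config(["--conv1_kernel_size", "3", "--model", "Res16UNet34C"])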
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/config.py
|
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data._utils.collate import default_collate
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import MinkowskiEngine as ME
class VoxelizationDataset(Dataset):
"""
Wrapper dataset which voxelizes the original point clouds
"""
def __init__(self, dataset, voxel_size=0.05):
self.dataset = dataset
self.VOXEL_SIZE = voxel_size
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
ret_dict = self.dataset[idx]
# voxelization
coords = np.floor(ret_dict['point_clouds'] / self.VOXEL_SIZE)
inds = ME.utils.sparse_quantize(coords, return_index=True)
coords = coords[inds].astype(np.int32)
colors = ret_dict['pcl_color'][inds]
ret_dict['voxel'] = (coords, np.array(inds, dtype=np.int32), colors)
return ret_dict
def collate_fn(samples):
data, voxel = [], []
for sample in samples:
data.append({w: sample[w] for w in sample if w != 'voxel'})
voxel.append(sample['voxel'])
# for non-voxel data, use default collate
data_batch = default_collate(data)
batch_ids = np.array(
[b for b, v in enumerate(voxel) for _ in range(v[0].shape[0])])
voxel_ids = np.concatenate([v[1] for v in voxel], 0)
coords = np.concatenate([v[0] for v in voxel], 0)
coords = np.concatenate([batch_ids[:, None], coords], 1)
colors = np.concatenate([v[2] for v in voxel], 0)
data_batch['voxel_coords'] = torch.from_numpy(coords)
data_batch['voxel_inds'] = torch.from_numpy(voxel_ids)
#data_batch['voxel_feats'] = data_batch['point_clouds'].new_ones(batch_ids.shape[0], 3)
data_batch['voxel_feats'] = torch.from_numpy(colors).float()
return data_batch
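# A minimal usage sketch (base_dataset is an assumption; any dataset whose samples
# contain 'point_clouds' and 'pcl_color' works):
# from torch.utils.data import DataLoader
# voxel_dataset = VoxelizationDataset(base_dataset, voxel_size=0.05)
# loader = DataLoader(voxel_dataset, batch_size=8, collate_fn=collate_fn)
# batch = next(iter(loader))  # adds 'voxel_coords', 'voxel_inds', 'voxel_feats'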
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/voxelized_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/__init__.py
|
import collections.abc
import numpy as np
import MinkowskiEngine as ME
from scipy.linalg import expm, norm
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
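# A worked example: M([0, 0, 1], np.pi / 2) is a 90-degree rotation about the
# z-axis, mapping (1, 0, 0) to approximately (0, 1, 0).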
class Voxelizer:
def __init__(self,
voxel_size=1,
clip_bound=None,
use_augmentation=False,
scale_augmentation_bound=None,
rotation_augmentation_bound=None,
translation_augmentation_ratio_bound=None,
ignore_label=255):
"""
Args:
voxel_size: side length of a voxel
clip_bound: boundary of the voxelizer. Points outside the bound will be deleted
expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)).
scale_augmentation_bound: None or (0.9, 1.1)
rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis.
Use random order of x, y, z to prevent bias.
translation_augmentation_ratio_bound: ((-5, 5), (0, 0), (-10, 10))
ignore_label: label assigned for ignore (not a training label).
"""
self.voxel_size = voxel_size
self.clip_bound = clip_bound
self.ignore_label = ignore_label
# Augmentation
self.use_augmentation = use_augmentation
self.scale_augmentation_bound = scale_augmentation_bound
self.rotation_augmentation_bound = rotation_augmentation_bound
self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound
def get_transformation_matrix(self):
voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4)
# Get clip boundary from config or pointcloud.
# Get inner clip bound to crop from.
# Transform pointcloud coordinate to voxel coordinate.
# 1. Random rotation
rot_mat = np.eye(3)
if self.use_augmentation and self.rotation_augmentation_bound is not None:
if isinstance(self.rotation_augmentation_bound, collections.abc.Iterable):
rot_mats = []
for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
else:
raise ValueError()
rotation_matrix[:3, :3] = rot_mat
# 2. Scale and translate to the voxel space.
scale = 1 / self.voxel_size
if self.use_augmentation and self.scale_augmentation_bound is not None:
scale *= np.random.uniform(*self.scale_augmentation_bound)
np.fill_diagonal(voxelization_matrix[:3, :3], scale)
# Get final transformation matrix.
return voxelization_matrix, rotation_matrix
def clip(self, coords, center=None, trans_aug_ratio=None):
bound_min = np.min(coords, 0).astype(float)
bound_max = np.max(coords, 0).astype(float)
bound_size = bound_max - bound_min
if center is None:
center = bound_min + bound_size * 0.5
if trans_aug_ratio is not None:
trans = np.multiply(trans_aug_ratio, bound_size)
center += trans
lim = self.clip_bound
if isinstance(self.clip_bound, (int, float)):
if bound_size.max() < self.clip_bound:
return None
else:
clip_inds = ((coords[:, 0] >= (-lim + center[0])) &
(coords[:, 0] < (lim + center[0])) &
(coords[:, 1] >= (-lim + center[1])) &
(coords[:, 1] < (lim + center[1])) &
(coords[:, 2] >= (-lim + center[2])) &
(coords[:, 2] < (lim + center[2])))
return clip_inds
# Clip points outside the limit
clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) &
(coords[:, 0] < (lim[0][1] + center[0])) &
(coords[:, 1] >= (lim[1][0] + center[1])) &
(coords[:, 1] < (lim[1][1] + center[1])) &
(coords[:, 2] >= (lim[2][0] + center[2])) &
(coords[:, 2] < (lim[2][1] + center[2])))
return clip_inds
def voxelize(self, coords, feats, labels, center=None):
assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0]
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(
*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
homo_coords = np.hstack(
(coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3])
# Align all coordinates to the origin.
min_coords = coords_aug.min(0)
M_t = np.eye(4)
M_t[:3, -1] = -min_coords
rigid_transformation = M_t @ rigid_transformation
coords_aug = np.floor(coords_aug - min_coords)
# key = self.hash(coords_aug) # floor happens by astype(np.uint64)
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
return coords_aug, feats, labels, rigid_transformation.flatten()
def voxelize_temporal(self,
coords_t,
feats_t,
labels_t,
centers=None,
return_transformation=False):
# Legacy code, remove
if centers is None:
centers = [
None,
] * len(coords_t)
coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], []
# ######################### Data Augmentation #############################
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
# ######################### Voxelization #############################
# Voxelize coords
for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers):
###################################
# Clip the data if bound exists
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(
*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
###################################
homo_coords = np.hstack(
(coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3]
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
coords_tc.append(coords_aug)
feats_tc.append(feats)
labels_tc.append(labels)
transformation_tc.append(rigid_transformation.flatten())
return_args = [coords_tc, feats_tc, labels_tc]
if return_transformation:
return_args.append(transformation_tc)
return tuple(return_args)
def test():
N = 16575
coords = np.random.rand(N, 3) * 10
feats = np.random.rand(N, 4)
labels = np.floor(np.random.rand(N) * 3)
coords[:3] = 0
labels[:3] = 2
voxelizer = Voxelizer()
print(voxelizer.voxelize(coords, feats, labels))
if __name__ == '__main__':
test()
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/voxelizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, NetClass, in_nchannel, out_nchannel, config):
super(Wrapper, self).__init__()
self.initialize_filter(NetClass, in_nchannel, out_nchannel, config)
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
raise NotImplementedError('Must initialize a model and a filter')
def forward(self, x, coords, colors=None):
soutput = self.model(x)
# During training, make the network invariant to the filter
if not self.training or random.random() < 0.5:
# Filter requires the model to finish the forward pass
wrapper_coords = self.filter.initialize_coords(self.model, coords, colors)
finput = SparseTensor(soutput.F, wrapper_coords)
soutput = self.filter(finput)
return soutput
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv.resnet import ResNetBase, get_norm
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, conv, conv_tr
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
stride=1,
dilation=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1)
return self.final(out)
class ResUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1)
class ResUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2)
class ResUNet18INBN(ResUNet18):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class ResUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3, 2, 2)
class ResUNet14D(ResUNet14):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet18D(ResUNet18):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34D(ResUNet34):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34E(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 64)
class ResUNet34F(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 32)
class MinkUNetHyper(MinkUNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D)
out_pool4 = self.inplanes
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D)
out_pool5 = self.inplanes
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
out_pool6 = self.inplanes
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out_5 = self.pool_tr5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out_6 = self.pool_tr6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1, out_6, out_5)
return self.final(out)
class MinkUNetHyper14INBN(MinkUNetHyper):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class STMinkUNetBase(MinkUNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResUNet14(STMinkUNetBase, ResUNet14):
pass
class STResUNet18(STMinkUNetBase, ResUNet18):
pass
class STResUNet34(STMinkUNetBase, ResUNet34):
pass
class STResUNet50(STMinkUNetBase, ResUNet50):
pass
class STResUNet101(STMinkUNetBase, ResUNet101):
pass
class STResTesseractUNetBase(STMinkUNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14):
pass
class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18):
pass
class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34):
pass
class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50):
pass
class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101):
pass
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/resunet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv import resunet as resunet
from models.backbone.sparseconv.models_sparseconv import res16unet as res16unet
# from models.trilateral_crf import TrilateralCRF
from models.backbone.sparseconv.models_sparseconv.conditional_random_fields import BilateralCRF, TrilateralCRF
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(resunet)
add_models(res16unet)
WRAPPERS = [BilateralCRF, TrilateralCRF]
def get_models():
'''Returns the list of available model classes.'''
return MODELS
def get_wrappers():
return WRAPPERS
def load_model(name):
'''Returns the model class given its class name.
'''
# Find the model class from its name
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model name. Options are:')
# Display a list of valid model names
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
def load_wrapper(name):
'''Returns the wrapper class given its class name.
'''
# Find the model class from its name
all_wrappers = get_wrappers()
mdict = {wrapper.__name__: wrapper for wrapper in all_wrappers}
if name not in mdict:
print('Invalid wrapper name. Options are:')
# Display a list of valid wrapper names
for wrapper in all_wrappers:
print('\t* {}'.format(wrapper.__name__))
return None
WrapperClass = mdict[name]
return WrapperClass
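# Illustrative usage sketch (not part of the original file): load_model/load_wrapper
# return classes rather than instances, so the caller instantiates them. The config
# fields named below (conv1_kernel_size, bn_momentum) are assumptions inferred from the
# architectures in resunet.py/res16unet.py, not a documented interface.
def _example_build_model(config, in_channels=3, out_channels=20):
    NetClass = load_model('Res16UNet34C')
    if NetClass is None:
        return None
    return NetClass(in_channels, out_channels, config, D=3)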
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv.resnet import ResNetBase, get_norm
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, conv, conv_tr
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# pixel_dist=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# pixel_dist=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
# pixel_dist=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
# pixel_dist=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
# pixel_dist=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
return self.final(out)
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet14A(STRes16UNetBase, Res16UNet14A):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
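# Illustrative forward-pass sketch (not part of the original file). It assumes the
# MinkowskiEngine 0.4.x-style API targeted by modules/common.py (`has_bias`, `dimension`
# keywords) and the batch-index-last coordinate layout used in conditional_random_fields.py;
# `config` only needs the fields read in network_initialization (conv1_kernel_size, bn_momentum).
if __name__ == '__main__':
    import torch
    import MinkowskiEngine as ME
    from types import SimpleNamespace
    config = SimpleNamespace(conv1_kernel_size=3, bn_momentum=0.05)
    # unique voxel coordinates on an 8x8x8 grid, with a trailing batch-index column
    coords = torch.cartesian_prod(torch.arange(8), torch.arange(8), torch.arange(8)).int()
    coords = torch.cat([coords, torch.zeros(coords.shape[0], 1, dtype=torch.int32)], dim=1)
    feats = torch.rand(coords.shape[0], 3)
    model = Res16UNet34C(in_channels=3, out_channels=20, config=config)
    soutput = model(ME.SparseTensor(feats, coords))
    print(soutput.F.shape)  # per-voxel logits at the input resolution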
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/res16unet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from MinkowskiEngine import MinkowskiNetwork
class Model(MinkowskiNetwork):
"""
Base network for all sparse convnets.
By default, all networks are segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class HighDimensionalModel(Model):
"""
Base network for all spatio(-temporal)-chromatic sparse convnets.
"""
def __init__(self, in_channels, out_channels, config, D, **kwargs):
assert D > 4, "Number of dimensions must be greater than 4"
super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from models.backbone.sparseconv.models_sparseconv.model import Model
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class STResNetBase(ResNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResNet14(STResNetBase, ResNet14):
pass
class STResNet18(STResNetBase, ResNet18):
pass
class STResNet34(STResNetBase, ResNet34):
pass
class STResNet50(STResNetBase, ResNet50):
pass
class STResNet101(STResNetBase, ResNet101):
pass
class STResTesseractNetBase(STResNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractNet14(STResTesseractNetBase, STResNet14):
pass
class STResTesseractNet18(STResTesseractNetBase, STResNet18):
pass
class STResTesseractNet34(STResTesseractNetBase, STResNet34):
pass
class STResTesseractNet50(STResTesseractNetBase, STResNet50):
pass
class STResTesseractNet101(STResTesseractNetBase, STResNet101):
pass
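# Illustrative construction sketch (not part of the original file). ResNetBase reads
# config.conv1_kernel_size, config.bn_momentum and config.dilations in
# network_initialization; the values below are assumptions for demonstration only. The
# initial sum pool plus the four stride-2 stages give the OUT_PIXEL_DIST of 32.
if __name__ == '__main__':
    from types import SimpleNamespace
    config = SimpleNamespace(conv1_kernel_size=3, bn_momentum=0.1, dilations=(1, 1, 1, 1))
    net = ResNet18(in_channels=3, out_channels=20, config=config, D=3)
    print(net)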
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.autograd import Variable
from MinkowskiEngine import SparseTensor, MinkowskiConvolution, MinkowskiConvolutionFunction, convert_to_int_tensor
from MinkowskiEngine import convert_region_type as me_convert_region_type
from models.backbone.sparseconv.lib.math_functions import SparseMM
from models.backbone.sparseconv.models_sparseconv.model import HighDimensionalModel
from models.backbone.sparseconv.models_sparseconv.wrapper import Wrapper
from models.backbone.sparseconv.models_sparseconv.modules.common import convert_region_type
class MeanField(HighDimensionalModel):
"""
Abstract class for the bilateral and trilateral meanfield
"""
OUT_PIXEL_DIST = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, nchannels, spatial_sigma, chromatic_sigma, meanfield_iterations, is_temporal,
config, **kwargs):
D = 7 if is_temporal else 6
self.is_temporal = is_temporal
# Setup metadata
super(MeanField, self).__init__(nchannels, nchannels, config, D=D)
self.spatial_sigma = spatial_sigma
self.chromatic_sigma = chromatic_sigma
# temporal sigma is 1
self.meanfield_iterations = meanfield_iterations
self.pixel_dist = 1
self.stride = 1
self.dilation = 1
conv = MinkowskiConvolution(
nchannels,
nchannels,
kernel_size=config.wrapper_kernel_size,
has_bias=False,
region_type=convert_region_type(config.wrapper_region_type),
dimension=D)
# Create a region_offset
self.region_type_, self.region_offset_, _ = me_convert_region_type(
conv.region_type, 1, conv.kernel_size, conv.up_stride, conv.dilation, conv.region_offset,
conv.axis_types, conv.dimension)
# Check whether the mapping is required
self.requires_mapping = False
self.conv = conv
self.kernel = conv.kernel
self.convs = {}
self.softmaxes = {}
for i in range(self.meanfield_iterations):
self.softmaxes[i] = nn.Softmax(dim=1)
self.convs[i] = MinkowskiConvolutionFunction()
def initialize_coords(self, model, in_coords, in_color):
if torch.prod(convert_to_int_tensor(model.OUT_PIXEL_DIST, model.D)) != 1:
self.requires_mapping = True
out_coords = model.get_coords(model.OUT_PIXEL_DIST)
out_color = model.permute_feature(in_color, model.OUT_PIXEL_DIST).int()
# Tri/Bi-lateral grid
out_tri_coords = torch.cat(
[
(torch.floor(out_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(out_color.float() / self.chromatic_sigma)).int(),
out_coords[:, 3:] # (time and) batch
],
dim=1)
orig_tri_coords = torch.cat(
[
(torch.floor(in_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(in_color.float() / self.chromatic_sigma)).int(),
in_coords[:, 3:] # (time and) batch
],
dim=1)
crf_tri_coords = torch.cat((out_tri_coords, orig_tri_coords), dim=0)
# Create a trilateral Grid
# super(MeanField, self).initialize_coords_with_duplicates(crf_tri_coords)
# Create Sparse matrix mappings to/from the CRF coords
in_cols = self.get_index_map(out_tri_coords, 1)
self.in_mapping = torch.sparse.FloatTensor(
torch.stack((in_cols.long(), torch.arange(in_cols.size(0), out=torch.LongTensor()))),
torch.ones(in_cols.size(0)), torch.Size((self.n_rows, in_cols.size(0))))
out_cols = self.get_index_map(orig_tri_coords, 1)
self.out_mapping = torch.sparse.FloatTensor(
torch.stack((torch.arange(out_cols.size(0), out=torch.LongTensor()), out_cols.long())),
torch.ones(out_cols.size(0)), torch.Size((out_cols.size(0), self.n_rows)))
if self.config.is_cuda:
self.in_mapping, self.out_mapping = self.in_mapping.cuda(), self.out_mapping.cuda()
else:
self.requires_mapping = False
out_coords = in_coords
out_color = in_color
crf_tri_coords = torch.cat(
[
(torch.floor(in_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(in_color.float() / self.chromatic_sigma)).int(),
in_coords[:, 3:], # (time and) batch
],
dim=1)
return crf_tri_coords
def forward(self, x):
xf = x.F
if self.requires_mapping:
# Map the network output to CRF input
xf = SparseMM()(Variable(self.in_mapping), xf)
out = xf
for i in range(self.meanfield_iterations): # Meanfield iteration
# Normalization
out = self.softmaxes[i](out)
# Pairwise potential
out = self.convs[i].apply(out, self.conv.kernel, x.pixel_dist, self.conv.stride,
self.conv.kernel_size, self.conv.dilation, self.region_type_,
self.region_offset_, x.coords_key, x.coords_key, x.C)
# Add unary
out += xf
if self.requires_mapping:
# Map the CRF output to the original space
out = SparseMM()(Variable(self.out_mapping), out)
return SparseTensor(out, coords_key=x.coords_key, coords_manager=x.C)
class BilateralCRF(Wrapper):
OUT_PIXEL_DIST = 1
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
self.model = NetClass(in_nchannel, out_nchannel, config)
self.filter = MeanField(
out_nchannel,
spatial_sigma=config.crf_spatial_sigma,
chromatic_sigma=config.crf_chromatic_sigma,
meanfield_iterations=config.meanfield_iterations,
is_temporal=False,
config=config)
class TrilateralCRF(Wrapper):
OUT_PIXEL_DIST = 1
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
self.model = NetClass(in_nchannel, out_nchannel, config)
self.filter = MeanField(
out_nchannel,
spatial_sigma=config.crf_spatial_sigma,
chromatic_sigma=config.crf_chromatic_sigma,
meanfield_iterations=config.meanfield_iterations,
is_temporal=True,
config=config)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/conditional_random_fields.py
|
import torch.nn as nn
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BasicBlockINBN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckIN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BottleneckINBN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/resnet_block.py
|
import torch.nn as nn
import MinkowskiEngine as ME
from models.modules.common import ConvType, NormType
from models.modules.resnet_block import BasicBlock, Bottleneck
class SELayer(nn.Module):
def __init__(self, channel, reduction=16, D=-1):
# Global coords does not require coords_key
super(SELayer, self).__init__()
self.fc = nn.Sequential(
ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid())
self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D)
def forward(self, x):
y = self.pooling(x)
y = self.fc(y)
return self.broadcast_mul(x, y)
class SEBasicBlock(BasicBlock):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
reduction=16,
D=-1):
super(SEBasicBlock, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBasicBlockSN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBasicBlockIN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBasicBlockLN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
class SEBottleneck(Bottleneck):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
D=3,
reduction=16):
super(SEBottleneck, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes * self.expansion, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneckSN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBottleneckIN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBottleneckLN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/senet_block.py
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/__init__.py
|
|
import collections
from enum import Enum
import torch.nn as nn
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
INSTANCE_NORM = 1
INSTANCE_BATCH_NORM = 2
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels)
elif norm_type == NormType.INSTANCE_BATCH_NORM:
return nn.Sequential(
ME.MinkowskiInstanceNorm(n_channels),
ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS'
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
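# Illustrative sketch (not part of the original file): building a spatial 3x3x3 convolution
# and a matching transposed convolution with the helpers above. Assumes the MinkowskiEngine
# 0.4.x KernelGenerator/`has_bias` API these wrappers are written against.
if __name__ == '__main__':
    down = conv(16, 32, kernel_size=3, stride=2, conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)
    up = conv_tr(32, 16, kernel_size=2, upsample_stride=2, conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)
    print(down, up)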
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/common.py
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/lib/__init__.py
|
|
from scipy.sparse import csr_matrix
import torch
class SparseMM(torch.autograd.Function):
"""
Sparse x dense matrix multiplication with autograd support.
Implementation by Soumith Chintala:
https://discuss.pytorch.org/t/
does-pytorch-support-autograd-on-sparse-matrix/6156/7
"""
def forward(self, matrix1, matrix2):
self.save_for_backward(matrix1, matrix2)
return torch.mm(matrix1, matrix2)
def backward(self, grad_output):
matrix1, matrix2 = self.saved_tensors
grad_matrix1 = grad_matrix2 = None
if self.needs_input_grad[0]:
grad_matrix1 = torch.mm(grad_output, matrix2.t())
if self.needs_input_grad[1]:
grad_matrix2 = torch.mm(matrix1.t(), grad_output)
return grad_matrix1, grad_matrix2
def sparse_float_tensor(values, indices, size=None):
"""
Return a torch sparse matrix give values and indices (row_ind, col_ind).
If the size is an integer, return a square matrix with side size.
If the size is a torch.Size, use it to initialize the out tensor.
If none, the size is inferred.
"""
indices = torch.stack(indices).int()
sargs = [indices, values.float()]
if size is not None:
# Use the provided size
if isinstance(size, int):
size = torch.Size((size, size))
sargs.append(size)
if values.is_cuda:
return torch.cuda.sparse.FloatTensor(*sargs)
else:
return torch.sparse.FloatTensor(*sargs)
def diags(values, size=None):
values = values.view(-1)
n = values.nelement()
size = torch.Size((n, n))
indices = (torch.arange(0, n), torch.arange(0, n))
return sparse_float_tensor(values, indices, size)
def sparse_to_csr_matrix(tensor):
tensor = tensor.cpu()
inds = tensor._indices().numpy()
vals = tensor._values().numpy()
return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape])
def csr_matrix_to_sparse(mat):
row_ind, col_ind = mat.nonzero()
return sparse_float_tensor(
torch.from_numpy(mat.data),
(torch.from_numpy(row_ind), torch.from_numpy(col_ind)),
size=torch.Size(mat.shape))
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/sparseconv/lib/math_functions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
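# Illustrative sketch (not part of the original file): a SharedMLP applied to grouped point
# features of shape (B, C, npoint, nsample), plus a BNMomentumScheduler that decays
# batch-norm momentum per epoch. The decay schedule itself is an assumption for the example.
if __name__ == '__main__':
    mlp = SharedMLP([3, 64, 128], bn=True)
    x = torch.randn(2, 3, 512, 32)
    print(mlp(x).shape)  # -> torch.Size([2, 128, 512, 32])
    bn_scheduler = BNMomentumScheduler(mlp, bn_lambda=lambda epoch: max(0.5 * (0.5 ** (epoch // 20)), 0.001))
    bn_scheduler.step(1)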
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/pointnet2/pytorch_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = os.path.dirname(os.path.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[os.path.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
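# Build note (not part of the original file): the CUDA extension is typically compiled
# before running the detector, e.g.
#   cd downstream/votenet/models/backbone/pointnet2
#   python setup.py install        # or: python setup.py build_ext --inplace
# A CUDA toolkit matching the installed PyTorch build is assumed.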
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/pointnet2/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)  # self.training is the nn.Module flag; self.train is a method
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of unknown features
known : torch.Tensor
(B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
# type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Performs weighted linear interpolation over 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) feature descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
(B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
(B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
# type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have features=None while use_xyz is False; there would be nothing to group!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
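# Usage sketch (illustrative addition, not part of the original file): grouping
# local neighborhoods around sampled centroids with QueryAndGroup. Assumes CUDA
# tensors and the compiled extension; the centroids would normally come from
# furthest point sampling, and all sizes are illustrative.
def _query_and_group_example():
    grouper = QueryAndGroup(radius=0.2, nsample=32, use_xyz=True)
    xyz = torch.rand(2, 1024, 3).cuda()
    new_xyz = xyz[:, :128, :].contiguous()     # stand-in for FPS centroids
    feats = torch.rand(2, 16, 1024).cuda()     # descriptors (B, C, N)
    return grouper(xyz, new_xyz, feats)        # (B, 3 + 16, 128, 32)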
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/pointnet2/pointnet2_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/pointnet2/pointnet2_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
(B, N, C) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstraction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstraction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
(B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
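# Usage sketch (illustrative addition, not part of the original file): one
# set-abstraction step as used by the VoteNet backbone. The npoint / radius /
# mlp values are illustrative assumptions, not the exact VoteNet configuration;
# requires CUDA and the compiled pointnet2 ops.
def _sa_votes_example():
    sa = PointnetSAModuleVotes(npoint=512, radius=0.2, nsample=32,
                               mlp=[1, 64, 64, 128], use_xyz=True,
                               normalize_xyz=True).cuda()
    xyz = torch.rand(2, 2048, 3).cuda()        # (B, N, 3)
    feats = torch.rand(2, 1, 2048).cuda()      # e.g. a height feature, (B, C, N)
    new_xyz, new_feats, inds = sa(xyz, feats)  # (B,512,3), (B,128,512), (B,512)
    return new_xyz, new_feats, inds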
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
(B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propagates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
(B, C1, n) tensor of the features to be propagated to
known_feats : torch.Tensor
(B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
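# Usage sketch (illustrative addition, not part of the original file):
# propagating coarse features back to a denser point set with PointnetFPModule.
# Channel sizes are illustrative assumptions; mlp[0] must equal the
# concatenated C2 + C1 channels when skip features are provided.
def _fp_module_example():
    fp = PointnetFPModule(mlp=[128 + 64, 128, 128]).cuda()
    unknown = torch.rand(2, 1024, 3).cuda()        # dense xyz
    known = torch.rand(2, 256, 3).cuda()           # coarse xyz
    skip_feats = torch.rand(2, 64, 1024).cuda()    # dense-level features (C1)
    known_feats = torch.rand(2, 128, 256).cuda()   # coarse-level features (C2)
    return fp(unknown, known, skip_feats, known_feats)  # (B, 128, 1024)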
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
(B, C2, N2) tensor of the descriptors of the features
features1 : torch.Tensor
(B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
|
ContrastiveSceneContexts-main
|
downstream/votenet/models/backbone/pointnet2/pointnet2_modules.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import os
import sys
import logging
import numpy as np
import importlib
import warnings
import argparse
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
from models.loss_helper import get_loss as criterion
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
warnings.simplefilter(action='ignore', category=FutureWarning)
from models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler
from models.dump_helper import dump_results, dump_results_
from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from torch.serialization import default_restore_location
from lib.distributed import multi_proc_run, is_master_proc, get_world_size
class DetectionTrainer():
def __init__(self, config):
self.is_master = is_master_proc(get_world_size()) if get_world_size() > 1 else True
self.cur_device = torch.cuda.current_device()
# load the configurations
self.setup_logging()
if os.path.exists('config.yaml'):
logging.info('===> Loading existing config file')
config = OmegaConf.load('config.yaml')
logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# Create Dataset and Dataloader
if config.data.dataset == 'sunrgbd':
from datasets.sunrgbd.sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, MAX_NUM_OBJ
from datasets.sunrgbd.model_util_sunrgbd import SunrgbdDatasetConfig
dataset_config = SunrgbdDatasetConfig()
train_dataset = SunrgbdDetectionVotesDataset('train',
num_points=config.data.num_points,
augment=True,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
use_v1=(not config.data.use_sunrgbd_v2))
test_dataset = SunrgbdDetectionVotesDataset(config.test.phase,
num_points=config.data.num_points,
augment=False,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
use_v1=(not config.data.use_sunrgbd_v2))
elif config.data.dataset == 'scannet':
from datasets.scannet.scannet_detection_dataset import ScannetDetectionDataset, MAX_NUM_OBJ
from datasets.scannet.model_util_scannet import ScannetDatasetConfig
dataset_config = ScannetDatasetConfig()
train_dataset = ScannetDetectionDataset('train',
num_points=config.data.num_points,
augment=True,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
by_scenes=config.data.by_scenes,
by_points=config.data.by_points)
test_dataset = ScannetDetectionDataset(config.test.phase,
num_points=config.data.num_points,
augment=False,
use_color=config.data.use_color,
use_height=(not config.data.no_height))
else:
logging.info('Unknown dataset %s. Exiting...'%(config.data.dataset))
exit(-1)
COLLATE_FN = None
if config.data.voxelization:
from models.backbone.sparseconv.voxelized_dataset import VoxelizationDataset, collate_fn
train_dataset = VoxelizationDataset(train_dataset, config.data.voxel_size)
test_dataset = VoxelizationDataset(test_dataset, config.data.voxel_size)
COLLATE_FN = collate_fn
logging.info('training: {}, testing: {}'.format(len(train_dataset), len(test_dataset)))
self.sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if get_world_size() > 1 else None
train_dataloader = DataLoader(
train_dataset,
batch_size=config.data.batch_size // config.misc.num_gpus,
shuffle=(self.sampler is None),
sampler=self.sampler,
num_workers=config.data.num_workers,
collate_fn=COLLATE_FN)
test_dataloader = DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=COLLATE_FN)
logging.info('train dataloader: {}, test dataloader: {}'.format(len(train_dataloader),len(test_dataloader)))
# Init the model and optimizer
MODEL = importlib.import_module('models.' + config.net.model) # import network module
num_input_channel = int(config.data.use_color)*3 + int(not config.data.no_height)*1
if config.net.model == 'boxnet':
Detector = MODEL.BoxNet
else:
Detector = MODEL.VoteNet
net = Detector(num_class=dataset_config.num_class,
num_heading_bin=dataset_config.num_heading_bin,
num_size_cluster=dataset_config.num_size_cluster,
mean_size_arr=dataset_config.mean_size_arr,
num_proposal=config.net.num_target,
input_feature_dim=num_input_channel,
vote_factor=config.net.vote_factor,
sampling=config.net.cluster_sampling,
backbone=config.net.backbone)
if config.net.weights != '':
#assert config.net.backbone == "sparseconv", "only support sparseconv"
print('===> Loading weights: ' + config.net.weights)
state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))
model = net
if config.net.is_train:
model = net.backbone_net
if config.net.backbone == "sparseconv":
model = net.backbone_net.net
matched_weights = DetectionTrainer.load_state_with_same_shape(model, state['state_dict'])
model_dict = model.state_dict()
model_dict.update(matched_weights)
model.load_state_dict(model_dict)
net.to(self.cur_device)
if get_world_size() > 1:
net = torch.nn.parallel.DistributedDataParallel(
module=net, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False)
# Load the Adam optimizer
self.optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)
# writer
if self.is_master:
self.writer = SummaryWriter(log_dir='tensorboard')
self.config = config
self.dataset_config = dataset_config
self.net = net
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.best_mAP = -1
# Used for AP calculation
self.CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,
'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,
'per_class_proposal': True, 'conf_thresh':0.05, 'dataset_config': dataset_config}
# Used for AP calculation
self.CONFIG_DICT_TEST = {'remove_empty_box': (not config.test.faster_eval),
'use_3d_nms': config.test.use_3d_nms,
'nms_iou': config.test.nms_iou,
'use_old_type_nms': config.test.use_old_type_nms,
'cls_nms': config.test.use_cls_nms,
'per_class_proposal': config.test.per_class_proposal,
'conf_thresh': config.test.conf_thresh,
'dataset_config': dataset_config}
# Load checkpoint if there is any
self.start_epoch = 0
CHECKPOINT_PATH = os.path.join('checkpoint.tar')
if os.path.isfile(CHECKPOINT_PATH):
checkpoint = torch.load(CHECKPOINT_PATH)
if get_world_size() > 1:
_model = self.net.module
else:
_model = self.net
_model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.start_epoch = checkpoint['epoch']
self.best_mAP = checkpoint['best_mAP']
logging.info("-> loaded checkpoint %s (epoch: %d)"%(CHECKPOINT_PATH, self.start_epoch))
# Decay Batchnorm momentum from 0.5 to 0.999
# note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum
BN_MOMENTUM_INIT = 0.5
BN_MOMENTUM_MAX = 0.001
BN_DECAY_STEP = config.optimizer.bn_decay_step
BN_DECAY_RATE = config.optimizer.bn_decay_rate
bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)
self.bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=self.start_epoch-1)
def setup_logging(self):
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.WARN)
if self.is_master:
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
@staticmethod
def load_state_with_same_shape(model, weights):
model_state = model.state_dict()
if list(weights.keys())[0].startswith('module.'):
print("Loading multigpu weights with module. prefix...")
weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith('encoder.'):
logging.info("Loading multigpu weights with encoder. prefix...")
weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}
# print(weights.items())
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
print("Loading weights:" + ', '.join(filtered_weights.keys()))
return filtered_weights
@staticmethod
def get_current_lr(epoch, config):
lr = config.optimizer.learning_rate
for i,lr_decay_epoch in enumerate(config.optimizer.lr_decay_steps):
if epoch >= lr_decay_epoch:
lr *= config.optimizer.lr_decay_rates[i]
return lr
@staticmethod
def adjust_learning_rate(optimizer, epoch, config):
lr = DetectionTrainer.get_current_lr(epoch, config)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def train_one_epoch(self, epoch_cnt):
stat_dict = {} # collect statistics
DetectionTrainer.adjust_learning_rate(self.optimizer, epoch_cnt, self.config)
self.bnm_scheduler.step() # decay BN momentum
self.net.train() # set model to training mode
for batch_idx, batch_data_label in enumerate(self.train_dataloader):
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
self.optimizer.zero_grad()
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
end_points = self.net(inputs)
# Compute loss and gradients, update parameters.
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
loss.backward()
self.optimizer.step()
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_interval = 10
if ((batch_idx+1) % batch_interval == 0) and self.is_master:
logging.info(' ---- batch: %03d ----' % (batch_idx+1))
for key in stat_dict:
self.writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval,
(epoch_cnt*len(self.train_dataloader)+batch_idx)*self.config.data.batch_size)
for key in sorted(stat_dict.keys()):
logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))
stat_dict[key] = 0
def evaluate_one_epoch(self, epoch_cnt):
np.random.seed(0)
stat_dict = {} # collect statistics
ap_calculator = APCalculator(ap_iou_thresh=self.config.test.ap_iou, class2type_map=self.dataset_config.class2type)
self.net.eval() # set model to eval mode (for bn and dp)
for batch_idx, batch_data_label in enumerate(self.test_dataloader):
if batch_idx % 10 == 0:
logging.info('Eval batch: %d'%(batch_idx))
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
with torch.no_grad():
end_points = self.net(inputs)
# Compute loss
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT)
batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT)
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# Dump evaluation results for visualization
if self.config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0 and self.is_master:
dump_results(end_points, 'results', self.dataset_config)
# Log statistics
for key in sorted(stat_dict.keys()):
    logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
if self.is_master:
for key in sorted(stat_dict.keys()):
self.writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),
(epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)
# Evaluate average precision
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
logging.info('eval %s: %f'%(key, metrics_dict[key]))
if self.is_master:
self.writer.add_scalar('validation/mAP{}'.format(self.config.test.ap_iou), metrics_dict['mAP'], (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)
#mean_loss = stat_dict['loss']/float(batch_idx+1)
return metrics_dict['mAP']
def train(self):
for epoch in range(self.start_epoch, self.config.optimizer.max_epoch):
logging.info('**** EPOCH %03d ****' % (epoch))
logging.info('Current learning rate: %f'%(DetectionTrainer.get_current_lr(epoch, self.config)))
logging.info('Current BN decay momentum: %f'%(self.bnm_scheduler.lmbd(self.bnm_scheduler.last_epoch)))
logging.info(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed()
if get_world_size() > 1:
self.sampler.set_epoch(epoch)
self.train_one_epoch(epoch)
if epoch % 5 == 4 and self.is_master: # Eval every 5 epochs
best_mAP = self.evaluate_one_epoch(epoch)
if best_mAP > self.best_mAP:
self.best_mAP = best_mAP
# Save checkpoint
save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1
'optimizer_state_dict': self.optimizer.state_dict(),
'best_mAP': self.best_mAP}
if get_world_size() > 1:
save_dict['state_dict'] = self.net.module.state_dict()
else:
save_dict['state_dict'] = self.net.state_dict()
torch.save(save_dict, 'checkpoint.tar')
OmegaConf.save(self.config, 'config.yaml')
@staticmethod
def write_to_benchmark(data, scene_name):
from models.ap_helper import flip_axis_back_camera
OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
os.makedirs('benchmark_output', exist_ok=True)
bsize = len(scene_name)
for bsize_ in range(bsize):
write_list = []
cur_data = data[bsize_]
cur_name = scene_name[bsize_]
for class_id, bbox, score in cur_data:
bbox = flip_axis_back_camera(bbox)
minx = np.min(bbox[:,0])
miny = np.min(bbox[:,1])
minz = np.min(bbox[:,2])
maxx = np.max(bbox[:,0])
maxy = np.max(bbox[:,1])
maxz = np.max(bbox[:,2])
write_list.append([minx, miny, minz, maxx,maxy, maxz, OBJ_CLASS_IDS[class_id], score])
np.savetxt(os.path.join('benchmark_output', cur_name+'.txt'), np.array(write_list))
def test(self):
if self.config.test.use_cls_nms:
assert(self.config.test.use_3d_nms)
AP_IOU_THRESHOLDS = self.config.test.ap_iou_thresholds
logging.info(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed(0)
stat_dict = {}
ap_calculator_list = [APCalculator(iou_thresh, self.dataset_config.class2type) for iou_thresh in AP_IOU_THRESHOLDS]
self.net.eval() # set model to eval mode (for bn and dp)
for batch_idx, batch_data_label in enumerate(self.test_dataloader):
if batch_idx % 10 == 0:
print('Eval batch: %d'%(batch_idx))
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
with torch.no_grad():
end_points = self.net(inputs)
# Compute loss
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT_TEST)
batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT_TEST)
for ap_calculator in ap_calculator_list:
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# debug
if self.config.test.write_to_benchmark:
#from lib.utils.io3d import write_triangle_mesh
#write_triangle_mesh(batch_data_label['point_clouds'][0].cpu().numpy(), None, None, batch_data_label['scan_name'][0]+'.ply')
DetectionTrainer.write_to_benchmark(batch_pred_map_cls, batch_data_label['scan_name'])
if self.config.test.save_vis:
dump_results_(end_points, 'visualization', self.dataset_config)
# Log statistics
for key in sorted(stat_dict.keys()):
logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
# Evaluate average precision
if not self.config.test.write_to_benchmark:
for i, ap_calculator in enumerate(ap_calculator_list):
logging.info('-'*10 + 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]) + '-'*10)
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
logging.info('eval %s: %f'%(key, metrics_dict[key]))
mean_loss = stat_dict['loss']/float(batch_idx+1)
return mean_loss
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/ddp_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import os
import time
import torch
import signal
import pickle
import threading
import random
import functools
import traceback
import torch.nn as nn
import torch.distributed as dist
import multiprocessing as mp
"""Multiprocessing error handler."""
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
"""Multiprocessing helpers."""
def run(proc_rank, world_size, port, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size, port)
# Run the function
fun(*fun_args, **fun_kwargs)
except:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
destroy_process_group()
def multi_proc_run(num_proc, port, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, port, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
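# Usage sketch (illustrative addition, not part of the original file): launching
# a worker function once per GPU with multi_proc_run. `train_fn`, the GPU count
# and the port are placeholders; any picklable function and free port work.
def _multi_proc_example(train_fn, num_gpus=2, port=12355):
    if num_gpus > 1:
        multi_proc_run(num_gpus, port, fun=train_fn)
    else:
        train_fn()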
"""Distributed helpers."""
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
When training using a single GPU, there is only one training process,
which is considered the master process.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
np_list = [int(np.item()) for np in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
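# Usage sketch (illustrative addition, not part of the original file):
# collecting per-rank Python objects (e.g. per-scene evaluation results) on
# every rank with all_gather. Assumes the process group is already initialized
# and CUDA is available; `local_results` is a placeholder list.
def _all_gather_example(local_results):
    gathered = all_gather(local_results)  # one entry per rank
    merged = []
    for part in gathered:
        merged.extend(part)
    return merged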
def init_process_group(proc_rank, world_size, port):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", str(port)),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/distributed.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for metric evaluation.
Author: Or Litany and Charles R. Qi
"""
import os
import sys
import torch
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
# Mesh IO
import trimesh
# ----------------------------------------
# Precision and Recall
# ----------------------------------------
def multi_scene_precision_recall(labels, pred, iou_thresh, conf_thresh, label_mask, pred_mask=None):
'''
Args:
labels: (B, N, 6)
pred: (B, M, 6)
iou_thresh: scalar
conf_thresh: scalar
label_mask: (B, N,) with values in 0 or 1 to indicate which GT boxes to consider.
pred_mask: (B, M,) with values in 0 or 1 to indicate which PRED boxes to consider.
Returns:
TP,FP,FN,Precision,Recall
'''
# Make sure the masks are not Torch tensor, otherwise the mask==1 returns uint8 array instead
# of True/False array as in numpy
assert(not torch.is_tensor(label_mask))
assert(not torch.is_tensor(pred_mask))
TP, FP, FN = 0, 0, 0
if label_mask is None: label_mask = np.ones((labels.shape[0], labels.shape[1]))
if pred_mask is None: pred_mask = np.ones((pred.shape[0], pred.shape[1]))
for batch_idx in range(labels.shape[0]):
TP_i, FP_i, FN_i = single_scene_precision_recall(labels[batch_idx, label_mask[batch_idx,:]==1, :],
pred[batch_idx, pred_mask[batch_idx,:]==1, :],
iou_thresh, conf_thresh)
TP += TP_i
FP += FP_i
FN += FN_i
return TP, FP, FN, precision_recall(TP, FP, FN)
def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh):
"""Compute P and R for predicted bounding boxes. Ignores classes!
Args:
labels: (N x bbox) ground-truth bounding boxes (6 dims)
pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification
Returns:
TP, FP, FN
"""
# for each pred box with high conf (C), compute IoU with all gt boxes.
# TP = number of times IoU > th ; FP = C - TP
# FN - number of scene objects without good match
gt_bboxes = labels[:, :6]
num_scene_bboxes = gt_bboxes.shape[0]
conf = pred[:, 6]
conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6]
num_conf_pred_bboxes = conf_pred_bbox.shape[0]
# init an array to keep iou between generated and scene bboxes
iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes])
for g_idx in range(num_conf_pred_bboxes):
for s_idx in range(num_scene_bboxes):
iou_arr[g_idx, s_idx] = calc_iou(conf_pred_bbox[g_idx ,:], gt_bboxes[s_idx, :])
good_match_arr = (iou_arr >= iou_thresh)
TP = good_match_arr.any(axis=1).sum()
FP = num_conf_pred_bboxes - TP
FN = num_scene_bboxes - good_match_arr.any(axis=0).sum()
return TP, FP, FN
def precision_recall(TP, FP, FN):
Prec = 1.0 * TP / (TP + FP) if TP+FP>0 else 0
Rec = 1.0 * TP / (TP + FN) if TP+FN>0 else 0
return Prec, Rec
def calc_iou(box_a, box_b):
"""Computes IoU of two axis aligned bboxes.
Args:
box_a, box_b: 6D of center and lengths
Returns:
iou
"""
max_a = box_a[0:3] + box_a[3:6]/2
max_b = box_b[0:3] + box_b[3:6]/2
min_max = np.array([max_a, max_b]).min(0)
min_a = box_a[0:3] - box_a[3:6]/2
min_b = box_b[0:3] - box_b[3:6]/2
max_min = np.array([min_a, min_b]).max(0)
if not ((min_max > max_min).all()):
return 0.0
intersection = (min_max - max_min).prod()
vol_a = box_a[3:6].prod()
vol_b = box_b[3:6].prod()
union = vol_a + vol_b - intersection
return 1.0*intersection / union
if __name__ == '__main__':
print('running some tests')
############
## Test IoU
############
box_a = np.array([0,0,0,1,1,1])
box_b = np.array([0,0,0,2,2,2])
expected_iou = 1.0/8
pred_iou = calc_iou(box_a, box_b)
assert expected_iou == pred_iou, 'function returned wrong IoU'
box_a = np.array([0,0,0,1,1,1])
box_b = np.array([10,10,10,2,2,2])
expected_iou = 0.0
pred_iou = calc_iou(box_a, box_b)
assert expected_iou == pred_iou, 'function returned wrong IoU'
print('IoU test -- PASSED')
#########################
## Test Precision Recall
#########################
gt_boxes = np.array([[0,0,0,1,1,1],[3, 0, 1, 1, 10, 1]])
detected_boxes = np.array([[0,0,0,1,1,1, 1.0],[3, 0, 1, 1, 10, 1, 0.9]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 2 and FP == 0 and FN == 0
assert precision_recall(TP, FP, FN) == (1, 1)
detected_boxes = np.array([[0,0,0,1,1,1, 1.0]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 0 and FN == 1
assert precision_recall(TP, FP, FN) == (1, 0.5)
detected_boxes = np.array([[0,0,0,1,1,1, 1.0], [-1,-1,0,0.1,0.1,1, 1.0]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 1 and FN == 1
assert precision_recall(TP, FP, FN) == (0.5, 0.5)
# wrong box has low confidence
detected_boxes = np.array([[0,0,0,1,1,1, 1.0], [-1,-1,0,0.1,0.1,1, 0.1]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 0 and FN == 1
assert precision_recall(TP, FP, FN) == (1, 0.5)
print('Precision Recall test -- PASSED')
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/metric_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
precision-recal and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
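# Worked example (illustrative addition, not part of the original file): voc_ap
# on a toy precision/recall curve. With rec = [0.5, 1.0] and prec = [1.0, 0.5],
# the non-VOC07 integration gives 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
def _voc_ap_example():
    rec = np.array([0.5, 1.0])
    prec = np.array([1.0, 0.5])
    return voc_ap(rec, prec, use_07_metric=False)  # -> 0.75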
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from lib.utils.metric_util import calc_iou # axis-aligned 3D box IoU
def get_iou(bb1, bb2):
""" Compute IoU of two bounding boxes.
** Define your box IoU function HERE **
"""
#pass
iou3d = calc_iou(bb1, bb2)
return iou3d
from lib.utils.box_util import box3d_iou
def get_iou_obb(bb1,bb2):
iou3d, iou2d = box3d_iou(bb1,bb2)
return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box,score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
#if d%100==0: print(d)
R = class_recs[image_ids[d]]
bb = BB[d,...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
if iou > ovmax:
ovmax = iou
jmax = j
#print d, ovmax
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
#print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
print('Computing AP for class: ', classname)
rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
print(classname, ap[classname])
return rec, prec, ap
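# Usage sketch (illustrative addition, not part of the original file): calling
# eval_det with toy axis-aligned boxes in (cx, cy, cz, dx, dy, dz) format,
# matching the default calc_iou convention. Class name and box values are
# illustrative.
def _eval_det_example():
    gt_all = {'scene0': [('chair', np.array([0, 0, 0, 1, 1, 1]))]}
    pred_all = {'scene0': [('chair', np.array([0, 0, 0, 1, 1, 1]), 0.9)]}
    rec, prec, ap = eval_det(pred_all, gt_all, ovthresh=0.25)
    return ap  # {'chair': 1.0} for this perfect detection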
from multiprocessing import Pool
def eval_det_multiprocessing(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(eval_det_cls_wrapper, [(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func) for classname in gt.keys() if classname in pred])
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
print(classname, ap[classname])
return rec, prec, ap
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/eval_det.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Chamfer distance in Pytorch.
Author: Charles R. Qi
"""
import torch
import torch.nn as nn
import numpy as np
def huber_loss(error, delta=1.0):
"""
Args:
error: Torch tensor (d1,d2,...,dk)
Returns:
loss: Torch tensor (d1,d2,...,dk)
x = error = pred - gt or dist(pred,gt)
0.5 * |x|^2 if |x|<=d
0.5 * d^2 + d * (|x|-d) if |x|>d
Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
"""
abs_error = torch.abs(error)
#quadratic = torch.min(abs_error, torch.FloatTensor([delta]))
quadratic = torch.clamp(abs_error, max=delta)
linear = (abs_error - quadratic)
loss = 0.5 * quadratic**2 + delta * linear
return loss
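# Worked example (illustrative addition, not part of the original file):
# huber_loss with delta=1.0. An error of 0.5 stays in the quadratic branch
# (0.5 * 0.5**2 = 0.125); an error of 2.0 switches to the linear branch
# (0.5 * 1.0**2 + 1.0 * (2.0 - 1.0) = 1.5).
def _huber_loss_example():
    err = torch.tensor([0.5, 2.0])
    return huber_loss(err, delta=1.0)  # tensor([0.1250, 1.5000])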
def nn_distance(pc1, pc2, l1smooth=False, delta=1.0, l1=False):
"""
Input:
pc1: (B,N,C) torch tensor
pc2: (B,M,C) torch tensor
l1smooth: bool, whether to use l1smooth loss
delta: scalar, the delta used in l1smooth loss
Output:
dist1: (B,N) torch float32 tensor
idx1: (B,N) torch int64 tensor
dist2: (B,M) torch float32 tensor
idx2: (B,M) torch int64 tensor
"""
N = pc1.shape[1]
M = pc2.shape[1]
pc1_expand_tile = pc1.unsqueeze(2).repeat(1,1,M,1)
pc2_expand_tile = pc2.unsqueeze(1).repeat(1,N,1,1)
pc_diff = pc1_expand_tile - pc2_expand_tile
if l1smooth:
pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M)
elif l1:
pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M)
else:
pc_dist = torch.sum(pc_diff**2, dim=-1) # (B,N,M)
dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N)
dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M)
return dist1, idx1, dist2, idx2
def demo_nn_distance():
np.random.seed(0)
pc1arr = np.random.random((1,5,3))
pc2arr = np.random.random((1,6,3))
pc1 = torch.from_numpy(pc1arr.astype(np.float32))
pc2 = torch.from_numpy(pc2arr.astype(np.float32))
dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2)
print(dist1)
print(idx1)
dist = np.zeros((5,6))
for i in range(5):
for j in range(6):
dist[i,j] = np.sum((pc1arr[0,i,:] - pc2arr[0,j,:]) ** 2)
print(dist)
print('-'*30)
print('L1smooth dists:')
dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2, True)
print(dist1)
print(idx1)
dist = np.zeros((5,6))
for i in range(5):
for j in range(6):
error = np.abs(pc1arr[0,i,:] - pc2arr[0,j,:])
quad = np.minimum(error, 1.0)
linear = error - quad
loss = 0.5*quad**2 + 1.0*linear
dist[i,j] = np.sum(loss)
print(dist)
if __name__ == '__main__':
demo_nn_distance()
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/nn_distance.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for processing point clouds.
Author: Charles R. Qi and Or Litany
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Point cloud IO
import numpy as np
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
# Mesh IO
import trimesh
import math
import matplotlib.pyplot as pyplot
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC
"""
if replace is None: replace = (pc.shape[0]<num_sample)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
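# Illustrative sketch (not part of the original file): voxelize a random cloud with
# point_cloud_to_volume and recover the occupied voxel coordinates with
# volume_to_point_cloud. The array sizes are arbitrary.
def _demo_point_cloud_to_volume(vsize=12, radius=1.0):
    pts = np.random.uniform(-radius, radius, size=(128, 3))
    vol = point_cloud_to_volume(pts, vsize, radius)  # (vsize,vsize,vsize) occupancy grid
    occupied = volume_to_point_cloud(vol)            # Nx3 voxel indices
    print(vol.shape, occupied.shape)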
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
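# Illustrative sketch (not part of the original file): write a small random cloud to
# disk with write_ply and read it back with read_ply. The filename is arbitrary.
def _demo_ply_roundtrip(filename='demo_points.ply'):
    pts = np.random.rand(64, 3).astype(np.float32)
    write_ply(pts, filename)
    back = read_ply(filename)
    print(back.shape)  # expected (64, 3)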
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
else:
assert(num_classes>np.max(labels))
vertex = []
#colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
colors = [colormap(i/float(num_classes)) for i in range(num_classes)]
for i in range(N):
c = colors[labels[i]]
c = [int(x*255) for x in c]
vertex.append( (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]) )
vertex = np.array(vertex, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4'),('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=True).write(filename)
def write_ply_rgb(points, colors, out_filename, num_classes=None):
""" Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file """
colors = colors.astype(int)
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[i,:]
fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def rotate_point_cloud(points, rotation_matrix=None):
""" Input: (n,3), Output: (n,3) """
# Rotate in-place around Z axis.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1]])
ctr = points.mean(axis=0)
rotated_data = np.dot(points-ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
    ''' Input pc is NxC points with first 3 channels as XYZ
        z is facing forward, x is leftward, y is downward
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval],[sinval, cosval]])
pc[:,[0,2]] = np.dot(pc[:,[0,2]], np.transpose(rotmat))
return pc
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
# ----------------------------------------
# BBox
# ----------------------------------------
def bbox_corner_dist_measure(crnr1, crnr2):
""" compute distance between box corners to replace iou
Args:
crnr1, crnr2: Nx3 points of box corners in camera axis (y points down)
output is a scalar between 0 and 1
"""
dist = sys.maxsize
for y in range(4):
rows = ([(x+y)%4 for x in range(4)] + [4+(x+y)%4 for x in range(4)])
d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
if d_ < dist:
dist = d_
u = sum([np.linalg.norm(x[0,:] - x[6,:]) for x in [crnr1, crnr2]])/2.0
measure = max(1.0 - dist/u, 0)
print(measure)
return measure
def point_cloud_to_bbox(points):
""" Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
which_dim = len(points.shape) - 2 # first dim if a single cloud and second if batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5*(mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
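# Illustrative sketch (not part of the original file): point_cloud_to_bbox returns
# (cx,cy,cz,dx,dy,dz) for a single cloud and one such row per cloud for a batch.
def _demo_point_cloud_to_bbox():
    single = np.random.rand(100, 3)           # one cloud
    batch = np.random.rand(4, 100, 3)         # batch of clouds
    print(point_cloud_to_bbox(single).shape)  # (6,)
    print(point_cloud_to_bbox(batch).shape)   # (4, 6)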
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[2,2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2,0:2] = np.array([[cosval, -sinval],[sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def generate_bbox_mesh(bbox, output_file=None):
"""
bbox: np array (n, 6),
output_file: string
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
def compute_length_vec3(vec3):
return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0,0] = 1 + t*(x*x-1)
rot[0,1] = z*s+t*x*y
rot[0,2] = -y*s+t*x*z
rot[1,0] = -z*s+t*x*y
rot[1,1] = 1+t*(y*y-1)
rot[1,2] = x*s+t*y*z
rot[2,0] = y*s+t*x*z
rot[2,1] = -x*s+t*y*z
rot[2,2] = 1+t*(z*z-1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks+1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1,0,0]) - dotx * va
else:
axis = np.array([0,1,0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3,3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
radius = 0.02
offset = [0,0,0]
verts = []
indices = []
for box in bbox:
box_min = np.array([box[0], box[1], box[2]])
box_max = np.array([box[3], box[4], box[5]])
edges = get_bbox_edges(box_min, box_max)
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cur_num_verts = len(verts)
cyl_verts = [x + offset for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
verts.extend(cyl_verts)
indices.extend(cyl_ind)
return verts, indices
def write_oriented_bbox_(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def write_ply_mesh(verts, colors, indices, output_file):
if colors is None:
colors = np.zeros_like(verts)
if indices is None:
indices = []
file = open(output_file, 'w')
file.write('ply \n')
file.write('format ascii 1.0\n')
file.write('element vertex {:d}\n'.format(len(verts)))
file.write('property float x\n')
file.write('property float y\n')
file.write('property float z\n')
file.write('property uchar red\n')
file.write('property uchar green\n')
file.write('property uchar blue\n')
file.write('element face {:d}\n'.format(len(indices)))
file.write('property list uchar uint vertex_indices\n')
file.write('end_header\n')
for vert, color in zip(verts, colors):
file.write("{:f} {:f} {:f} {:d} {:d} {:d}\n".format(vert[0], vert[1], vert[2] , int(color[0]*255), int(color[1]*255), int(color[2]*255)))
for ind in indices:
file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
file.close()
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[2,2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2,0:2] = np.array([[cosval, -sinval],[sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box = np.array([[-0.5,-0.5,-0.5, 0.5, 0.5, 0.5]])
box[:,0] = box[:,0] * lengths[0] + trns[0,3]
box[:,1] = box[:,1] * lengths[1] + trns[1,3]
box[:,2] = box[:,2] * lengths[2] + trns[2,3]
box[:,3] = box[:,3] * lengths[0] + trns[0,3]
box[:,4] = box[:,4] * lengths[1] + trns[1,3]
box[:,5] = box[:,5] * lengths[2] + trns[2,3]
vertices, indices = generate_bbox_mesh(box)
return vertices, indices
verts, inds = convert_oriented_box_to_trimesh_fmt(scene_bbox)
write_ply_mesh(verts, None, inds, out_filename)
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[1,1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0,:] = np.array([cosval, 0, sinval])
rotmat[2,:] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src,tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0,0,1],vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3,3] = 0.5*src + 0.5*tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(trimesh.creation.cylinder(radius=rad, height=height, sections=res, transform=M))
mesh_list = trimesh.util.concatenate(scene.dump())
mesh_list.export('%s.ply'%(filename))
# ----------------------------------------
# Testing
# ----------------------------------------
if __name__ == '__main__':
print('running some tests')
############
## Test "write_lines_as_cylinders"
############
pcl = np.random.rand(32, 2, 3)
write_lines_as_cylinders(pcl, 'point_connectors')
input()
scene_bbox = np.zeros((1,7))
scene_bbox[0,3:6] = np.array([1,2,3]) # dx,dy,dz
scene_bbox[0,6] = np.pi/4 # 45 degrees
write_oriented_bbox(scene_bbox, 'single_obb_45degree.ply')
############
## Test point_cloud_to_bbox
############
pcl = np.random.rand(32, 16, 3)
pcl_bbox = point_cloud_to_bbox(pcl)
assert pcl_bbox.shape == (32, 6)
pcl = np.random.rand(16, 3)
pcl_bbox = point_cloud_to_bbox(pcl)
assert pcl_bbox.shape == (6,)
############
## Test corner distance
############
crnr1 = np.array([[2.59038660e+00, 8.96107932e-01, 4.73305349e+00],
[4.12281644e-01, 8.96107932e-01, 4.48046631e+00],
[2.97129656e-01, 8.96107932e-01, 5.47344275e+00],
[2.47523462e+00, 8.96107932e-01, 5.72602993e+00],
[2.59038660e+00, 4.41155793e-03, 4.73305349e+00],
[4.12281644e-01, 4.41155793e-03, 4.48046631e+00],
[2.97129656e-01, 4.41155793e-03, 5.47344275e+00],
[2.47523462e+00, 4.41155793e-03, 5.72602993e+00]])
crnr2 = crnr1
print(bbox_corner_dist_measure(crnr1, crnr2))
print('tests PASSED')
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/pc_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pc_util import bbox_corner_dist_measure
# boxes are axis aigned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
''' Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
'''
def nms_2d(boxes, overlap_threshold):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
score = boxes[:,4]
area = (x2-x1)*(y2-y1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
suppress = [last-1]
for pos in range(last-1):
j = I[pos]
xx1 = max(x1[i],x1[j])
yy1 = max(y1[i],y1[j])
xx2 = min(x2[i],x2[j])
yy2 = min(y2[i],y2[j])
w = xx2-xx1
h = yy2-yy1
if (w>0 and h>0):
o = w*h/area[j]
print('Overlap is', o)
if (o>overlap_threshold):
suppress.append(pos)
I = np.delete(I,suppress)
return pick
def nms_2d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
score = boxes[:,4]
area = (x2-x1)*(y2-y1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
w = np.maximum(0, xx2-xx1)
h = np.maximum(0, yy2-yy1)
if old_type:
o = (w*h)/area[I[:last-1]]
else:
inter = w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
def nms_3d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
z1 = boxes[:,2]
x2 = boxes[:,3]
y2 = boxes[:,4]
z2 = boxes[:,5]
score = boxes[:,6]
area = (x2-x1)*(y2-y1)*(z2-z1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
zz1 = np.maximum(z1[i], z1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
zz2 = np.minimum(z2[i], z2[I[:last-1]])
l = np.maximum(0, xx2-xx1)
w = np.maximum(0, yy2-yy1)
h = np.maximum(0, zz2-zz1)
if old_type:
o = (l*w*h)/area[I[:last-1]]
else:
inter = l*w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
z1 = boxes[:,2]
x2 = boxes[:,3]
y2 = boxes[:,4]
z2 = boxes[:,5]
score = boxes[:,6]
cls = boxes[:,7]
area = (x2-x1)*(y2-y1)*(z2-z1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
zz1 = np.maximum(z1[i], z1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
zz2 = np.minimum(z2[i], z2[I[:last-1]])
cls1 = cls[i]
cls2 = cls[I[:last-1]]
l = np.maximum(0, xx2-xx1)
w = np.maximum(0, yy2-yy1)
h = np.maximum(0, zz2-zz1)
if old_type:
o = (l*w*h)/area[I[:last-1]]
else:
inter = l*w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
o = o * (cls1==cls2)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
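# Illustrative sketch (not part of the original file): the 3D NMS variants expect rows of
# (x1,y1,z1,x2,y2,z2,score) and, for the same-class variant, an extra class id column.
# Box count, sizes, and threshold below are arbitrary.
def _demo_nms_3d(n=20, overlap_threshold=0.25):
    mins = np.random.rand(n, 3)
    sizes = np.random.rand(n, 3) * 0.5
    scores = np.random.rand(n, 1)
    cls = np.random.randint(0, 3, size=(n, 1)).astype(np.float64)
    boxes = np.concatenate([mins, mins + sizes, scores], axis=1)  # (n,7)
    keep = nms_3d_faster(boxes, overlap_threshold)
    keep_samecls = nms_3d_faster_samecls(np.concatenate([boxes, cls], axis=1), overlap_threshold)
    print(len(keep), len(keep_samecls))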
def nms_crnr_dist(boxes, conf, overlap_threshold):
I = np.argsort(conf)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
scores = []
for ind in I[:-1]:
scores.append(bbox_corner_dist_measure(boxes[i,:], boxes[ind, :]))
I = np.delete(I, np.concatenate(([last-1], np.where(np.array(scores)>overlap_threshold)[0])))
return pick
if __name__=='__main__':
a = np.random.random((100,5))
print(nms_2d(a,0.9))
print(nms_2d_faster(a,0.9))
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/nms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''Code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix'''
import os
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(BASE_DIR)
import tf_logger
class Visualizer():
def __init__(self, opt, name='train'):
# self.opt = opt
#self.logger = tf_logger.Logger(os.path.join(opt.logging_dir, opt.name))
#self.log_name = os.path.join(opt.checkpoint_dir, opt.name, 'loss_log.txt')
self.logger = tf_logger.Logger(os.path.join(opt.log_dir, name))
self.log_name = os.path.join(opt.log_dir, 'tf_visualizer_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
# |visuals|: dictionary of images to save
def log_images(self, visuals, step):
for label, image_numpy in visuals.items():
self.logger.image_summary(
label, [image_numpy], step)
# scalars: dictionary of scalar labels and values
def log_scalars(self, scalars, step):
for label, val in scalars.items():
self.logger.scalar_summary(label, val, step)
# scatter plots
def plot_current_points(self, points, disp_offset=10):
pass
# scalars: same format as |scalars| of plot_current_scalars
def print_current_scalars(self, epoch, i, scalars):
message = '(epoch: %d, iters: %d) ' % (epoch, i)
for k, v in scalars.items():
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
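# Illustrative sketch (not part of the original file): the Visualizer only needs an object
# exposing `log_dir`, so a SimpleNamespace stands in for the training config used elsewhere
# in this repository. Logging itself requires the TensorFlow 1.x writer wrapped by tf_logger.
if __name__ == '__main__':
    from types import SimpleNamespace
    vis = Visualizer(SimpleNamespace(log_dir='/tmp/tf_vis_demo'), name='train')
    vis.log_scalars({'loss': 0.42}, step=1)
    vis.print_current_scalars(epoch=0, i=10, scalars={'loss': 0.42})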
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/tf_visualizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
return vertices, colors, faces
def generate_bbox_mesh(bbox):
"""
bbox: np array (n, 7), last one is instance/label id
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
def compute_length_vec3(vec3):
return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0,0] = 1 + t*(x*x-1)
rot[0,1] = z*s+t*x*y
rot[0,2] = -y*s+t*x*z
rot[1,0] = -z*s+t*x*y
rot[1,1] = 1+t*(y*y-1)
rot[1,2] = x*s+t*y*z
rot[2,0] = y*s+t*x*z
rot[2,1] = -x*s+t*y*z
rot[2,2] = 1+t*(z*z-1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks+1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1,0,0]) - dotx * va
else:
axis = np.array([0,1,0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3,3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
radius = 0.02
offset = [0,0,0]
verts = []
indices = []
colors = []
for box in bbox:
box_min = np.array([box[0], box[1], box[2]])
box_max = np.array([box[3], box[4], box[5]])
r, g, b = create_color_palette()[int(box[6]%41)]
edges = get_bbox_edges(box_min, box_max)
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cur_num_verts = len(verts)
cyl_color = [[r/255.0,g/255.0,b/255.0] for _ in cyl_verts]
cyl_verts = [x + offset for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
verts.extend(cyl_verts)
indices.extend(cyl_ind)
colors.extend(cyl_color)
return verts, colors, indices
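# Illustrative sketch (not part of the original file): turn two axis-aligned boxes given as
# (xmin,ymin,zmin,xmax,ymax,zmax,label) into a colored cylinder-edge mesh and save it with
# write_triangle_mesh above. The labels pick nyu40 palette colors; the filename is arbitrary.
def _demo_bbox_mesh(out_file='demo_bbox.ply'):
    bbox = np.array([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 4],       # label 4 -> bed color
                     [1.5, 0.0, 0.0, 2.5, 0.5, 1.0, 5]])      # label 5 -> chair color
    verts, colors, indices = generate_bbox_mesh(bbox)
    write_triangle_mesh(np.array(verts), np.array(colors), np.array(indices), out_file)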
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/io3d.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
import numpy as np
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
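# Illustrative sketch (not part of the original file): the Logger wraps the TensorFlow 1.x
# summary writer, so this only runs with TF 1.x installed. Log directory and values are arbitrary.
if __name__ == '__main__':
    logger = Logger('/tmp/tf_logger_demo')
    for step in range(3):
        logger.scalar_summary('demo/loss', 1.0 / (step + 1), step)
    logger.histo_summary('demo/weights', np.random.randn(1000), step=3)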
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/tf_logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions for calculating 2D and 3D bounding box IoU.
Collected and written by Charles R. Qi
Last modified: Jul 2019
"""
from __future__ import print_function
import numpy as np
from scipy.spatial import ConvexHull
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return(outputList)
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def is_clockwise(p):
x = p[:,0]
y = p[:,1]
return np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)) > 0
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,2]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,2]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[0,1], corners2[0,1])
ymin = max(corners1[4,1], corners2[4,1])
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two 2D bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x, y) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
def box2d_iou(box1, box2):
''' Compute 2D bounding box IoU.
Input:
box1: tuple of (xmin,ymin,xmax,ymax)
box2: tuple of (xmin,ymin,xmax,ymax)
Output:
iou: 2D IoU scalar
'''
return get_iou({'x1':box1[0], 'y1':box1[1], 'x2':box1[2], 'y2':box1[3]}, \
{'x1':box2[0], 'y1':box2[1], 'x2':box2[2], 'y2':box2[3]})
# -----------------------------------------------------------
# Convert from box parameters to
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def get_3d_box(box_size, heading_angle, center):
    ''' box_size is array(l,w,h), heading_angle is the heading in radians, measured clockwise from the positive x axis, center is xyz of box center
        output (8,3) array for 3D box corners
Similar to utils/compute_orientation_3d
'''
R = roty(heading_angle)
l,w,h = box_size
    x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
    y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2]
    z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
    corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
    corners_3d[0,:] = corners_3d[0,:] + center[0]
    corners_3d[1,:] = corners_3d[1,:] + center[1]
    corners_3d[2,:] = corners_3d[2,:] + center[2]
corners_3d = np.transpose(corners_3d)
return corners_3d
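# Illustrative sketch (not part of the original file): build two slightly shifted, rotated
# boxes with get_3d_box and compare them with box3d_iou. Sizes, angle, and centers are arbitrary.
def _demo_box3d_iou():
    corners_a = get_3d_box((2.0, 1.0, 1.5), 0.0, (0.0, 0.0, 0.0))
    corners_b = get_3d_box((2.0, 1.0, 1.5), np.pi / 6, (0.5, 0.0, 0.0))
    iou3d, iou_bev = box3d_iou(corners_a, corners_b)
    print(iou3d, iou_bev)  # partial overlap, both in (0, 1)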
def get_3d_box_batch(box_size, heading_angle, center):
''' box_size: [x1,x2,...,xn,3]
heading_angle: [x1,x2,...,xn]
center: [x1,x2,...,xn,3]
Return:
        [x1,x2,...,xn,8,3]
'''
input_shape = heading_angle.shape
R = roty_batch(heading_angle)
l = np.expand_dims(box_size[...,0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[...,1], -1)
h = np.expand_dims(box_size[...,2], -1)
corners_3d = np.zeros(tuple(list(input_shape)+[8,3]))
corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
if __name__=='__main__':
# Function for polygon ploting
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
def plot_polys(plist,scale=500.0):
fig, ax = plt.subplots()
patches = []
for p in plist:
poly = Polygon(np.array(p)/scale, True)
patches.append(poly)
pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5)
colors = 100*np.random.rand(len(patches))
pc.set_array(np.array(colors))
ax.add_collection(pc)
plt.show()
# Demo on ConvexHull
points = np.random.rand(30, 2) # 30 random points in 2-D
hull = ConvexHull(points)
# **In 2D "volume" is is area, "area" is perimeter
print(('Hull area: ', hull.volume))
for simplex in hull.simplices:
print(simplex)
# Demo on convex hull overlaps
sub_poly = [(0,0),(300,0),(300,300),(0,300)]
clip_poly = [(150,150),(300,300),(150,450),(0,300)]
inter_poly = polygon_clip(sub_poly, clip_poly)
print(poly_area(np.array(inter_poly)[:,0], np.array(inter_poly)[:,1]))
# Test convex hull interaction function
rect1 = [(50,0),(50,300),(300,300),(300,0)]
rect2 = [(150,150),(300,300),(150,450),(0,300)]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
if inter is not None:
print(poly_area(np.array(inter)[:,0], np.array(inter)[:,1]))
print('------------------')
rect1 = [(0.30026005199835404, 8.9408694211408424), \
(-1.1571105364358421, 9.4686676477075533), \
(0.1777082043006144, 13.154404877812102), \
(1.6350787927348105, 12.626606651245391)]
rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]]
rect2 = [(0.23908745901608636, 8.8551095691132886), \
(-1.2771419487733995, 9.4269062966181956), \
(0.13138836963152717, 13.161896351296868), \
(1.647617777421013, 12.590099623791961)]
rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
|
ContrastiveSceneContexts-main
|
downstream/votenet/lib/utils/box_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob, os
import numpy as np
import cv2
import argparse
from plyfile import PlyData, PlyElement
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--input_path', required=True, help='path to sens file to read')
parser.add_argument('--output_path', required=True, help='path to output folder')
parser.add_argument('--save_npz', action='store_true')
opt = parser.parse_args()
print(opt)
if not os.path.exists(opt.output_path):
os.mkdir(opt.output_path)
# Load Depth Camera Intrinsic
depth_intrinsic = np.loadtxt(opt.input_path + '/intrinsic/intrinsic_depth.txt')
print('Depth intrinsic: ')
print(depth_intrinsic)
# Compute Camera Distance (just for demo, so you can choose the camera distance in frame sampling)
poses = sorted(glob.glob(opt.input_path + '/pose/*.txt'), key=lambda a: int(os.path.basename(a).split('.')[0]))
depths = sorted(glob.glob(opt.input_path + '/depth/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))
colors = sorted(glob.glob(opt.input_path + '/color/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))
# Get Aligned Point Clouds.
for ind, (pose, depth, color) in enumerate(zip(poses, depths, colors)):
name = os.path.basename(pose).split('.')[0]
if os.path.exists(opt.output_path + '/{}.npz'.format(name)):
continue
try:
print('='*50, ': {}'.format(pose))
depth_img = cv2.imread(depth, -1) # read 16bit grayscale image
mask = (depth_img != 0)
color_image = cv2.imread(color)
color_image = cv2.resize(color_image, (640, 480))
color_image = np.reshape(color_image[mask], [-1,3])
colors = np.zeros_like(color_image)
colors[:,0] = color_image[:,2]
colors[:,1] = color_image[:,1]
colors[:,2] = color_image[:,0]
pose = np.loadtxt(poses[ind])
print('Camera pose: ')
print(pose)
depth_shift = 1000.0
x,y = np.meshgrid(np.linspace(0,depth_img.shape[1]-1,depth_img.shape[1]), np.linspace(0,depth_img.shape[0]-1,depth_img.shape[0]))
uv_depth = np.zeros((depth_img.shape[0], depth_img.shape[1], 3))
uv_depth[:,:,0] = x
uv_depth[:,:,1] = y
uv_depth[:,:,2] = depth_img/depth_shift
uv_depth = np.reshape(uv_depth, [-1,3])
uv_depth = uv_depth[np.where(uv_depth[:,2]!=0),:].squeeze()
intrinsic_inv = np.linalg.inv(depth_intrinsic)
fx = depth_intrinsic[0,0]
fy = depth_intrinsic[1,1]
cx = depth_intrinsic[0,2]
cy = depth_intrinsic[1,2]
bx = depth_intrinsic[0,3]
by = depth_intrinsic[1,3]
point_list = []
n = uv_depth.shape[0]
points = np.ones((n,4))
X = (uv_depth[:,0]-cx)*uv_depth[:,2]/fx + bx
Y = (uv_depth[:,1]-cy)*uv_depth[:,2]/fy + by
points[:,0] = X
points[:,1] = Y
points[:,2] = uv_depth[:,2]
points_world = np.dot(points, np.transpose(pose))
print(points_world.shape)
pcd_save = np.zeros((points_world.shape[0], 7))
pcd_save[:,:3] = points_world[:,:3]
pcd_save[:,3:6] = colors
print('Saving npz file...')
np.savez(opt.output_path + '/{}.npz'.format(name), pcd=pcd_save)
except:
continue
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/point_cloud_extractor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob, os, sys
from SensorData import SensorData
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--target_dir', required=True, help='path to the target dir')
opt = parser.parse_args()
print(opt)
def main():
overlaps = glob.glob(os.path.join(opt.target_dir, "*/pcd/overlap.txt"))
with open(os.path.join(opt.target_dir, 'overlap30.txt'), 'w') as f:
for fo in overlaps:
for line in open(fo):
pcd0, pcd1, op = line.strip().split()
if float(op) >= 0.3:
print('{} {} {}'.format(pcd0, pcd1, op), file=f)
print('done')
if __name__ == '__main__':
main()
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/generage_list.py
|
import os, struct
import numpy as np
import zlib
import imageio
import cv2
COMPRESSION_TYPE_COLOR = {-1:'unknown', 0:'raw', 1:'png', 2:'jpeg'}
COMPRESSION_TYPE_DEPTH = {-1:'unknown', 0:'raw_ushort', 1:'zlib_ushort', 2:'occi_ushort'}
class RGBDFrame():
def load(self, file_handle):
self.camera_to_world = np.asarray(struct.unpack('f'*16, file_handle.read(16*4)), dtype=np.float32).reshape(4, 4)
self.timestamp_color = struct.unpack('Q', file_handle.read(8))[0]
self.timestamp_depth = struct.unpack('Q', file_handle.read(8))[0]
self.color_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
self.depth_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
self.color_data = b''.join(struct.unpack('c'*self.color_size_bytes, file_handle.read(self.color_size_bytes)))
self.depth_data = b''.join(struct.unpack('c'*self.depth_size_bytes, file_handle.read(self.depth_size_bytes)))
def decompress_depth(self, compression_type):
if compression_type == 'zlib_ushort':
return self.decompress_depth_zlib()
else:
raise
def decompress_depth_zlib(self):
return zlib.decompress(self.depth_data)
def decompress_color(self, compression_type):
if compression_type == 'jpeg':
return self.decompress_color_jpeg()
else:
raise
def decompress_color_jpeg(self):
return imageio.imread(self.color_data)
class SensorData:
def __init__(self, filename):
self.version = 4
self.load(filename)
def load(self, filename):
with open(filename, 'rb') as f:
version = struct.unpack('I', f.read(4))[0]
assert self.version == version
strlen = struct.unpack('Q', f.read(8))[0]
self.sensor_name = b''.join(struct.unpack('c'*strlen, f.read(strlen)))
self.intrinsic_color = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.extrinsic_color = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.intrinsic_depth = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.extrinsic_depth = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.color_compression_type = COMPRESSION_TYPE_COLOR[struct.unpack('i', f.read(4))[0]]
self.depth_compression_type = COMPRESSION_TYPE_DEPTH[struct.unpack('i', f.read(4))[0]]
self.color_width = struct.unpack('I', f.read(4))[0]
self.color_height = struct.unpack('I', f.read(4))[0]
self.depth_width = struct.unpack('I', f.read(4))[0]
self.depth_height = struct.unpack('I', f.read(4))[0]
self.depth_shift = struct.unpack('f', f.read(4))[0]
num_frames = struct.unpack('Q', f.read(8))[0]
self.frames = []
for i in range(num_frames):
frame = RGBDFrame()
frame.load(f)
self.frames.append(frame)
def export_depth_images(self, output_path, image_size=None, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, ' depth frames to', output_path)
for f in range(0, len(self.frames), frame_skip):
if os.path.exists((os.path.join(output_path, str(f) + '.png'))):
continue
if f % 100 == 0:
print('exporting', f, 'th depth frames to', os.path.join(output_path, str(f) + '.png'))
depth_data = self.frames[f].decompress_depth(self.depth_compression_type)
depth = np.fromstring(depth_data, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
if image_size is not None:
depth = cv2.resize(depth, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
imageio.imwrite(os.path.join(output_path, str(f) + '.png'), depth)
def export_color_images(self, output_path, image_size=None, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, 'color frames to', output_path)
for f in range(0, len(self.frames), frame_skip):
if os.path.exists((os.path.join(output_path, str(f) + '.png'))):
continue
if f % 100 == 0:
print('exporting', f, 'th color frames to', os.path.join(output_path, str(f) + '.png'))
color = self.frames[f].decompress_color(self.color_compression_type)
if image_size is not None:
color = cv2.resize(color, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
# imageio.imwrite(os.path.join(output_path, str(f) + '.jpg'), color)
imageio.imwrite(os.path.join(output_path, str(f) + '.png'), color)
def save_mat_to_file(self, matrix, filename):
with open(filename, 'w') as f:
for line in matrix:
np.savetxt(f, line[np.newaxis], fmt='%f')
def export_poses(self, output_path, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, 'camera poses to', output_path)
for f in range(0, len(self.frames), frame_skip):
self.save_mat_to_file(self.frames[f].camera_to_world, os.path.join(output_path, str(f) + '.txt'))
def export_intrinsics(self, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting camera intrinsics to', output_path)
self.save_mat_to_file(self.intrinsic_color, os.path.join(output_path, 'intrinsic_color.txt'))
self.save_mat_to_file(self.extrinsic_color, os.path.join(output_path, 'extrinsic_color.txt'))
self.save_mat_to_file(self.intrinsic_depth, os.path.join(output_path, 'intrinsic_depth.txt'))
self.save_mat_to_file(self.extrinsic_depth, os.path.join(output_path, 'extrinsic_depth.txt'))
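# Illustrative sketch (not part of the original file): typical use of SensorData on a
# ScanNet *.sens file; the input file and output paths below are placeholders.
if __name__ == '__main__':
    sd = SensorData('scene0000_00.sens')
    sd.export_depth_images('out/depth', frame_skip=25)
    sd.export_color_images('out/color', frame_skip=25)
    sd.export_poses('out/pose', frame_skip=25)
    sd.export_intrinsics('out/intrinsic')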
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/SensorData.py
|
# Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation
_data_type_relation = [
('int8', 'i1'),
('char', 'i1'),
('uint8', 'u1'),
('uchar', 'b1'),
('uchar', 'u1'),
('int16', 'i2'),
('short', 'i2'),
('uint16', 'u2'),
('ushort', 'u2'),
('int32', 'i4'),
('int', 'i4'),
('uint32', 'u4'),
('uint', 'u4'),
('float32', 'f4'),
('float', 'f4'),
('float64', 'f8'),
('double', 'f8')
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
if _a not in _types_set:
_types_list.append(_a)
_types_set.add(_a)
if _b not in _types_set:
_types_list.append(_b)
_types_set.add(_b)
_byte_order_map = {
'ascii': '=',
'binary_little_endian': '<',
'binary_big_endian': '>'
}
_byte_order_reverse = {
'<': 'binary_little_endian',
'>': 'binary_big_endian'
}
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
def _lookup_type(type_str):
if type_str not in _data_type_reverse:
try:
type_str = _data_types[type_str]
except KeyError:
raise ValueError("field type %r not in %r" %
(type_str, _types_list))
return _data_type_reverse[type_str]
def _split_line(line, n):
fields = line.split(None, n)
if len(fields) == n:
fields.append('')
assert len(fields) == n + 1
return fields
def make2d(array, cols=None, dtype=None):
'''
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
'''
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty "
"array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))],
count=len(array))['_']
class PlyParseError(Exception):
'''
Raised when a PLY file cannot be parsed.
The attributes `element', `row', `property', and `message' give
additional information.
'''
def __init__(self, message, element=None, row=None, prop=None):
self.message = message
self.element = element
self.row = row
self.prop = prop
s = ''
if self.element:
s += 'element %r: ' % self.element.name
if self.row is not None:
s += 'row %d: ' % self.row
if self.prop:
s += 'property %r: ' % self.prop.name
s += self.message
Exception.__init__(self, s)
def __repr__(self):
        return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
                (self.message, self.element, self.row, self.prop))
class PlyData(object):
'''
PLY file header and data.
A PlyData instance is created in one of two ways: by the static
method PlyData.read (to read a PLY file), or directly from __init__
given a sequence of elements (which can then be written to a PLY
file).
'''
def __init__(self, elements=[], text=False, byte_order='=',
comments=[], obj_info=[]):
'''
elements: sequence of PlyElement instances.
text: whether the resulting PLY file will be text (True) or
binary (False).
byte_order: '<' for little-endian, '>' for big-endian, or '='
for native. This is only relevant if `text' is False.
comments: sequence of strings that will be placed in the header
between the 'ply' and 'format ...' lines.
obj_info: like comments, but will be placed in the header with
"obj_info ..." instead of "comment ...".
'''
if byte_order == '=' and not text:
byte_order = _native_byte_order
self.byte_order = byte_order
self.text = text
self.comments = list(comments)
self.obj_info = list(obj_info)
self.elements = elements
def _get_elements(self):
return self._elements
def _set_elements(self, elements):
self._elements = tuple(elements)
self._index()
elements = property(_get_elements, _set_elements)
def _get_byte_order(self):
return self._byte_order
def _set_byte_order(self, byte_order):
if byte_order not in ['<', '>', '=']:
raise ValueError("byte order must be '<', '>', or '='")
self._byte_order = byte_order
byte_order = property(_get_byte_order, _set_byte_order)
def _index(self):
self._element_lookup = dict((elt.name, elt) for elt in
self._elements)
if len(self._element_lookup) != len(self._elements):
raise ValueError("two elements with same name")
@staticmethod
def _parse_header(stream):
'''
Parse a PLY header from a readable file-like stream.
'''
lines = []
comments = {'comment': [], 'obj_info': []}
while True:
line = stream.readline().decode('ascii').strip()
fields = _split_line(line, 1)
if fields[0] == 'end_header':
break
elif fields[0] in comments.keys():
lines.append(fields)
else:
lines.append(line.split())
a = 0
if lines[a] != ['ply']:
raise PlyParseError("expected 'ply'")
a += 1
while lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
if lines[a][0] != 'format':
raise PlyParseError("expected 'format'")
if lines[a][2] != '1.0':
raise PlyParseError("expected version '1.0'")
if len(lines[a]) != 3:
raise PlyParseError("too many fields after 'format'")
fmt = lines[a][1]
if fmt not in _byte_order_map:
raise PlyParseError("don't understand format %r" % fmt)
byte_order = _byte_order_map[fmt]
text = fmt == 'ascii'
a += 1
while a < len(lines) and lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
return PlyData(PlyElement._parse_multi(lines[a:]),
text, byte_order,
comments['comment'], comments['obj_info'])
@staticmethod
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\r\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
@property
def header(self):
'''
Provide PLY-formatted metadata for the instance.
'''
lines = ['ply']
if self.text:
lines.append('format ascii 1.0')
else:
lines.append('format ' +
_byte_order_reverse[self.byte_order] +
' 1.0')
# Some information is lost here, since all comments are placed
# between the 'format' line and the first element.
for c in self.comments:
lines.append('comment ' + c)
for c in self.obj_info:
lines.append('obj_info ' + c)
lines.extend(elt.header for elt in self.elements)
lines.append('end_header')
return '\r\n'.join(lines)
def __iter__(self):
return iter(self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, name):
return name in self._element_lookup
def __getitem__(self, name):
return self._element_lookup[name]
def __str__(self):
return self.header
def __repr__(self):
return ('PlyData(%r, text=%r, byte_order=%r, '
'comments=%r, obj_info=%r)' %
(self.elements, self.text, self.byte_order,
self.comments, self.obj_info))
def _open_stream(stream, read_or_write):
if hasattr(stream, read_or_write):
return (False, stream)
try:
return (True, open(stream, read_or_write[0] + 'b'))
except TypeError:
raise RuntimeError("expected open file or filename")
class PlyElement(object):
'''
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
'''
def __init__(self, name, properties, count, comments=[]):
'''
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
'''
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty)
for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop)
for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return [(prop.name, prop.dtype(byte_order))
for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
'''
Parse a list of PLY element definitions.
'''
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
'''
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
'''
a = 0
line = lines[a]
if line[0] != 'element':
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == 'comment':
comments.append(lines[a][1])
elif lines[a][0] == 'property':
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments),
lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={},
comments=[]):
'''
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
'''
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are "
"supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == 'O':
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == 'O':
if len(t) != 2:
raise ValueError("non-scalar object fields not "
"supported")
len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
if t[1][1] == 'O':
val_type = val_types.get(t[0], 'i4')
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
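# A hedged usage sketch for describe (the array contents are illustrative):
#
#     vertex = _np.array([(0., 0., 0.), (1., 0., 1.)],
#                        dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
#     elt = PlyElement.describe(vertex, 'vertex')
#     PlyData([elt], text=True).write('vertices.ply')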
def _read(self, stream, text, byte_order):
'''
Read the actual data from a PLY file.
'''
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream,
self.dtype(byte_order),
self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
'''
Write the data to a PLY file.
'''
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order),
copy=False).tofile(stream)
def _read_txt(self, stream):
'''
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b''), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line",
self, k, prop)
except ValueError:
raise PlyParseError("malformed input",
self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
'''
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
'''
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def _read_bin(self, stream, byte_order):
'''
Load a PLY element from a binary PLY file. The element may
contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
for k in _range(self.count):
for prop in self.properties:
try:
self._data[prop.name][k] = \
prop._read_bin(stream, byte_order)
except StopIteration:
raise PlyParseError("early end-of-file",
self, k, prop)
def _write_bin(self, stream, byte_order):
'''
Save a PLY element to a binary PLY file. The element may
contain list properties.
'''
for rec in self.data:
for prop in self.properties:
prop._write_bin(rec[prop.name], stream, byte_order)
@property
def header(self):
'''
Format this element's metadata as it would appear in a PLY
header.
'''
lines = ['element %s %d' % (self.name, self.count)]
# Some information is lost here, since all comments are placed
# between the 'element' line and the first property definition.
for c in self.comments:
lines.append('comment ' + c)
lines.extend(list(map(str, self.properties)))
return '\r\n'.join(lines)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __str__(self):
return self.header
def __repr__(self):
return ('PlyElement(%r, %r, count=%d, comments=%r)' %
(self.name, self.properties, self.count,
self.comments))
class PlyProperty(object):
'''
PLY property description. This class is pure metadata; the data
itself is contained in PlyElement instances.
'''
def __init__(self, name, val_dtype):
self._name = str(name)
self._check_name()
self.val_dtype = val_dtype
def _get_val_dtype(self):
return self._val_dtype
def _set_val_dtype(self, val_dtype):
self._val_dtype = _data_types[_lookup_type(val_dtype)]
val_dtype = property(_get_val_dtype, _set_val_dtype)
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "Error: property name %r contains spaces" % self._name
raise RuntimeError(msg)
@staticmethod
def _parse_one(line):
assert line[0] == 'property'
if line[1] == 'list':
if len(line) > 5:
raise PlyParseError("too many fields after "
"'property list'")
if len(line) < 5:
raise PlyParseError("too few fields after "
"'property list'")
return PlyListProperty(line[4], line[2], line[3])
else:
if len(line) > 3:
raise PlyParseError("too many fields after "
"'property'")
if len(line) < 3:
raise PlyParseError("too few fields after "
"'property'")
return PlyProperty(line[2], line[1])
def dtype(self, byte_order='='):
'''
Return the numpy dtype description for this property (as a tuple
of strings).
'''
return byte_order + self.val_dtype
def _from_fields(self, fields):
'''
Parse from generator. Raise StopIteration if the property could
not be read.
'''
return _np.dtype(self.dtype()).type(next(fields))
def _to_fields(self, data):
'''
Return generator over one item.
'''
yield _np.dtype(self.dtype()).type(data)
def _read_bin(self, stream, byte_order):
'''
Read data from a binary stream. Raise StopIteration if the
property could not be read.
'''
try:
return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
except IndexError:
raise StopIteration
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
_np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
def __str__(self):
val_str = _data_type_reverse[self.val_dtype]
return 'property %s %s' % (val_str, self.name)
def __repr__(self):
return 'PlyProperty(%r, %r)' % (self.name,
_lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
'''
PLY list property description.
'''
def __init__(self, name, len_dtype, val_dtype):
PlyProperty.__init__(self, name, val_dtype)
self.len_dtype = len_dtype
def _get_len_dtype(self):
return self._len_dtype
def _set_len_dtype(self, len_dtype):
self._len_dtype = _data_types[_lookup_type(len_dtype)]
len_dtype = property(_get_len_dtype, _set_len_dtype)
def dtype(self, byte_order='='):
'''
List properties always have a numpy dtype of "object".
'''
return '|O'
def list_dtype(self, byte_order='='):
'''
Return the pair (len_dtype, val_dtype) (both numpy-friendly
strings).
'''
return (byte_order + self.len_dtype,
byte_order + self.val_dtype)
def _from_fields(self, fields):
(len_t, val_t) = self.list_dtype()
n = int(_np.dtype(len_t).type(next(fields)))
data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
if len(data) < n:
raise StopIteration
return data
def _to_fields(self, data):
'''
Return generator over the (numerical) PLY representation of the
list data (length followed by actual data).
'''
(len_t, val_t) = self.list_dtype()
data = _np.asarray(data, dtype=val_t).ravel()
yield _np.dtype(len_t).type(data.size)
for x in data:
yield x
def _read_bin(self, stream, byte_order):
(len_t, val_t) = self.list_dtype(byte_order)
try:
n = _np.fromfile(stream, len_t, 1)[0]
except IndexError:
raise StopIteration
data = _np.fromfile(stream, val_t, n)
if len(data) < n:
raise StopIteration
return data
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_np.array(data.size, dtype=len_t).tofile(stream)
data.tofile(stream)
def __str__(self):
len_str = _data_type_reverse[self.len_dtype]
val_str = _data_type_reverse[self.val_dtype]
return 'property list %s %s %s' % (len_str, val_str, self.name)
def __repr__(self):
return ('PlyListProperty(%r, %r, %r)' %
(self.name,
_lookup_type(self.len_dtype),
_lookup_type(self.val_dtype)))
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/plyfile.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
import math
import glob, os
import argparse
import open3d as o3d
def make_open3d_point_cloud(xyz, color=None, voxel_size=None):
if np.isnan(xyz).any():
return None
xyz = xyz[:,:3]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
pcd.colors = o3d.utility.Vector3dVector(color)
if voxel_size is not None:
pcd = pcd.voxel_down_sample(voxel_size)
return pcd
def compute_overlap_ratio(pcd0, pcd1, voxel_size):
pcd0_down = pcd0.voxel_down_sample(voxel_size)
pcd1_down = pcd1.voxel_down_sample(voxel_size)
# get_matching_indices expects a KDTreeFlann as its second argument, so build
# search trees over the downsampled clouds before querying.
pcd0_tree = o3d.geometry.KDTreeFlann(pcd0_down)
pcd1_tree = o3d.geometry.KDTreeFlann(pcd1_down)
matching01 = get_matching_indices(pcd0_down, pcd1_tree, voxel_size * 1.5, 1)
matching10 = get_matching_indices(pcd1_down, pcd0_tree, voxel_size * 1.5, 1)
overlap0 = float(len(matching01)) / float(len(pcd0_down.points))
overlap1 = float(len(matching10)) / float(len(pcd1_down.points))
return max(overlap0, overlap1)
def get_matching_indices(source, pcd_tree, search_voxel_size, K=None):
match_inds = []
for i, point in enumerate(source.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
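# Hedged usage sketch (pcd0/pcd1 are open3d point clouds loaded elsewhere;
# the 0.05m voxel size matches the script default below):
#
#     ratio = compute_overlap_ratio(pcd0, pcd1, voxel_size=0.05)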
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--input_path', required=True, help='path to the folder of extracted point cloud .npz files')
parser.add_argument('--voxel_size', type=float, default=0.05)
opt = parser.parse_args()
print(opt)
print('loading point clouds and downsampling...')
_points = [
(pcd_name, make_open3d_point_cloud(np.load(pcd_name)['pcd'], voxel_size=opt.voxel_size))
for pcd_name in glob.glob(os.path.join(opt.input_path, "*.npz"))
]
points = [(pcd_name, pcd) for (pcd_name, pcd) in _points if pcd is not None]
print('loaded {} point clouds ({} invalid were filtered out), computing matching/overlapping'.format(
len(points), len(_points) - len(points)))
matching_matrix = np.zeros((len(points), len(points)))
for i, (pcd0_name, pcd0) in enumerate(points):
print('matching to...{}'.format(pcd0_name))
pcd0_tree = o3d.geometry.KDTreeFlann(copy.deepcopy(pcd0))
for j, (pcd1_name, pcd1) in enumerate(points):
if i == j:
continue
matching_matrix[i, j] = float(len(get_matching_indices(pcd1, pcd0_tree, 1.5 * opt.voxel_size, 1))) / float(len(pcd1.points))
# write to file
print('writing to file')
with open(os.path.join(opt.input_path, "overlap.txt"), 'w') as f:
for i, (pcd0_name, pcd0) in enumerate(points):
for j, (pcd1_name, pcd1) in enumerate(points):
if i < j:
overlap = max(matching_matrix[i, j], matching_matrix[j, i])
f.write("{} {} {}\n".format(pcd0_name, pcd1_name, overlap))
print('done.')
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/compute_full_overlapping.py
|
import argparse
import os, sys
from SensorData import SensorData
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--filename', required=True, help='path to sens file to read')
parser.add_argument('--output_path', required=True, help='path to output folder')
parser.add_argument('--export_depth_images', dest='export_depth_images', action='store_true')
parser.add_argument('--export_color_images', dest='export_color_images', action='store_true')
parser.add_argument('--export_poses', dest='export_poses', action='store_true')
parser.add_argument('--export_intrinsics', dest='export_intrinsics', action='store_true')
parser.add_argument('--frame_skip', type=int, default=25)
parser.set_defaults(export_depth_images=True, export_color_images=True, export_poses=True, export_intrinsics=True)
opt = parser.parse_args()
print(opt)
def main():
if not os.path.exists(opt.output_path):
os.makedirs(opt.output_path)
# load the data
print('loading %s...' % opt.filename)
sd = SensorData(opt.filename)
print('loaded!\n')
if opt.export_depth_images:
sd.export_depth_images(os.path.join(opt.output_path, 'depth'), frame_skip=opt.frame_skip)
if opt.export_color_images:
sd.export_color_images(os.path.join(opt.output_path, 'color'), frame_skip=opt.frame_skip)
if opt.export_poses:
sd.export_poses(os.path.join(opt.output_path, 'pose'), frame_skip=opt.frame_skip)
if opt.export_intrinsics:
sd.export_intrinsics(os.path.join(opt.output_path, 'intrinsic'))
if __name__ == '__main__':
main()
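# Hedged example invocation (the .sens path and output folder are placeholders):
#
#   python reader.py --filename scans/scene0000_00.sens \
#       --output_path partial_frames/scene0000_00 --frame_skip 25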
|
ContrastiveSceneContexts-main
|
pretrain/scannet_pair/reader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import json
import logging
import torch
from omegaconf import OmegaConf
from easydict import EasyDict as edict
import lib.multiprocessing_utils as mpu
import hydra
from lib.ddp_trainer import PointNCELossTrainer, PartitionPointNCELossTrainer, PartitionPointNCELossTrainerPointNet
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format='%(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch])
torch.manual_seed(0)
torch.cuda.manual_seed(0)
logging.basicConfig(level=logging.INFO, format="")
def get_trainer(trainer):
if trainer == 'PointNCELossTrainer':
return PointNCELossTrainer
elif trainer == 'PartitionPointNCELossTrainer':
return PartitionPointNCELossTrainer
elif trainer == 'PartitionPointNCELossTrainerPointNet':
return PartitionPointNCELossTrainerPointNet
else:
raise ValueError(f'Trainer {trainer} not found')
@hydra.main(config_path='config', config_name='defaults.yaml')
def main(config):
if os.path.exists('config.yaml'):
logging.info('===> Loading existing config file')
config = OmegaConf.load('config.yaml')
logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# Launch one process per GPU when multiple GPUs are requested.
if config.misc.num_gpus > 1:
mpu.multi_proc_run(config.misc.num_gpus,
fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
def single_proc_run(config):
from lib.ddp_data_loaders import make_data_loader
train_loader = make_data_loader(
config,
int(config.trainer.batch_size / config.misc.num_gpus),
num_threads=int(config.misc.train_num_thread / config.misc.num_gpus))
Trainer = get_trainer(config.trainer.trainer)
trainer = Trainer(config=config, data_loader=train_loader)
if config.misc.is_train:
trainer.train()
else:
trainer.test()
if __name__ == "__main__":
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
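# Hedged example launch (Hydra override syntax; the values are illustrative and
# assume the keys defined in config/defaults.yaml):
#
#   python ddp_train.py trainer.trainer=PartitionPointNCELossTrainer \
#       trainer.batch_size=32 misc.num_gpus=8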
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/ddp_train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
from model.pointnet2.pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
from model.pointnet2.pointnet2_utils import furthest_point_sample
import MinkowskiEngine as ME
class PointNet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, num_feats, n_out, config, D):
super().__init__()
input_feature_dim= 0
self.config = config
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
self.fp3 = PointnetFPModule(mlp=[256+128,256,128])
self.fp4 = PointnetFPModule(mlp=[128,128,32])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX_inds: int64 Tensor of shape (B,K) values in [0,N-1]
"""
if not end_points: end_points = {}
xyz0, features0 = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz0, features0)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 2 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
features = self.fp3(end_points['sa1_xyz'], end_points['sa2_xyz'], end_points['sa1_features'], features)
features = self.fp4(xyz0 , end_points['sa1_xyz'], features0, features)
return features
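# Hedged usage sketch (config is whatever config object the caller already has;
# shapes are illustrative):
#
#     backbone = PointNet2Backbone(num_feats=0, n_out=32, config=config, D=3).cuda()
#     xyz = torch.rand(2, 20000, 3).cuda()        # (B, N, 3), no extra features
#     feats = backbone(xyz)                       # (B, 32, N) per-point features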
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2backbone.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import model.res16unet as res16unet
import model.pointnet2backbone as pointnet2
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(res16unet)
add_models(pointnet2)
def get_models():
'''Returns the list of available model classes.'''
return MODELS
def load_model(name):
'''Returns the model class given its class name.
'''
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model name. Options are:')
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
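# Hedged usage sketch (Res16UNet34C is one of the classes registered above;
# config is assumed to carry the fields the network reads, e.g. net.conv1_kernel_size):
#
#     NetClass = load_model('Res16UNet34C')
#     model = NetClass(in_channels=3, out_channels=32, config=config, D=3)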
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from model.resnet import ResNetBase, get_norm
from model.modules.common import ConvType, NormType, conv, conv_tr
from model.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU, MinkowskiGlobalPooling
from MinkowskiEngine import SparseTensor
import MinkowskiEngine.MinkowskiOps as me
import torch
import torch.nn as nn
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self,
in_channels,
out_channels,
config,
D=3):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
self.normalize_feature = config.net.normalize_feature
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config.opt.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
encoder_out = self.block4(out)
out = self.convtr4p16s2(encoder_out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
contrastive = self.final(out)
if self.normalize_feature:
contrastive = SparseTensor(
contrastive.F / torch.norm(contrastive.F, p=2, dim=1, keepdim=True),
coords_key=contrastive.coords_key,
coords_manager=contrastive.coords_man)
return contrastive
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/res16unet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiNetwork
from model.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from model.modules.resnet_block import BasicBlock, Bottleneck
class Model(MinkowskiNetwork):
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.opt.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
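# Hedged usage sketch (the decay schedule is illustrative, not a repo setting):
#
#     bn_lambda = lambda epoch: max(0.5 * (0.5 ** (epoch // 20)), 0.01)
#     bn_scheduler = BNMomentumScheduler(model, bn_lambda)
#     for epoch in range(num_epochs):
#         train_one_epoch(...)                    # hypothetical training step
#         bn_scheduler.step(epoch)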
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2/pytorch_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = os.path.dirname(os.path.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[os.path.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
return pt_utils.feature_dropout_no_scaling(X, theta, self.train, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
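# Hedged usage sketch (requires the compiled pointnet2._ext CUDA extension and
# a CUDA device; shapes are illustrative):
#
#     xyz = torch.rand(8, 16384, 3).cuda()        # (B, N, 3)
#     inds = furthest_point_sample(xyz, 1024)     # (B, 1024) sampled indices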
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of unknown features
known : torch.Tensor
(B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
# type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
(B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
(B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
# type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
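# Hedged usage sketch for QueryAndGroup (shapes and radius are illustrative):
#
#     grouper = QueryAndGroup(radius=0.4, nsample=32, use_xyz=True)
#     xyz = torch.rand(2, 4096, 3).cuda()
#     new_xyz = xyz[:, :1024, :].contiguous()     # e.g. FPS centroids
#     feats = torch.rand(2, 64, 4096).cuda()
#     grouped = grouper(xyz, new_xyz, feats)      # (2, 64 + 3, 1024, 32)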
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
(B, N, C) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
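# Illustrative usage sketch (not part of the original file): propagate features
# from m=64 "known" points back onto n=256 "unknown" points with
# inverse-distance weighting followed by the shared MLP. Assumes a CUDA device
# and the pointnet2 CUDA ops; mlp[0] must equal C2 + C1 (here 128 + 32).
def _example_fp_module(batch_size=2):
    fp = PointnetFPModule(mlp=[128 + 32, 128]).cuda()
    unknown = torch.randn(batch_size, 256, 3).cuda()         # (B, n, 3)
    known = torch.randn(batch_size, 64, 3).cuda()            # (B, m, 3)
    unknown_feats = torch.randn(batch_size, 32, 256).cuda()  # (B, C1, n)
    known_feats = torch.randn(batch_size, 128, 64).cuda()    # (B, C2, m)
    return fp(unknown, known, unknown_feats, known_feats)    # (B, 128, n)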
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_modules.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from model.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
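# Illustrative usage sketch (not part of the original file): a BasicBlock that
# keeps the channel count. When inplanes differs from planes * expansion, the
# enclosing network is expected to pass a 1x1 conv (+ norm) as `downsample` so
# the residual shapes match; assumes MinkowskiEngine is installed.
def _example_basic_block(planes=32):
    return BasicBlock(planes, planes, stride=1, dilation=1, downsample=None,
                      conv_type=ConvType.HYPERCUBE, D=3)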
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/modules/resnet_block.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
from enum import Enum
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
SPARSE_LAYER_NORM = 1
SPARSE_INSTANCE_NORM = 2
SPARSE_SWITCH_NORM = 3
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.SPARSE_INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels, D=D)
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS '
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
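# Illustrative usage sketch (not part of the original file): a 3x3x3 hypercubic
# sparse convolution over 3D coordinates, the same helper the residual blocks
# in this repo use to build their layers.
def _example_conv_layer(in_planes=32, out_planes=64):
    return conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1,
                bias=False, conv_type=ConvType.HYPERCUBE, D=3)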
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/model/modules/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import random
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, coords, feats):
for transform in self.transforms:
coords, feats = transform(coords, feats)
return coords, feats
class Jitter:
def __init__(self, mu=0, sigma=0.01):
self.mu = mu
self.sigma = sigma
def __call__(self, coords, feats):
if random.random() < 0.95:
feats += np.random.normal(self.mu, self.sigma, (feats.shape[0], feats.shape[1]))
return coords, feats
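# Illustrative usage sketch (not part of the original file): composing the color
# jitter on a dummy point cloud; coordinates pass through unchanged, features
# receive Gaussian noise with probability 0.95.
def _example_transform():
    coords = np.zeros((100, 3), dtype=np.float32)
    feats = np.full((100, 3), 127.0)
    transform = Compose([Jitter(mu=0, sigma=0.01)])
    return transform(coords, feats)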
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0.0
self.sq_sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sq_sum += val**2 * n
self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.avg = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
self.avg = 0
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.avg = self.total_time / self.calls
if average:
return self.avg
else:
return self.diff
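# Illustrative usage sketch (not part of the original file): timing an iteration
# loop with Timer while tracking the running average of a per-step value with
# AverageMeter, as the trainers in this repo do.
def _example_timing(n_steps=5):
    timer, meter = Timer(), AverageMeter()
    for step in range(n_steps):
        timer.tic()
        time.sleep(0.01) # stand-in for a real training step
        meter.update(float(step))
        timer.toc()
    return timer.avg, meter.avg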
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/timer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/__init__.py
|
# Written by Chris Choy <chrischoy@ai.stanford.edu>
# Distributed under MIT License
import logging
import random
import torch
import torch.utils.data
import numpy as np
import glob
import os
import copy
from tqdm import tqdm
from scipy.linalg import expm, norm
from lib.io3d import write_triangle_mesh
import lib.transforms as t
import MinkowskiEngine as ME
from torch.utils.data.sampler import RandomSampler
from lib.data_sampler import DistributedInfSampler
import open3d as o3d
def make_open3d_point_cloud(xyz, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
pcd.colors = o3d.utility.Vector3dVector(color)
return pcd
def get_matching_indices(source, target, trans, search_voxel_size, K=None):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
match_inds = []
for i, point in enumerate(source_copy.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
def default_collate_pair_fn(list_data):
xyz0, xyz1, coords0, coords1, feats0, feats1, label0, label1, instance0, instance1, matching_inds, trans, T0 = list(zip(*list_data))
xyz_batch0, coords_batch0, feats_batch0, label_batch0, instance_batch0 = [], [], [], [], []
xyz_batch1, coords_batch1, feats_batch1, label_batch1, instance_batch1 = [], [], [], [], []
matching_inds_batch, trans_batch, len_batch, T0_batch = [], [], [], []
batch_id = 0
curr_start_inds = np.zeros((1, 2))
for batch_id, _ in enumerate(coords0):
N0 = coords0[batch_id].shape[0]
N1 = coords1[batch_id].shape[0]
# Move batchids to the beginning
xyz_batch0.append(torch.from_numpy(xyz0[batch_id]))
coords_batch0.append(
torch.cat((torch.ones(N0, 1).float() * batch_id,
torch.from_numpy(coords0[batch_id]).float()), 1))
feats_batch0.append(torch.from_numpy(feats0[batch_id]))
label_batch0.append(torch.from_numpy(label0[batch_id]))
instance_batch0.append(torch.from_numpy(instance0[batch_id]))
xyz_batch1.append(torch.from_numpy(xyz1[batch_id]))
coords_batch1.append(
torch.cat((torch.ones(N1, 1).float() * batch_id,
torch.from_numpy(coords1[batch_id]).float()), 1))
feats_batch1.append(torch.from_numpy(feats1[batch_id]))
label_batch1.append(torch.from_numpy(label1[batch_id]))
instance_batch1.append(torch.from_numpy(instance1[batch_id]))
trans_batch.append(torch.from_numpy(trans[batch_id]))
T0_batch.append(torch.from_numpy(T0[batch_id]))
# in case 0 matching
if len(matching_inds[batch_id]) == 0:
matching_inds[batch_id].extend([0, 0])
matching_inds_batch.append(
torch.from_numpy(np.array(matching_inds[batch_id]) + curr_start_inds))
len_batch.append([N0, N1])
# Move the head
curr_start_inds[0, 0] += N0
curr_start_inds[0, 1] += N1
# Concatenate all lists
xyz_batch0 = torch.cat(xyz_batch0, 0).float()
coords_batch0 = torch.cat(coords_batch0, 0).float()
feats_batch0 = torch.cat(feats_batch0, 0).float()
label_batch0 = torch.cat(label_batch0, 0).int()
instance_batch0 = torch.cat(instance_batch0, 0).int()
xyz_batch1 = torch.cat(xyz_batch1, 0).float()
coords_batch1 = torch.cat(coords_batch1, 0).float()
feats_batch1 = torch.cat(feats_batch1, 0).float()
label_batch1 = torch.cat(label_batch1, 0).int()
instance_batch1 = torch.cat(instance_batch1, 0).int()
trans_batch = torch.cat(trans_batch, 0).float()
T0_batch = torch.stack(T0_batch, 0).float()
matching_inds_batch = torch.cat(matching_inds_batch, 0).int()
return {
'pcd0': xyz_batch0,
'pcd1': xyz_batch1,
'sinput0_C': coords_batch0,
'sinput0_F': feats_batch0,
'sinput0_L': label_batch0,
        'sinput0_I': instance_batch0,
'sinput1_C': coords_batch1,
'sinput1_F': feats_batch1,
'sinput1_L': label_batch1,
'sinput1_I': instance_batch1,
'correspondences': matching_inds_batch,
'trans': trans_batch,
'T0': T0_batch,
'len_batch': len_batch,
}
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
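# Illustrative check (not part of the original file): rotating the x-axis by 90
# degrees about z with M() should give (approximately) the y-axis.
def _example_rotation_about_z():
    R = M(np.array([0.0, 0.0, 1.0]), np.pi / 2)
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), atol=1e-6)
    return R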
def sample_random_trans(pcd, randg, rotation_range=360):
T = np.eye(4)
R = M(randg.rand(3) - 0.5, rotation_range * np.pi / 180.0 * (randg.rand(1) - 0.5))
T[:3, :3] = R
T[:3, 3] = R.dot(-np.mean(pcd, axis=0))
return T
def sample_random_trans_z(pcd):
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi))
rot_mats = []
for axis_ind, rot_bound in enumerate(ROTATION_AUGMENTATION_BOUND):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
T = np.eye(4)
T[:3, :3] = rot_mat
T[:3, 3] = rot_mat.dot(-np.mean(pcd, axis=0))
return T
def only_trans(pcd):
T = np.eye(4)
T[:3, 3] = -np.mean(pcd, axis=0)
return T
class PairDataset(torch.utils.data.Dataset):
AUGMENT = None
def __init__(self,
phase,
transform=None,
random_scale=False,
manual_seed=False,
config=None):
self.phase = phase
self.files = []
self.data_objects = []
self.transform = transform
self.voxel_size = config.data.voxel_size
self.matching_search_voxel_size = \
config.data.voxel_size * config.trainer.positive_pair_search_voxel_size_multiplier
self.config = config
self.random_scale = random_scale
self.min_scale = 0.8
self.max_scale = 1.2
self.randg = np.random.RandomState()
if manual_seed:
self.reset_seed()
self.root = '/'
if phase == "train":
self.root_filelist = root = config.data.scannet_match_dir
else:
raise NotImplementedError
logging.info(f"Loading the subset {phase} from {root}")
fname_txt = os.path.join(self.root_filelist, 'splits/overlap30.txt')
with open(fname_txt) as f:
content = f.readlines()
fnames = [x.strip().split() for x in content]
for fname in fnames:
self.files.append([os.path.join(self.root_filelist, fname[0]),
os.path.join(self.root_filelist, fname[1])])
def reset_seed(self, seed=0):
logging.info(f"Resetting the data loader seed to {seed}")
self.randg.seed(seed)
def apply_transform(self, pts, trans):
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts @ R.T + T
return pts
def __len__(self):
return len(self.files)
class ScanNetIndoorPairDataset(PairDataset):
OVERLAP_RATIO = None
AUGMENT = None
def __init__(self,
phase,
transform=None,
random_scale=False,
manual_seed=False,
config=None):
PairDataset.__init__(self, phase, transform, random_scale, manual_seed, config)
# add
self.CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
self.VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)
NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.
# 0-40
IGNORE_LABELS = tuple(set(range(41)) - set(self.VALID_CLASS_IDS))
self.label_map = {}
n_used = 0
for l in range(NUM_LABELS):
if l in IGNORE_LABELS:
self.label_map[l] = 255
else:
self.label_map[l] = n_used
n_used += 1
self.label_map[255] = 255
def get_correspondences(self, idx):
file0 = os.path.join(self.root, self.files[idx][0])
file1 = os.path.join(self.root, self.files[idx][1])
data0 = np.load(file0)
data1 = np.load(file1)
xyz0 = data0["pcd"][:,:3]
xyz1 = data1["pcd"][:,:3]
label0 = (data0["pcd"][:,6] / 1000).astype(np.int32)
label1 = (data1["pcd"][:,6] / 1000).astype(np.int32)
instance0 = (data0["pcd"][:,6] % 1000).astype(np.int32)
instance1 = (data1["pcd"][:,6] % 1000).astype(np.int32)
color0 = data0['pcd'][:,3:6]
color1 = data1['pcd'][:,3:6]
matching_search_voxel_size = self.matching_search_voxel_size
if self.random_scale and random.random() < 0.95:
scale = self.min_scale + \
(self.max_scale - self.min_scale) * random.random()
matching_search_voxel_size *= scale
xyz0 = scale * xyz0
xyz1 = scale * xyz1
if self.config.data.random_rotation_xyz:
T0 = sample_random_trans(xyz0, self.randg)
T1 = sample_random_trans(xyz1, self.randg)
else:
T0 = sample_random_trans_z(xyz0)
T1 = sample_random_trans_z(xyz1)
#else:
# T0 = only_trans(xyz0)
# T1 = only_trans(xyz1)
trans = T1 @ np.linalg.inv(T0)
xyz0 = self.apply_transform(xyz0, T0)
xyz1 = self.apply_transform(xyz1, T1)
# Voxelization
sel0 = ME.utils.sparse_quantize(xyz0 / self.voxel_size, return_index=True)
sel1 = ME.utils.sparse_quantize(xyz1 / self.voxel_size, return_index=True)
if not self.config.data.voxelize:
sel0 = sel0[np.random.choice(sel0.shape[0], self.config.data.num_points,
replace=self.config.data.num_points>sel0.shape[0])]
sel1 = sel1[np.random.choice(sel1.shape[0], self.config.data.num_points,
replace=self.config.data.num_points>sel1.shape[0])]
# Make point clouds using voxelized points
pcd0 = make_open3d_point_cloud(xyz0)
pcd1 = make_open3d_point_cloud(xyz1)
# Select features and points using the returned voxelized indices
pcd0.colors = o3d.utility.Vector3dVector(color0[sel0])
pcd1.colors = o3d.utility.Vector3dVector(color1[sel1])
pcd0.points = o3d.utility.Vector3dVector(np.array(pcd0.points)[sel0])
pcd1.points = o3d.utility.Vector3dVector(np.array(pcd1.points)[sel1])
label0 = label0[sel0]
label1 = label1[sel1]
color0 = color0[sel0]
color1 = color1[sel1]
instance0 = instance0[sel0]
instance1 = instance1[sel1]
matches = get_matching_indices(pcd0, pcd1, trans, matching_search_voxel_size)
# Get features
feats_train0, feats_train1 = [], []
feats_train0.append(color0)
feats_train1.append(color1)
feats0 = np.hstack(feats_train0)
feats1 = np.hstack(feats_train1)
# Get coords
xyz0 = np.array(pcd0.points)
xyz1 = np.array(pcd1.points)
if self.config.data.voxelize:
coords0 = np.floor(xyz0 / self.voxel_size)
coords1 = np.floor(xyz1 / self.voxel_size)
else:
coords0 = xyz0
coords1 = xyz1
#jitter color
if self.transform:
coords0, feats0 = self.transform(coords0, feats0)
coords1, feats1 = self.transform(coords1, feats1)
feats0 = feats0 / 255.0 - 0.5
feats1 = feats1 / 255.0 - 0.5
# label mapping for monitor
label0 = np.array([self.label_map[x] for x in label0], dtype=np.int)
label1 = np.array([self.label_map[x] for x in label1], dtype=np.int)
# NB(s9xie): xyz are coordinates in the original system;
# coords are sparse conv grid coords. (subject to a scaling factor)
# coords0 -> sinput0_C
# trans is T0*T1^-1
return (xyz0, xyz1, coords0, coords1, feats0, feats1, label0, label1, instance0, instance1, matches, trans, T0)
def __getitem__(self, idx):
return self.get_correspondences(idx)
class ScanNetMatchPairDataset(ScanNetIndoorPairDataset):
OVERLAP_RATIO = 0.3
DATA_FILES = {
'train': './config/train_scannet.txt',
}
ALL_DATASETS = [ScanNetMatchPairDataset]
dataset_str_mapping = {d.__name__: d for d in ALL_DATASETS}
def make_data_loader(config, batch_size, num_threads=0):
if config.data.dataset not in dataset_str_mapping.keys():
logging.error(f'Dataset {config.data.dataset}, does not exists in ' +
', '.join(dataset_str_mapping.keys()))
Dataset = dataset_str_mapping[config.data.dataset]
transforms = []
transforms.append(t.Jitter())
dset = Dataset(
phase="train",
transform=t.Compose(transforms),
random_scale=False,
config=config)
collate_pair_fn = default_collate_pair_fn
if config.misc.num_gpus > 1:
sampler = DistributedInfSampler(dset)
else:
sampler = None
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=False if sampler else True,
num_workers=num_threads,
collate_fn=collate_pair_fn,
pin_memory=False,
sampler=sampler,
drop_last=True)
return loader
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/ddp_data_loaders.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path as osp
import gc
import logging
import numpy as np
import json
from omegaconf import OmegaConf
import torch.nn as nn
import torch
import torch.optim as optim
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from lib.data_sampler import InfSampler, DistributedInfSampler
from model import load_model
from lib.timer import Timer, AverageMeter
import MinkowskiEngine as ME
import lib.distributed as du
import torch.distributed as dist
from lib.criterion import NCESoftmaxLoss
from torch.serialization import default_restore_location
torch.autograd.set_detect_anomaly(True)
LARGE_NUM = 1e9
def apply_transform(pts, trans):
voxel_size = 0.025
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts * voxel_size
pts = torch.matmul(pts - T, torch.inverse(R.T))
pts = pts - torch.mean(pts, 0)
pts = pts / voxel_size
return pts
def _hash(arr, M):
if isinstance(arr, np.ndarray):
N, D = arr.shape
else:
N, D = len(arr[0]), len(arr)
hash_vec = np.zeros(N, dtype=np.int64)
for d in range(D):
if isinstance(arr, np.ndarray):
hash_vec += arr[:, d] * M**d
else:
hash_vec += arr[d] * M**d
return hash_vec
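# Illustrative sketch (not part of the original file): with M=7 the hash of 2D
# integer coordinates is x + 7*y, so distinct cells map to distinct keys as long
# as every coordinate stays below M.
def _example_hash():
    coords = np.array([[0, 0], [1, 0], [0, 1], [2, 3]], dtype=np.int64)
    return _hash(coords, 7) # -> array([ 0, 1, 7, 23])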
def load_state(model, weights, lenient_weight_loading=False):
if du.get_world_size() > 1:
_model = model.module
else:
_model = model
if lenient_weight_loading:
model_state = _model.state_dict()
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
logging.info("Load weights:" + ', '.join(filtered_weights.keys()))
weights = model_state
weights.update(filtered_weights)
_model.load_state_dict(weights, strict=True)
def shuffle_loader(data_loader, cur_epoch):
assert isinstance(data_loader.sampler, (RandomSampler, InfSampler, DistributedSampler, DistributedInfSampler))
if isinstance(data_loader.sampler, DistributedSampler):
data_loader.sampler.set_epoch(cur_epoch)
class ContrastiveLossTrainer:
def __init__(
self,
config,
data_loader):
assert config.misc.use_gpu and torch.cuda.is_available(), "DDP mode must support GPU"
num_feats = 3 # always 3 for finetuning.
self.is_master = du.is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True
# Model initialization
self.cur_device = torch.cuda.current_device()
Model = load_model(config.net.model)
model = Model(
num_feats,
config.net.model_n_out,
config,
D=3)
model = model.cuda(device=self.cur_device)
if config.misc.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[self.cur_device],
output_device=self.cur_device,
broadcast_buffers=False,
)
self.config = config
self.model = model
self.optimizer = getattr(optim, config.opt.optimizer)(
model.parameters(),
lr=config.opt.lr,
momentum=config.opt.momentum,
weight_decay=config.opt.weight_decay)
self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, config.opt.exp_gamma)
self.curr_iter = 0
self.batch_size = data_loader.batch_size
self.data_loader = data_loader
self.neg_thresh = config.trainer.neg_thresh
self.pos_thresh = config.trainer.pos_thresh
#---------------- optional: resume checkpoint by given path ----------------------
if config.net.weight:
if self.is_master:
logging.info('===> Loading weights: ' + config.net.weight)
state = torch.load(config.net.weight, map_location=lambda s, l: default_restore_location(s, 'cpu'))
load_state(model, state['state_dict'], config.misc.lenient_weight_loading)
if self.is_master:
logging.info('===> Loaded weights: ' + config.net.weight)
#---------------- default: resume checkpoint in current folder ----------------------
checkpoint_fn = 'weights/weights.pth'
if osp.isfile(checkpoint_fn):
if self.is_master:
logging.info("=> loading checkpoint '{}'".format(checkpoint_fn))
state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu'))
self.curr_iter = state['curr_iter']
load_state(model, state['state_dict'])
self.optimizer.load_state_dict(state['optimizer'])
self.scheduler.load_state_dict(state['scheduler'])
if self.is_master:
logging.info("=> loaded checkpoint '{}' (curr_iter {})".format(checkpoint_fn, state['curr_iter']))
else:
logging.info("=> no checkpoint found at '{}'".format(checkpoint_fn))
if self.is_master:
self.writer = SummaryWriter(logdir='logs')
if not os.path.exists('weights'):
os.makedirs('weights', mode=0o755)
OmegaConf.save(config, 'config.yaml')
# added
from lib.shape_context import ShapeContext
self.partitioner = ShapeContext(r1=config.shape_context.r1,
r2=config.shape_context.r2,
nbins_xy=config.shape_context.nbins_xy,
nbins_zy=config.shape_context.nbins_zy)
def pdist(self, A, B):
D2 = torch.sum((A.unsqueeze(1) - B.unsqueeze(0)).pow(2), 2)
return torch.sqrt(D2 + 1e-7)
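    # Illustrative worked example (not part of the original file): for
    # A = [[1, 0], [0, 1]] and B = [[0, 0], [1, 0]], pdist returns approximately
    # [[1.0, 0.0], [1.0, 1.414]], i.e. the (len(A), len(B)) matrix of pairwise
    # Euclidean distances; the 1e-7 keeps the sqrt differentiable at zero.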
def _save_checkpoint(self, curr_iter, filename='checkpoint'):
if not self.is_master:
return
_model = self.model.module if du.get_world_size() > 1 else self.model
state = {
'curr_iter': curr_iter,
'state_dict': _model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
}
filepath = os.path.join('weights', f'{filename}.pth')
logging.info("Saving checkpoint: {} ...".format(filepath))
torch.save(state, filepath)
# Delete symlink if it exists
if os.path.exists('weights/weights.pth'):
os.remove('weights/weights.pth')
# Create symlink
os.system('ln -s {}.pth weights/weights.pth'.format(filename))
class PointNCELossTrainer(ContrastiveLossTrainer):
def __init__(
self,
config,
data_loader):
ContrastiveLossTrainer.__init__(self, config, data_loader)
self.T = config.misc.nceT
self.npos = config.misc.npos
self.stat_freq = config.trainer.stat_freq
self.lr_update_freq = config.trainer.lr_update_freq
self.checkpoint_freq = config.trainer.checkpoint_freq
def compute_loss(self, q, k, mask=None):
npos = q.shape[0]
logits = torch.mm(q, k.transpose(1, 0)) # npos by npos
labels = torch.arange(npos).cuda().long()
out = torch.div(logits, self.T)
out = out.squeeze().contiguous()
        if mask is not None:
out = out - LARGE_NUM * mask.float()
criterion = NCESoftmaxLoss().cuda()
loss = criterion(out, labels)
return loss
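    # Note (not part of the original file): logits is the (npos, npos) matrix of
    # query/key similarities scaled by 1/T; row i is trained to put its mass on
    # column i (labels = arange(npos)), so each query must match its own key and
    # treats every other key in the batch as a negative. Entries where mask is
    # set are pushed down by LARGE_NUM so they never act as negatives.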
def train(self):
curr_iter = self.curr_iter
data_loader_iter = self.data_loader.__iter__()
data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()
while (curr_iter < self.config.opt.max_iter):
curr_iter += 1
epoch = curr_iter / len(self.data_loader)
batch_loss = self._train_iter(data_loader_iter, [data_meter, data_timer, total_timer])
# update learning rate
if curr_iter % self.lr_update_freq == 0 or curr_iter == 1:
lr = self.scheduler.get_last_lr()
self.scheduler.step()
# Print logs
if curr_iter % self.stat_freq == 0 and self.is_master:
self.writer.add_scalar('train/loss', batch_loss['loss'], curr_iter)
logging.info(
"Train Epoch: {:.3f} [{}/{}], Current Loss: {:.3e}"
.format(epoch, curr_iter,
len(self.data_loader), batch_loss['loss']) +
"\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}, LR: {}".format(
data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg, self.scheduler.get_last_lr()))
data_meter.reset()
total_timer.reset()
# save checkpoint
if self.is_master and curr_iter % self.checkpoint_freq == 0:
lr = self.scheduler.get_last_lr()
logging.info(f" Epoch: {epoch}, LR: {lr}")
checkpoint_name = 'checkpoint'
if not self.config.trainer.overwrite_checkpoint:
checkpoint_name += '_{}'.format(curr_iter)
self._save_checkpoint(curr_iter, checkpoint_name)
def _train_iter(self, data_loader_iter, timers):
data_meter, data_timer, total_timer = timers
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
data_time = 0
total_timer.tic()
data_timer.tic()
input_dict = data_loader_iter.next()
data_time += data_timer.toc(average=False)
sinput0 = ME.SparseTensor(
input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.cur_device)
F0 = self.model(sinput0)
F0 = F0.F
sinput1 = ME.SparseTensor(
input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.cur_device)
F1 = self.model(sinput1)
F1 = F1.F
N0, N1 = input_dict['pcd0'].shape[0], input_dict['pcd1'].shape[0]
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
if self.npos < q_unique.shape[0]:
sampled_inds = np.random.choice(q_unique.shape[0], self.npos, replace=False)
q_unique = q_unique[sampled_inds]
k_sel = k_sel[sampled_inds]
q = F0[q_unique.long()]
k = F1[k_sel.long()]
loss = self.compute_loss(q,k)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
class PartitionPointNCELossTrainer(PointNCELossTrainer):
def _train_iter(self, data_loader_iter, timers):
# optimizer and loss
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
loss = 0
# timing
data_meter, data_timer, total_timer = timers
data_time = 0
total_timer.tic()
data_timer.tic()
input_dict = data_loader_iter.next()
data_time += data_timer.toc(average=False)
# network forwarding
sinput0 = ME.SparseTensor(
input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.cur_device)
F0 = self.model(sinput0)
F0 = F0.F
sinput1 = ME.SparseTensor(
input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.cur_device)
F1 = self.model(sinput1)
F1 = F1.F
# get positive pairs
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
# iterate batch
source_batch_ids = input_dict['sinput0_C'][q_unique.long()][:,0].float().cuda()
for batch_id in range(self.batch_size):
# batch mask
mask = (source_batch_ids == batch_id)
q_unique_batch = q_unique[mask]
k_sel_batch = k_sel[mask]
# sampling points in current scene
if self.npos < q_unique_batch.shape[0]:
sampled_inds = np.random.choice(q_unique_batch.shape[0], self.npos, replace=False)
q_unique_batch = q_unique_batch[sampled_inds]
k_sel_batch = k_sel_batch[sampled_inds]
q = F0[q_unique_batch.long()]
k = F1[k_sel_batch.long()]
npos = q.shape[0]
if npos == 0:
logging.info('partitionTrainer: no points in this batch')
continue
source_xyz = input_dict['sinput0_C'][q_unique_batch.long()][:,1:].float().cuda()
if self.config.data.world_space:
T0 = input_dict['T0'][batch_id].cuda()
source_xyz = apply_transform(source_xyz, T0)
if self.config.shape_context.fast_partition:
source_partition = self.partitioner.compute_partitions_fast(source_xyz)
else:
source_partition = self.partitioner.compute_partitions(source_xyz)
for partition_id in range(self.partitioner.partitions):
factor = 1.0
if self.config.shape_context.weight_inner and partition_id < int(self.partitioner.partitions/2):
factor = 2.0
mask_q = (source_partition == partition_id)
mask_q.fill_diagonal_(True)
loss += factor * self.compute_loss(q, k, ~mask_q) / (self.partitioner.partitions * self.batch_size)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
class PartitionPointNCELossTrainerPointNet(PointNCELossTrainer):
def _train_iter(self, data_loader_iter, timers):
# optimizer and loss
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
loss = 0
# timing
data_meter, data_timer, total_timer = timers
data_time = 0
total_timer.tic()
data_timer.tic()
input_dict = data_loader_iter.next()
data_time += data_timer.toc(average=False)
# network forwarding
points = input_dict['sinput0_C']
feats = input_dict['sinput0_F']
points0 = []
for batch_id in points[:,0].unique():
mask = points[:,0] == batch_id
points0.append(points[mask, 1:])
points0 = torch.stack(points0).cuda()
F0 = self.model(points0)
F0 = F0.transpose(1,2).contiguous()
F0 = F0.view(-1, 32)
points = input_dict['sinput1_C']
feats = input_dict['sinput1_F']
points1 = []
for batch_id in points[:,0].unique():
mask = points[:,0] == batch_id
points1.append(points[mask, 1:])
points1 = torch.stack(points1).cuda()
F1 = self.model(points1)
F1 = F1.transpose(1,2).contiguous()
F1 = F1.view(-1, 32)
# get positive pairs
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
# iterate batch
source_batch_ids = input_dict['sinput0_C'][q_unique.long()][:,0].float().cuda()
for batch_id in range(self.batch_size):
# batch mask
mask = (source_batch_ids == batch_id)
q_unique_batch = q_unique[mask]
k_sel_batch = k_sel[mask]
# sampling points in current scene
if self.npos < q_unique_batch.shape[0]:
sampled_inds = np.random.choice(q_unique_batch.shape[0], self.npos, replace=False)
q_unique_batch = q_unique_batch[sampled_inds]
k_sel_batch = k_sel_batch[sampled_inds]
q = F0[q_unique_batch.long()]
k = F1[k_sel_batch.long()]
npos = q.shape[0]
if npos == 0:
logging.info('partitionTrainer: no points in this batch')
continue
source_xyz = input_dict['sinput0_C'][q_unique_batch.long()][:,1:].float().cuda()
if self.config.data.world_space:
T0 = input_dict['T0'][batch_id].cuda()
source_xyz = apply_transform(source_xyz, T0)
if self.config.shape_context.fast_partition:
source_partition = self.partitioner.compute_partitions_fast(source_xyz)
else:
source_partition = self.partitioner.compute_partitions(source_xyz)
for partition_id in range(self.partitioner.partitions):
factor = 1.0
if self.config.shape_context.weight_inner and partition_id < int(self.partitioner.partitions/2):
factor = 2.0
mask_q = (source_partition == partition_id)
mask_q.fill_diagonal_(True)
loss += factor * self.compute_loss(q, k, ~mask_q) / (self.partitioner.partitions * self.batch_size)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/ddp_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
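# Illustrative usage sketch (not part of the original file): mapping per-vertex
# nyu40 label ids to RGB colors before exporting with write_triangle_mesh below.
def _example_label_colors(labels):
    palette = np.array(create_color_palette(), dtype=np.uint8) # (41, 3)
    labels = np.clip(np.asarray(labels, dtype=np.int64), 0, len(palette) - 1)
    return palette[labels]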
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
return vertices, colors, faces
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/io3d.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Distributed helpers."""
import pickle
import time
import functools
import logging
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd import Function
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
    np_list = [int(n.item()) for n in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
    When training using a single GPU, there is only one training process,
    which is considered the master process.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", "10001"),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather_obj(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def scaled_all_reduce_dict_obj(res_dict, num_gpus):
""" Reduce a dictionary of arbitrary objects. """
res_dict_list = all_gather_obj(res_dict)
assert len(res_dict_list) == num_gpus
res_keys = res_dict_list[0].keys()
res_dict_reduced = {}
for k in res_keys:
res_dict_reduced[k] = 1.0 * sum([r[k] for r in res_dict_list]) / num_gpus
return res_dict_reduced
def scaled_all_reduce_dict(res_dict, num_gpus):
""" Reduce a dictionary of tensors. """
reductions = []
for k in res_dict:
reduction = torch.distributed.all_reduce(res_dict[k], async_op=True)
reductions.append(reduction)
for reduction in reductions:
reduction.wait()
for k in res_dict:
res_dict[k] = res_dict[k].clone().mul_(1.0 / num_gpus)
return res_dict
def scaled_all_reduce(tensors, num_gpus, is_scale=True):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of
the process group (equivalent to cfg.NUM_GPUS).
"""
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
if is_scale:
for tensor in tensors:
tensor.mul_(1.0 / num_gpus)
return tensors
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
# gathers = []
tensor_list = []
output_tensor = []
world_size = dist.get_world_size()
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
torch.distributed.all_gather(
# list(tensor_all.unbind(0)),
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
# gathers.append(gather)
# Wait for gathers to finish
# for gather in gathers:
# gather.wait()
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
class AllGatherWithGradient(Function):
"""AllGatherWithGradient"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, input):
x_gather = all_gather_batch([input])[0]
return x_gather
def backward(self, grad_output):
N = grad_output.size(0)
mini_batchsize = N // self.args.num_gpus
# Does not scale for gradient
grad_output = scaled_all_reduce([grad_output], self.args.num_gpus, is_scale=False)[0]
cur_gpu = get_rank()
grad_output = \
grad_output[cur_gpu * mini_batchsize: (cur_gpu + 1) * mini_batchsize]
return grad_output
class AllGatherVariableSizeWithGradient(Function):
"""
All Gather with Gradient for variable size inputs: [different num_points, ?].
Return a list.
"""
def __init__(self, config):
super().__init__()
self.config = config
def forward(self, input):
x_gather_list = all_gather_differentiable(input)
input_size_list = all_gather_obj(input.size(0))
# every rank needs the cumulative offsets so it can slice out its own chunk in backward
self.start_list = [sum(input_size_list[:t]) for t in range(len(input_size_list)+1)]
dist.barrier()
return torch.cat(x_gather_list, 0)
def backward(self, grad_output):
grad_output = scaled_all_reduce([grad_output], self.config.num_gpus, is_scale=False)[0]
cur_gpu = get_rank()
return grad_output[self.start_list[cur_gpu]:self.start_list[cur_gpu+1]]
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/distributed.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Multiprocessing helpers."""
import multiprocessing as mp
import traceback
from lib.error_handler import ErrorHandler
import lib.distributed as du
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
du.init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
du.destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
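# Hedged usage sketch (added for illustration; not part of the original file):
# multi_proc_run spawns num_proc workers, each of which joins the process group
# via lib.distributed.init_process_group before calling `fun`. The names
# `train` and `config` below are hypothetical stand-ins for the caller's entry
# point and its arguments.
#
#   def train(config):
#       ...   # per-process training loop
#
#   if __name__ == '__main__':
#       multi_proc_run(num_proc=4, fun=train, fun_args=(config,))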
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/multiprocessing_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import numpy as np
class ShapeContext(object):
def __init__(self, r1=0.125, r2=2, nbins_xy=2, nbins_zy=2):
# right-hand rule
"""
nbins_xy >= 2
nbins_zy >= 1
"""
self.r1 = r1
self.r2 = r2
self.nbins_xy = nbins_xy
self.nbins_zy = nbins_zy
self.partitions = nbins_xy * nbins_zy * 2
@staticmethod
def pdist(rel_trans):
D2 = torch.sum(rel_trans.pow(2), 2)
return torch.sqrt(D2 + 1e-7)
@staticmethod
def compute_rel_trans(A, B):
return A.unsqueeze(0) - B.unsqueeze(1)
@staticmethod
def hash(A, B, seed):
'''
seed = bins of B
entry < 0 will be ignored
'''
mask = (A >= 0) & (B >= 0)
C = torch.zeros_like(A) - 1
C[mask] = A[mask] * seed + B[mask]
return C
@staticmethod
def compute_angles(rel_trans):
""" compute angles between a set of points """
angles_xy = torch.atan2(rel_trans[:,:,1], rel_trans[:,:,0])
#angles between 0, 2*pi
angles_xy = torch.fmod(angles_xy + 2 * math.pi, 2 * math.pi)
angles_zy = torch.atan2(rel_trans[:,:,1], rel_trans[:,:,2])
#angles between 0, pi
angles_zy = torch.fmod(angles_zy + 2 * math.pi, math.pi)
return angles_xy, angles_zy
def compute_partitions(self, xyz):
rel_trans = ShapeContext.compute_rel_trans(xyz, xyz)
# angles
angles_xy, angles_zy = ShapeContext.compute_angles(rel_trans)
angles_xy_bins = torch.floor(angles_xy / (2 * math.pi / self.nbins_xy))
angles_zy_bins = torch.floor(angles_zy / (math.pi / self.nbins_zy))
angles_bins = ShapeContext.hash(angles_xy_bins, angles_zy_bins, self.nbins_zy)
# distances
distance_matrix = ShapeContext.pdist(rel_trans)
dist_bins = torch.zeros_like(angles_bins) - 1
# partitions
mask = (distance_matrix >= self.r1) & (distance_matrix < self.r2)
dist_bins[mask] = 0
mask = distance_matrix >= self.r2
dist_bins[mask] = 1
bins = ShapeContext.hash(dist_bins, angles_bins, self.nbins_xy * self.nbins_zy)
return bins
def compute_partitions_fast(self, xyz):
'''
fast partitions: axis-aligned partitions
'''
partition_matrix = torch.zeros((xyz.shape[0], xyz.shape[0]))
partition_matrix = partition_matrix.cuda() -1e9
rel_trans = ShapeContext.compute_rel_trans(xyz, xyz)
maskUp = rel_trans[:,:,2] > 0.0
maskDown = rel_trans[:,:,2] < 0.0
distance_matrix = ShapeContext.pdist(rel_trans)
mask = (distance_matrix[:,:] > self.r1) & (distance_matrix[:,:] <= self.r2)
partition_matrix[mask & maskUp] = 0
partition_matrix[mask & maskDown] = 1
mask = distance_matrix[:,:] > self.r2
partition_matrix[mask & maskUp] = 2
partition_matrix[mask & maskDown] = 3
self.partitions = 4
return partition_matrix
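# Hedged usage sketch (added for illustration; not part of the original file):
# compute_partitions returns an (N, N) matrix assigning every point pair to one
# of self.partitions spatial bins (entries < 0 mean "ignore", e.g. pairs closer
# than r1). A minimal run on random points:
#
#   sc = ShapeContext(r1=0.125, r2=2, nbins_xy=2, nbins_zy=2)
#   xyz = torch.rand(1024, 3)            # hypothetical point cloud
#   bins = sc.compute_partitions(xyz)    # (1024, 1024) bin indices
#   assert bins.max() < sc.partitions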
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/shape_context.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Multiprocessing error handler."""
import os
import signal
import threading
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/error_handler.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
class NCESoftmaxLoss(nn.Module):
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, label):
bsz = x.shape[0]
x = x.squeeze()
loss = self.criterion(x, label)
return loss
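# Hedged usage sketch (added for illustration; not part of the original file):
# NCESoftmaxLoss treats each row of a (B, K) score matrix as logits over K
# candidates and `label` as the index of the positive candidate, i.e. a
# standard NCE-softmax / InfoNCE objective. Shapes below are hypothetical.
#
#   criterion = NCESoftmaxLoss()
#   scores = torch.randn(8, 4096)                 # similarity logits
#   labels = torch.zeros(8, dtype=torch.long)     # positive pair sits at index 0
#   loss = criterion(scores, labels)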
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils.data.sampler import Sampler
import torch.distributed as dist
import math
class InfSampler(Sampler):
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
num_samples = len(self.data_source)
perm = torch.randperm(num_samples) if self.shuffle else torch.arange(num_samples)
self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
next = __next__ # Python 2 compatibility
class DistributedInfSampler(InfSampler):
def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.data_source = data_source
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.it = 0
self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.reset_permutation()
def __next__(self):
it = self.it * self.num_replicas + self.rank
value = self._perm[it % len(self._perm)]
self.it = self.it + 1
if (self.it * self.num_replicas) >= len(self._perm):
self.reset_permutation()
self.it = 0
return value
def __len__(self):
return self.num_samples
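# Hedged usage sketch (added for illustration; not part of the original file):
# Both samplers yield indices indefinitely, re-shuffling once a permutation is
# exhausted, so they are typically consumed step-by-step rather than by epochs.
# `dataset` and `num_steps` below are hypothetical.
#
#   sampler = DistributedInfSampler(dataset) if dist.is_initialized() else InfSampler(dataset, shuffle=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, sampler=sampler)
#   data_iter = iter(loader)
#   for step in range(num_steps):
#       batch = next(data_iter)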
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/data_sampler.py
|
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from .scannet_benchmark_utils import util_3d, util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
#CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
# 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
# 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
util.print_error('%s: number of predicted values does not match number of vertices' % sceneId, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids)
def get_iou(self, label_id, confusion):
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
if counter !=0:
logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
if output_file:
self.write_result_file(self.confusion, class_ious, output_file)
return summation / counter
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/evaluation/evaluate_semantic_label.py
|
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file looks like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# The relative paths to predicted masks must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file]
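#
# For concreteness (illustrative example only, not from the original header),
# a prediction file such as scene0707_00.txt could contain lines like:
#   predicted_masks/scene0707_00_000.txt 33 0.8327
#   predicted_masks/scene0707_00_001.txt 5 0.4921
# where each referenced mask file holds one 0/1 integer per mesh vertex.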
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from lib.scannet_benchmark_utils import util_3d
from lib.scannet_benchmark_utils import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
ap = np.zeros((len(self.distance_threshes), len(self.CLASS_LABELS), len(self.overlaps)), dtype=float)
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
cur_match = np.zeros(len(gt_instances), dtype=bool)
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
# for pred instance id starts from 0; in gt valid instance id starts from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/evaluation/evaluate_semantic_instance.py
|
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/evaluation/scannet_benchmark_utils/util.py
|
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
from . import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
output_mask_file_relative = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
f.write('%s %d %f\n' % (output_mask_file_relative, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
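# Hedged usage sketch (added for illustration; not part of the original file):
# ids follow the ScanNet convention id = semantic_label * 1000 + instance_index
# (0 = no instance), which Instance.get_label_id decodes above. A minimal round trip:
#
#   ids = np.array([0, 5001, 5001, 33002])        # hypothetical per-vertex ids
#   VALID_CLASS_IDS = np.array([5, 33])
#   CLASS_LABELS = ['chair', 'toilet']
#   id2label = {5: 'chair', 33: 'toilet'}
#   instances = get_instances(ids, VALID_CLASS_IDS, CLASS_LABELS, id2label)
#   # instances['chair'][0]['vert_count'] == 2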
|
ContrastiveSceneContexts-main
|
pretrain/contrastive_scene_contexts/lib/evaluation/scannet_benchmark_utils/util_3d.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import time
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from model.builder import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description='MMSeg benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
torch.backends.cudnn.benchmark = False
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_estimator(cfg.model, test_cfg=cfg.get('test_cfg'))
model = MMDataParallel(model, device_ids=[0])
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
total_iters = 200
metas = [[dict(min_disp=1, max_disp=100, ori_shape=(512, 640), img_shape=(512, 640))]]
img = [torch.rand([1, 1, 3, 512, 640]).cuda()]
data = dict(img=img, img_metas=metas, r_img=img, gt_disp=None)
# benchmark with 200 image and take the average
for i in range(total_iters):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, evaluate=False, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % args.log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Done image [{i + 1:<3}/ {total_iters}], '
f'fps: {fps:.2f} img / s')
if (i + 1) == total_iters:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Overall fps: {fps:.2f} img / s')
break
if __name__ == '__main__':
main()
|
CODD-main
|
benchmark_speed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed
from mmseg.datasets import build_dataset
from mmseg.utils import collect_env, get_root_logger
import datasets # NOQA
from apis import train_estimator
from model import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--detect_anomaly', action='store_true')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
torch.autograd.set_detect_anomaly(args.detect_anomaly)
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# gpu_ids is used to calculate iter when resuming checkpoint
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_estimator(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
# SyncBN is not support for DP
if not distributed:
warnings.warn(
'SyncBN is only supported with DDP. To be compatible with DP, '
'we convert SyncBN to BN. Please use dist_train.sh which can '
'avoid this error.')
model = revert_sync_batchnorm(model)
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# passing checkpoint meta for saving best checkpoint
meta.update(cfg.checkpoint_config.meta)
train_estimator(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
|
CODD-main
|
train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model)
from mmcv.utils import DictAction
from mmseg.datasets import build_dataloader, build_dataset
import datasets # NOQA
from apis import multi_gpu_inference, single_gpu_inference
from model import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description="mmseg test (and eval) a model")
parser.add_argument("config", help="test config file path")
parser.add_argument("checkpoint", help="checkpoint file")
parser.add_argument(
"--show-dir", default='./work_dirs/output',
help="directory where logs and visualization will be saved",
)
parser.add_argument('--eval', action='store_true', help='eval results')
parser.add_argument('--show', action='store_true', help='draw comparison figures')
parser.add_argument("--img-dir", help="directory to input images")
parser.add_argument("--r-img-dir", help="directory to input images")
parser.add_argument(
"--img-suffix", default=".png", help="suffix of image file, e.g., '.png'")
parser.add_argument(
"--num-frames", type=int, help="number of frames to run inference"
)
parser.add_argument(
"--num-workers", type=int, help="number of workers to run inference", default=1
)
parser.add_argument("--options", nargs="+", action=DictAction, help="custom options")
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get("cudnn_benchmark", False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
if args.num_frames is not None:
cfg.data.test.num_samples = args.num_frames
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
if not distributed:
cfg.data.workers_per_gpu = 0
# build the dataloader
if args.img_dir is not None:
cfg.data.test.data_root = None
cfg.data.test.img_dir = args.img_dir
cfg.data.test.r_img_dir = args.r_img_dir
cfg.data.test.img_suffix = args.img_suffix
cfg.data.test.r_img_suffix = args.img_suffix
rank, world_size = get_dist_info()
cfg.data.test.rank = rank
cfg.data.test.world_size = world_size
cfg.data.test.inference_mode = True
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=args.num_workers,
dist=distributed,
shuffle=False,
persistent_workers=distributed
)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_estimator(cfg.model, test_cfg=cfg.get("test_cfg"))
fp16_cfg = cfg.get("fp16", None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location="cpu")
if not distributed:
device_ids = [0] if args.gpus > 1 else None
model = MMDataParallel(model, device_ids=device_ids)
single_gpu_inference(model, data_loader, args.show_dir, show=args.show, evaluate=args.eval)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
)
multi_gpu_inference(model, data_loader, args.show_dir, show=args.show, evaluate=args.eval)
if __name__ == '__main__':
main()
|
CODD-main
|
inference.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .inference import single_gpu_inference, multi_gpu_inference
from .train import train_estimator
|
CODD-main
|
apis/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import warnings
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
def train_estimator(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Launch estimator training."""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
drop_last=True,
persistent_workers=True if cfg.data.workers_per_gpu > 0 else False) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.get('runner') is None:
cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# register hooks
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
# an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
persistent_workers=True if cfg.data.workers_per_gpu > 0 else False
)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
# In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
# priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
runner.register_hook(
eval_hook(val_dataloader, **eval_cfg), priority='LOW')
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
|
CODD-main
|
apis/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import functools
import os.path as osp
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmcv.utils import print_log, mkdir_or_exist
from mmseg.utils import get_root_logger
from utils import RunningStatsWithBuffer
def single_gpu_inference(
model,
data_loader,
out_dir=None,
show=False,
evaluate=False,
**kwargs
):
"""Inference with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
out_dir (str, optional): If specified, visualizations and running
statistics are written into this directory.
show (bool): whether to draw comparison figures.
evaluate (bool): whether to compute metrics.
Returns:
None.
"""
model.eval()
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
mkdir_or_exist(out_dir)
rs = RunningStatsWithBuffer(osp.join(out_dir, "stats.csv")) if evaluate else None
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, evaluate=evaluate, **data)
if out_dir:
img_metas = data["img_metas"][0].data[0]
for img_meta in img_metas:
out_file = osp.join(out_dir, img_meta["ori_filename"])
model.module.show_result(
img_meta["filename"],
result,
show=show,
out_file=out_file,
inp=data,
dataset={
k: v
for k, v in vars(dataset).items()
if isinstance(v, (int, float, tuple))
},
running_stats=rs,
)
batch_size = len(result)
for _ in range(batch_size):
prog_bar.update()
if evaluate:
print_log(
f"\n{rs.n} samples, mean {rs.mean}, std: {rs.std}", logger=get_root_logger()
)
rs.dump()
def multi_gpu_inference(
model,
data_loader,
out_dir=None,
show=False,
evaluate=False,
**kwargs
):
"""Test model with multiple gpus.
This method runs inference with multiple GPUs; per-rank running statistics
are gathered with torch.distributed.all_gather_object and reduced on the
rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
out_dir (str): Path of directory to save output results.
show (bool): whether to draw comparison figures.
evaluate (bool): whether to compute metrics.
Returns:
None.
"""
model.eval()
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
mkdir_or_exist(out_dir)
rs = RunningStatsWithBuffer(osp.join(out_dir, "stats.csv")) if evaluate else None
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, evaluate=evaluate, **data)
if out_dir:
img_metas = data["img_metas"][0].data[0]
for img_meta in img_metas:
out_file = osp.join(out_dir, img_meta["ori_filename"])
model.module.show_result(
img_meta["filename"],
result,
show=show,
out_file=out_file,
inp=data,
dataset={
k: v
for k, v in vars(dataset).items()
if isinstance(v, (int, float, tuple))
},
running_stats=rs,
)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
if evaluate:
output = [None for _ in range(world_size)]
dist.all_gather_object(output, rs)
if rank == 0:
rs = functools.reduce(lambda a, b: a + b, output)
print_log(
f"\n{rs.n} samples, mean {rs.mean}, std: {rs.std}",
logger=get_root_logger(),
)
rs.dump()
|
CODD-main
|
apis/inference.py
|