# ============================================================
# c3dm-main / c3dm/tools/__init__.py  (empty file)
# ============================================================

# ============================================================
# c3dm-main / c3dm/tools/model_io.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
import torch
import glob
import os
def load_stats(flstats):
    try:
        with open(flstats, 'rb') as f:
            stats, _ = pickle.load(f)  # don't load the config
    except Exception:
        print("Can't load stats! %s" % flstats)
        stats = None
    return stats
def get_model_path(fl):
fl = os.path.splitext(fl)[0]
flmodel = "%s.pth" % fl
return flmodel
def get_optimizer_path(fl):
fl = os.path.splitext(fl)[0]
flopt = "%s_opt.pth" % fl
return flopt
def get_stats_path(fl):
fl = os.path.splitext(fl)[0]
flstats = "%s_stats.pkl" % fl
return flstats
def save_model(model,stats,fl,optimizer=None,cfg=None):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
print("saving model to %s" % flmodel)
torch.save(model.state_dict(),flmodel)
if optimizer is not None:
flopt = get_optimizer_path(fl)
print("saving optimizer to %s" % flopt)
torch.save(optimizer.state_dict(),flopt)
print("saving model stats and cfg to %s" % flstats)
pickle.dump((stats,cfg),open(flstats,'wb'))
def load_model(fl):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
flopt = get_optimizer_path(fl)
model_state_dict = torch.load(flmodel)
stats = load_stats(flstats)
if os.path.isfile(flopt):
optimizer = torch.load(flopt)
else:
optimizer = None
return model_state_dict, stats, optimizer
def get_checkpoint(exp_dir,epoch):
fl = os.path.join( exp_dir, 'model_epoch_%08d.pth' % epoch )
return fl
def find_last_checkpoint(exp_dir):
fls = sorted( glob.glob( os.path.join(
glob.escape(exp_dir), 'model_epoch_'+'[0-9]'*8+'.pth'
)))
return fls[-1] if len(fls) > 0 else None
def purge_epoch(exp_dir,epoch):
model_path = get_checkpoint(exp_dir,epoch)
to_kill = [ model_path,
get_optimizer_path(model_path),
get_stats_path(model_path) ]
for k in to_kill:
if os.path.isfile(k):
print('deleting %s' % k)
os.remove(k)
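# Usage sketch (hypothetical paths) of the checkpoint round trip above:
# save_model() writes <fl>.pth, <fl>_opt.pth and <fl>_stats.pkl next to each
# other, and load_model() reads them back.
#   fl = get_checkpoint('/tmp/exp', 1)   # '/tmp/exp/model_epoch_00000001.pth'
#   save_model(model, stats, fl, optimizer=optimizer, cfg=cfg)
#   state_dict, stats, opt_state = load_model(fl)
#   model.load_state_dict(state_dict)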
# ============================================================
# c3dm-main / c3dm/tools/cache_preds.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import time
import sys
import copy
import torch
from tqdm import tqdm
from tools.stats import Stats
from tools.utils import pprint_dict, has_method, get_net_input
def cache_preds(model,
loader,
cache_vars=None,
stats=None,
n_extract=None,
cat=True,
eval_mode=True,
strict_mode=False,
):
print("caching model predictions: %s" % str(cache_vars) )
if eval_mode:
model.eval()
else:
print('TRAINING EVAL MODE!!!')
model.train()
trainmode = 'test'
t_start = time.time()
cached_preds = []
cache_size = 0. # in GB ... counts only cached tensor sizes
n_batches = len(loader)
if n_extract is not None:
n_batches = n_extract
with tqdm(total=n_batches,file=sys.stdout) as pbar:
for it, batch in enumerate(loader):
last_iter = it==n_batches-1
# move to gpu and cast to Var
net_input = get_net_input(batch)
with torch.no_grad():
preds = model(**net_input)
if strict_mode:
assert not any( k in preds for k in net_input.keys() )
preds.update(net_input) # merge everything into one big dict
# if True:
# model.visualize('ff_debug', 'eval', preds, None, clear_env=False)
# import pdb; pdb.set_trace()
if stats is not None:
stats.update(preds,time_start=t_start,stat_set=trainmode)
assert stats.it[trainmode]==it, "inconsistent stat iteration number!"
# restrict the variables to cache
if cache_vars is not None:
preds = {k:preds[k] for k in cache_vars if k in preds}
# ... gather and log the size of the cache
preds, preds_size = gather_all(preds)
cache_size += preds_size
# for k in preds:
# if has_method(preds[k],'cuda'):
# preds[k] = preds[k].data.cpu()
# cache_size += preds[k].numpy().nbytes / 1e9
cached_preds.append(preds)
pbar.set_postfix(cache_size="%1.2f GB"%cache_size)
pbar.update(1)
if last_iter and n_extract is not None:
break
if cat:
return concatenate_cache( cached_preds )
else:
return cached_preds
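# Usage sketch (assuming a torch DataLoader whose batches are dicts and a
# model whose forward returns a dict); the cached variable names here are
# hypothetical:
#   cached = cache_preds(model, loader, cache_vars=('embedding', 'depth'))
#   cached['embedding']   # tensor whose leading dim spans the whole dataset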
def gather_all( preds ):
cache_size = 0
for k in preds:
if has_method(preds[k],'cuda'):
preds[k] = preds[k].data.cpu()
cache_size += preds[k].numpy().nbytes / 1e9
elif type(preds[k])==dict:
preds[k], size_now = gather_all( preds[k] )
cache_size += size_now
return preds, cache_size
# cache concatenation - largely taken from pytorch default_collate()
import re
# `torch._six` was removed in recent PyTorch releases; fall back to the
# stdlib equivalents when it is unavailable.
try:
    from torch._six import container_abcs, string_classes, int_classes
except ImportError:
    import collections.abc as container_abcs
    string_classes = (str,)
    int_classes = int
np_str_obj_array_pattern = re.compile(r'[SaUO]')
error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def concatenate_cache(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
return torch.cat(batch, 0, out=out) # the main difference is here
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(error_msg_fmt.format(elem.dtype))
return concatenate_cache([torch.from_numpy(b) for b in batch])
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(batch[0], int_classes):
return torch.tensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], container_abcs.Mapping):
return {key: concatenate_cache([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple
return type(batch[0])(*(concatenate_cache(samples) for samples in zip(*batch)))
elif isinstance(batch[0], container_abcs.Sequence): # also some diffs here
# just unpack
return [ s_ for s in batch for s_ in s ]
# elif batch[0] is None:
# return batch
raise TypeError((error_msg_fmt.format(type(batch[0]))))
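# Behaviour sketch of concatenate_cache(): tensors are concatenated along
# dim 0, dicts are merged recursively, and lists are chained:
#   a = {'x': torch.zeros(2, 3), 'n': ['i', 'j']}
#   b = {'x': torch.ones(1, 3), 'n': ['k']}
#   out = concatenate_cache([a, b])
#   out['x'].shape   # torch.Size([3, 3])
#   out['n']         # ['i', 'j', 'k']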
# def concatenate_cache(cached_preds):
# flds = list(cached_preds[0].keys())
# cached_preds_concat = {}
# for fld in flds:
# classic_cat = True
# if type(cached_preds[0][fld])==str or type(cached_preds[0][fld][0])==str:
# classic_cat = True
# else:
# try:
# cached_preds_concat[fld] = torch.cat( \
# [c[fld] for c in cached_preds] , dim=0 )
# classic_cat = False
# except:
# pass
# if classic_cat:
# cached_preds_concat[fld] = \
# [x for c in cached_preds for x in c[fld]]
# return cached_preds_concat
# ============================================================
# c3dm-main / c3dm/tools/so3.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import math
import torch.nn.functional as Fu
def so3_6d_to_rot(d6):
"""
d6 ... batch x 6
Follows Sec. B in the appendix of:
https://arxiv.org/pdf/1812.07035.pdf
"""
a1, a2 = d6[:, :3], d6[:, 3:]
b1 = Fu.normalize(a1, dim=1)
b2 = a2 - (b1 * a2).sum(1, keepdim=True) * b1
b2 = Fu.normalize(b2, dim=1)
b3 = torch.cross(b1, b2)
R = torch.stack((b1, b2, b3), dim=1)
# if True:
# assert torch.allclose(torch.det(R), R.new_ones(R.shape[0]))
return R
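# Sanity-check sketch: the rows of the returned matrices form an orthonormal
# basis, so R @ R^T should be the identity:
#   d6 = torch.randn(4, 6)
#   R = so3_6d_to_rot(d6)                    # (4, 3, 3)
#   I = torch.bmm(R, R.permute(0, 2, 1))
#   assert torch.allclose(I, torch.eye(3).expand(4, 3, 3), atol=1e-5)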
def so3_relative_angle(R1, R2):
"""
Calculates the relative angle (in radians) between pairs of
rotation matrices `R1` and `R2` with
    :math:`\\phi = \\text{acos}\\left(\\frac{\\text{Trace}(R_1 R_2^T) - 1}{2}\\right)`.
.. note::
This corresponds to a geodesic distance on the 3D manifold of rotation
matrices.
Args:
R1: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
R2: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
Returns:
Corresponding rotation angles of shape :math:`(\text{minibatch},)`.
Raises:
ValueError if `R1` or `R2` is of incorrect shape.
ValueError if `R1` or `R2` has an unexpected trace.
"""
R12 = torch.bmm(R1, R2.permute(0, 2, 1))
return so3_rotation_angle(R12)
def so3_rotation_angle(R, eps: float = 1e-4):
"""
Calculates angles (in radians) of a batch of rotation matrices `R` with
    :math:`\\phi = \\text{acos}\\left(\\frac{\\text{Trace}(R) - 1}{2}\\right)`. The trace of the
input matrices is checked to be in the valid range [-1-`eps`,3+`eps`].
The `eps` argument is a small constant that allows for small errors
caused by limited machine precision.
Args:
R: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
eps: Tolerance for the valid trace check.
Returns:
Corresponding rotation angles of shape :math:`(\text{minibatch},)`.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N , dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError('Input has to be a batch of 3x3 Tensors.')
rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
if ((rot_trace < -1. - eps) + (rot_trace > 3. + eps)).any():
raise ValueError('A matrix has trace outside valid range [-1-eps,3+eps].')
# clamp to valid range
rot_trace = torch.clamp(rot_trace, -1., 3.)
# phi ... rotation angle
phi = (0.5 * (rot_trace - 1.)).acos()
return phi
def rand_rot(N,dtype=torch.float32,max_rot_angle=float(math.pi),\
axes=(1,1,1),get_ss=False):
rand_axis = torch.zeros( (N,3) ).type(dtype).normal_()
# apply the axes mask
axes = torch.Tensor(axes).type(dtype)
rand_axis = axes[None,:] * rand_axis
rand_axis = Fu.normalize( rand_axis, dim=1, p=2 )
rand_angle = torch.ones( N ).type(dtype).uniform_(0,max_rot_angle)
R_ss_rand = rand_axis * rand_angle[:,None]
R_rand = so3_exponential_map(R_ss_rand)
# if max_rot_angle < float(np.pi)-1:
# e_ = torch.eye(3).type(R_rand.type())
# angles = so3_geod_dist(e_[None,:,:].repeat(N,1,1),R_rand).acos()
# print( "rand rot angles: mu=%1.3f std=%1.3f" % (angles.mean(),angles.std()) )
if get_ss:
return R_rand, R_ss_rand
else:
return R_rand
def random_2d_rotation(size, dtype, max_angle):
theta = (torch.rand(size).type(dtype) - 0.5) * 2 * max_angle
sins = torch.sin(theta)
coss = torch.cos(theta)
return torch.stack((
torch.stack((coss, -sins), dim=-1),
torch.stack((sins, coss), dim=-1),
), dim=-2)
def so3_exponential_map(log_rot: torch.Tensor, eps: float = 0.0001):
"""
Convert a batch of logarithmic representations of rotation matrices `log_rot`
to a batch of 3x3 rotation matrices using Rodrigues formula.
The conversion has a singularity around 0 which is handled by clamping
controlled with the `eps` argument.
Args:
log_rot: batch of vectors of shape :math:`(\text{minibatch} , 3)`
eps: a float constant handling the conversion singularity around 0
Returns:
batch of rotation matrices of shape :math:`(\text{minibatch} , 3 , 3)`
Raises:
ValueError if `log_rot` is of incorrect shape
"""
_ , dim = log_rot.shape
if dim != 3:
raise ValueError('Input tensor shape has to be Nx3.')
nrms = (log_rot * log_rot).sum(1)
phis = torch.clamp(nrms, 0.).sqrt()
phisi = 1. / (phis+eps)
fac1 = phisi * phis.sin()
fac2 = phisi * phisi * (1. - phis.cos())
ss = hat(log_rot)
R = fac1[:, None, None] * ss + \
fac2[:, None, None] * torch.bmm(ss, ss) + \
torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
# from old.functions import rotss2rot
# R_ = rotss2rot(log_rot)
# print((R-R_).abs().max())
# import pdb; pdb.set_trace()
return R
def so3_log_map(R, eps: float = 0.0001):
"""
Convert a batch of 3x3 rotation matrices `R`
to a batch of 3-dimensional matrix logarithms of rotation matrices
The conversion has a singularity around `(R=I)` which is handled
by clamping controlled with the `eps` argument.
Args:
R: batch of rotation matrices of shape `(minibatch, 3, 3)`.
eps: A float constant handling the conversion singularity.
Returns:
Batch of logarithms of input rotation matrices
of shape `(minibatch, 3)`.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N, dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError("Input has to be a batch of 3x3 Tensors.")
phi = so3_rotation_angle(R)
phi_valid = torch.clamp(phi.abs(), eps) * phi.sign()
phi_valid = phi_valid + (phi_valid==0).type_as(phi_valid) * eps
log_rot_hat = (phi_valid /
(2.0 * phi_valid.sin()))[:, None, None] * (R - R.permute(0, 2, 1))
log_rot = hat_inv(log_rot_hat)
return log_rot
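# Round-trip sketch: away from the phi=0 and phi=pi singularities the two
# maps above are mutual inverses (up to the eps clamping):
#   log_rot = torch.randn(4, 3) * 0.5
#   R = so3_exponential_map(log_rot)
#   assert torch.allclose(so3_log_map(R), log_rot, atol=1e-3)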
def hat_inv(h: torch.Tensor):
"""
Compute the inverse Hat operator [1] of a batch of 3x3 matrices.
Args:
h: batch of skew-symmetric matrices of shape :math:`(\text{minibatch}, 3, 3)`
Returns:
batch of 3d vectors of shape :math:`(\text{minibatch}, 3)`
Raises:
ValueError if `h` is of incorrect shape
ValueError if `h` not skew-symmetric
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N , dim1, dim2 = h.shape
if dim1 != 3 or dim2 != 3:
raise ValueError('Input has to be a batch of 3x3 Tensors.')
ss_diff = (h + h.permute(0, 2, 1)).abs().max()
if float(ss_diff) > 1e-5:
raise ValueError('One of input matrices not skew-symmetric.')
x = h[:, 2, 1]
y = h[:, 0, 2]
z = h[:, 1, 0]
v = torch.stack((x, y, z), dim=1)
return v
def hat(v: torch.Tensor):
"""
Compute the Hat operator [1] of a batch of 3D vectors.
Args:
v: batch of vectors of shape :math:`(\text{minibatch} , 3)`
Returns:
batch of skew-symmetric matrices of shape :math:`(\text{minibatch}, 3 , 3)`
Raises:
ValueError if `v` is of incorrect shape
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N , dim = v.shape
if dim != 3:
raise ValueError('Input vectors have to be 3-dimensional.')
h = v.new_zeros(N, 3, 3)
x, y, z = v[:, 0], v[:, 1], v[:, 2]
h[:, 0, 1] = -z
h[:, 0, 2] = y
h[:, 1, 0] = z
h[:, 1, 2] = -x
h[:, 2, 0] = -y
h[:, 2, 1] = x
return h
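# Consistency sketch: hat() and hat_inv() are mutual inverses, so a batch of
# vectors survives the round trip exactly:
#   v = torch.randn(4, 3)
#   assert torch.allclose(hat_inv(hat(v)), v)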
# ============================================================
# c3dm-main / c3dm/tools/utils.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
from tools.attr_dict import AttrDict
import inspect
import io
import os
import tarfile
import time
import urllib.request
import zipfile
import numpy as np
import torch  # needed by TorchSeedFix below
def pprint_dict(d, indent=3):
for key, value in d.items():
print(' ' * indent + str(key),end='', flush=True)
# print('.', end='', flush=True)
if isinstance(value, AttrDict):
# if len(value.keys())==1:
# import pdb; pdb.set_trace() # breakpoint 970a0708 //
# if not isinstance(value, AttrDict):
print("")
pprint_dict(value, indent+1)
else:
print(' = ' + str(value))
def has_method(ob,m):
obcls=ob.__class__
return hasattr(obcls, m) and callable(getattr(obcls,m))
def argparse_to_dict(args):
    raise NotImplementedError('finish this')
def get_net_input(batch):
# move to gpu and cast to Var
net_input = {}
for k in batch:
if has_method(batch[k],'cuda'):
net_input[k] = batch[k].cuda()
else:
net_input[k] = batch[k]
return net_input
def auto_init_args(obj,tgt=None,can_overwrite=False):
# autoassign constructor arguments
frame = inspect.currentframe().f_back # the frame above
params = frame.f_locals
nparams = frame.f_code.co_argcount
paramnames = frame.f_code.co_varnames[1:nparams]
if tgt is not None:
if not can_overwrite:
assert not hasattr(obj,tgt)
setattr(obj,tgt,AttrDict())
tgt_attr = getattr(obj,tgt)
else:
tgt_attr = obj
for name in paramnames:
# print('autosetting %s -> %s' % (name,str(params[name])) )
setattr(tgt_attr,name,params[name])
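# Usage sketch: auto_init_args() inspects the caller's frame, so calling it
# first thing in a constructor assigns every constructor argument onto self:
#   class Banana:
#       def __init__(self, size=1., color='yellow'):
#           auto_init_args(self)   # sets self.size and self.color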
def untar_to_dir(url, path):
response = urllib.request.urlopen(url)
compressed_stream = io.BytesIO(response.read())
if url.endswith('zip'):
opener = lambda stream : zipfile.ZipFile(stream, "r")
else:
# assume tarball
opener = lambda stream : tarfile.open(fileobj=stream, mode="r|*")
with opener(compressed_stream) as tar:
os.makedirs(path)
tar.extractall(path=path)
class NumpySeedFix(object):
def __init__(self,seed=0):
self.rstate = None
self.seed = seed
def __enter__(self):
self.rstate = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, type, value, traceback):
if not(type is None ) and issubclass(type,Exception):
print("error inside 'with' block")
return
np.random.set_state(self.rstate)
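# Usage sketch: make a block deterministic without disturbing the global
# numpy RNG state outside of it:
#   with NumpySeedFix(seed=0):
#       perm = np.random.permutation(10)   # identical on every run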
class TorchSeedFix(object):
    def __init__(self, seed=0):
        self.rstate = None
        self.seed = seed
    def __enter__(self):
        self.rstate = torch.random.get_rng_state()
        torch.manual_seed(self.seed)
    def __exit__(self, type, value, traceback):
        if not (type is None) and issubclass(type, Exception):
            print("error inside 'with' block")
            return
        # restore the RNG state saved on entry (re-seeding here, as the
        # original code did, would not restore the pre-context state)
        torch.random.set_rng_state(self.rstate)
class Timer:
def __init__(self,name="timer",quiet=False):
self.name = name
self.quiet = quiet
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
if not self.quiet:
print( "%20s: %1.6f sec" % ( self.name , self.interval ) )
# ============================================================
# c3dm-main / c3dm/tools/loss_models.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
Fu = F
from torch.autograd import Variable
import numpy as np
from math import exp
from tools.functions import avg_l2_dist, avg_l2_huber, image_meshgrid, huber, logexploss
if torch.cuda.is_available():
T = torch.cuda
else:
T = torch
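# NOTE: `gaussian` is used by GaussianLayer below but is neither defined nor
# imported in this file; a standard normalized 1D Gaussian kernel (as found
# in common SSIM implementations) is assumed here:
def gaussian(window_size, sigma):
    vals = torch.Tensor(
        [exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2))
         for x in range(window_size)])
    return vals / vals.sum()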
def total_variation_loss(image):
    # shift one pixel and get the difference (for both x and y direction);
    # both difference maps are cropped to a common (H-1, W-1) grid so they
    # can be summed (the uncropped maps have incompatible shapes)
    dx = torch.abs(image[:, :, :-1, :-1] - image[:, :, :-1, 1:])
    dy = torch.abs(image[:, :, :-1, :-1] - image[:, :, 1:, :-1])
    return dx + dy
class GaussianLayer(nn.Module):
def __init__(self, sigma=1., separated=False):
super(GaussianLayer, self).__init__()
self.separated = separated
filter_size = int(2*np.ceil(sigma)+1)
generated_filters = gaussian(filter_size, sigma).reshape([1,filter_size])
if self.separated:
self.gaussian_filter_horizontal = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(1,filter_size), padding=(0,filter_size//2),bias=False)
self.gaussian_filter_horizontal.weight.data.copy_(\
generated_filters)
self.gaussian_filter_vertical = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(filter_size,1), padding=(filter_size//2,0), bias=False)
self.gaussian_filter_vertical.weight.data.copy_(\
generated_filters.t())
else:
filter_full = generated_filters * generated_filters.t()
self.gaussian_filter = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(filter_size,filter_size),
padding=(filter_size//2,filter_size//2),bias=False)
self.gaussian_filter.weight.data = filter_full[None, None]
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, img):
ba, dim, he, wi = img.shape
img = torch.cat((img, img.new_ones(ba,1,he,wi)), dim=1)
img = img.view(ba*(dim+1), 1, he, wi)
if self.separated:
imgb = self.gaussian_filter_horizontal(img)
imgb = self.gaussian_filter_vertical(imgb)
else:
imgb = self.gaussian_filter(img)
imgb = imgb.view(ba, dim+1, he, wi)
imgb = imgb[ :, :dim, :, : ] / \
torch.clamp(imgb[ :, dim:dim+1, :, : ], 0.001)
return imgb
class TVLoss(nn.Module):
def __init__(self):
super(TVLoss, self).__init__()
sobel_filter = torch.FloatTensor([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
sobel_filter = sobel_filter / sobel_filter.abs().sum()
self.sobel_filter_horizontal = nn.Conv2d(
in_channels=1, out_channels=1, bias=False,
kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
self.sobel_filter_horizontal.weight.data.copy_(sobel_filter)
self.sobel_filter_vertical = nn.Conv2d(
in_channels=1, out_channels=1, bias = False,
kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
self.sobel_filter_vertical.weight.data.copy_(sobel_filter.t())
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, im, masks=None):
tv = self.sobel_filter_horizontal(im).abs() + \
self.sobel_filter_vertical(im).abs()
if masks is not None:
masks = Fu.interpolate(masks, tv.shape[2:], mode='nearest')
tv = tv * masks
return tv.mean()
class LapFilter(nn.Module):
def __init__(self, size=5):
super(LapFilter, self).__init__()
# use gauss layer to setup the circular 2D filter (hacky)
gauss = GaussianLayer(sigma=size, separated=False)
flt = gauss.gaussian_filter.weight
thr = flt[0, 0, flt.shape[2]//2, 0]
flt = (flt >= thr).float()
flt = flt / torch.clamp(flt.sum(), 1e-4)
        # NOTE: the declared kernel_size is only a placeholder; the weight is
        # replaced below by the (2*ceil(size)+1)-sized disc taken from the
        # GaussianLayer, for which padding=size yields a same-size output
        self.circ_filter = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            bias=False,
            kernel_size=size,
            padding=size
        )
self.circ_filter.weight.data = flt.clone()
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
    def forward(self, img, masks=None):
        ba, dim, he, wi = img.shape
        if masks is None:
            masks = img.new_ones(ba, 1, he, wi)
        elif masks.shape[2:] != img.shape[2:]:
            # resize a user-supplied mask instead of discarding it
            masks = Fu.interpolate(masks, (he, wi), mode='nearest')
imgf = img * masks
imgf = torch.cat((imgf, masks), dim=1)
imgf = imgf.view(ba*(dim+1), 1, he, wi)
imgf = self.circ_filter(imgf)
imgf = imgf.view(ba, dim+1, he, wi)
imgf = imgf[ :, :dim, :, : ] / \
torch.clamp(imgf[ :, dim:dim+1, :, : ], 0.001)
return imgf
class LapLoss(nn.Module):
def __init__(self, size=5):
super(LapLoss, self).__init__()
self.lapfilter = LapFilter(size=size)
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, img, masks=None):
if masks is not None:
masks = Fu.interpolate(masks, size=img.shape[2:], mode='nearest')
else:
masks = img[:,0:1,:,:] * 0. + 1.
imgf = self.lapfilter(img, masks=masks)
diff = (((img - imgf)*masks)**2).sum(dim=(1,2,3))
diff = diff / torch.clamp(masks.sum(dim=(1,2,3)), 1e-4)
return diff.mean(), imgf
## Perceptual VGG19 loss
class PerceptualVGG19(nn.Module):
def __init__(self, feature_layers, use_normalization=True,
path=None, input_from_tanh=True, flatten=True,
):
super(PerceptualVGG19, self).__init__()
if path != '' and path is not None:
print('Loading pretrained model')
model = models.vgg19(pretrained=False)
model.load_state_dict(torch.load(path))
else:
model = models.vgg19(pretrained=True)
model.float()
model.eval()
self.model = model
self.feature_layers = feature_layers
self.input_from_tanh = input_from_tanh
self.flatten = flatten
self.mean = torch.FloatTensor([0.485, 0.456, 0.406])
self.mean_tensor = None
self.std = torch.FloatTensor([0.229, 0.224, 0.225])
self.std_tensor = None
self.use_normalization = use_normalization
if torch.cuda.is_available():
self.mean = self.mean.cuda()
self.std = self.std.cuda()
for param in self.parameters():
param.requires_grad = False
def normalize(self, x):
if not self.use_normalization:
return x
if self.mean_tensor is None:
self.mean_tensor = self.mean.view(1, 3, 1, 1)
self.std_tensor = self.std.view(1, 3, 1, 1)
if self.input_from_tanh:
x = (x + 1) / 2
return (x - self.mean_tensor) / self.std_tensor
def run(self, x, resize):
features = []
masks = []
h = x
for f in range(max(self.feature_layers) + 1):
h = self.model.features[f](h)
if f in self.feature_layers:
not_normed_features = h.clone()
if resize:
features.append(not_normed_features.view(h.size(0),-1))
else:
features.append(not_normed_features)
if resize:
features = torch.cat(features, dim=1)
return masks, features
def forward(self, x, resize=True):
h = self.normalize(x)
return self.run(h, resize)
class AppearanceLoss(nn.modules.Module):
def __init__(self, n_l1_scales=4, sigma_coeff=1., huber_thr=0.03, border = 0.1):
super(AppearanceLoss, self).__init__()
self.n_l1_scales = n_l1_scales
self.sigma_coeff = sigma_coeff
self.huber_thr = huber_thr
self.border=border
self.perception_loss_module = PerceptualVGG19( feature_layers=[0, 5, 10, 15],
use_normalization=True,
input_from_tanh=False,
flatten=False )
self.perception_loss_module = self.perception_loss_module.cuda()
def grayscale_transform(self, x):
return x.mean(1,keepdim=True)
def forward(self, input, target, sig=None, mask=None):
# input/target an image between [0,1]
input_rgb = input
gt_tgt_rgb = target
image_size = list(input.shape[2:])
# mask both input and target borders
border_in_pix = int(self.border * np.array(input.shape[2:]).mean())
brd_mask = input.new_zeros(input.shape)
brd_mask[:,:,border_in_pix:-border_in_pix,border_in_pix:-border_in_pix] = 1.
if mask is not None:
brd_mask *= mask
input_rgb = input_rgb * brd_mask
gt_tgt_rgb = gt_tgt_rgb * brd_mask
# make sure we got the right input
assert gt_tgt_rgb.min() >= -0.001
assert gt_tgt_rgb.max() <= 1.001
# VGG
_, fake_features = self.perception_loss_module(input_rgb, resize=False)
_, tgt_features = self.perception_loss_module(gt_tgt_rgb, resize=False)
loss_vgg = 0.
sig_vgg = sig
for fake, tgt in zip(fake_features,tgt_features):
# vgg_df = huber(((fake-tgt)**2).mean(1,keepdim=True),scaling=self.huber_thr)
vgg_df = huber(((fake-tgt)**2),scaling=self.huber_thr).mean(1,keepdim=True)
if sig_vgg is not None:
# first smooth the sigmas
# g_sigma = sum(sig_vgg.shape[i]/fake.shape[i] for i in (2,3))*0.5
# if g_sigma > 1.:
# sig_vgg = gauss_filter(sig_vgg, g_sigma)
sig_vgg = Fu.interpolate(sig_vgg, size=fake.shape[2:],mode='bilinear')
loss_vgg = loss_vgg + \
Fu.interpolate( \
logexploss(vgg_df, sig_vgg, \
coeff=self.sigma_coeff, accum=False),
size=image_size )
else:
loss_vgg = loss_vgg + Fu.interpolate(vgg_df, size=image_size)
# loss_vgg = loss_vgg + vgg_df #.mean((1,2,3))
# RGB L1 ... multiscale
loss_rgb = 0.
sig_rgb = sig
for scale in range(self.n_l1_scales):
if scale > 0:
input_rgb = Fu.interpolate(input_rgb, scale_factor=0.5, mode='bilinear')
gt_tgt_rgb= Fu.interpolate(gt_tgt_rgb, scale_factor=0.5, mode='bilinear')
if sig_rgb is not None:
sig_rgb = Fu.interpolate(sig_rgb, scale_factor=0.5, mode='bilinear')
rgb_diff = huber(((input_rgb-gt_tgt_rgb)**2),scaling=self.huber_thr).mean(1,keepdim=True)
if sig is not None:
loss_rgb = loss_rgb + Fu.interpolate(logexploss(rgb_diff, sig_rgb,
coeff=self.sigma_coeff, accum=False), size=image_size)
else:
loss_rgb = loss_rgb + Fu.interpolate(rgb_diff, size=image_size)
return loss_vgg, loss_rgb, 0
def multiscale_loss(pred, gt, n_scales=4, scaling=0.01,
downscale=0.5, per_dim_loss=False, loss_fun=None,
grid=None):
# basis rendering loss
size = pred.shape[2:]
loss = 0.
# get the gauss filter
sig = 2 * (1/downscale) / 6.0 # as in scipy
g_filter = GaussianLayer(sigma=sig, separated=True).to(pred.device)
for scl in range(n_scales):
if scl==0:
gt_ = gt; p_ = pred; grid_ = grid
else:
p_ = g_filter(p_)
p_ = Fu.interpolate(p_, scale_factor=downscale, mode='bilinear')
gt_ = g_filter(gt_)
gt_ = Fu.interpolate(gt_, scale_factor=downscale, mode='bilinear')
if grid is not None:
grid_ = g_filter(grid_)
grid_ = Fu.interpolate(grid_, scale_factor=downscale, mode='bilinear')
if grid is not None:
gt_sample = Fu.grid_sample(gt_, grid_.permute(0, 2, 3, 1))
else:
gt_sample = gt_
if loss_fun is None:
if per_dim_loss:
h = huber((p_ - gt_sample)**2, scaling=scaling).mean(dim=1, keepdim=True)
else:
h = huber(((p_ - gt_sample)**2).mean(dim=1, keepdim=True), scaling=scaling)
else:
h = loss_fun(p_, gt_sample)
loss = loss + Fu.interpolate(h, size=size, mode='bilinear')
return loss * (1 / n_scales)
# ============================================================
# c3dm-main / c3dm/tools/stats.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import sys
import time
import pickle
import matplotlib
import matplotlib.pyplot as plt
import copy
from matplotlib import colors as mcolors
from itertools import cycle
from collections.abc import Iterable
from tools.vis_utils import get_visdom_connection
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.history = []
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1, epoch=0):
# make sure the history is of the same len as epoch
while len(self.history) <= epoch:
self.history.append([])
self.history[epoch].append( val / n )
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def fill_undefined(self, max_epoch=None):
if len(self.history)==0:
return False
last = [float('NaN')]
had_undefined = False
if max_epoch is None:
max_epoch = len(self.history)
while len(self.history) < max_epoch:
self.history.append([])
assert len(self.history) == max_epoch
for hi in range(max_epoch):
h = self.history[min(hi, len(self.history)-1)]
if len(h) > 0:
last = h
else:
had_undefined = True
self.history[hi] = last
self.count = 1
self.val = copy.deepcopy(self.history[-1][0])
self.sum = self.val
self.avg = self.val
return had_undefined
def get_epoch_averages( self, epoch=-1):
if len(self.history) == 0: # no stats here
return None
else:
history = self.history
if epoch==-1:
return [ float(np.array(x).mean()) for x in history ]
else:
return float(np.array(history[epoch]).mean())
def get_all_values( self ):
all_vals = [ np.array(x) for x in self.history ]
all_vals = np.concatenate(all_vals)
return all_vals
def get_epoch(self):
return len(self.history)
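# Usage sketch: track a running average over one epoch:
#   meter = AverageMeter()
#   for val in (0.5, 0.3):
#       meter.update(val, n=1, epoch=0)
#   meter.avg   # 0.4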
class Stats(object):
"""
    Stats logging object, useful for gathering statistics when training a deep net in PyTorch.
Example:
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
    network = init_net() # init a pytorch module (=neural network)
dataloader = init_dataloader() # init a dataloader
for epoch in range(10):
# start of epoch -> call new_epoch
stats.new_epoch()
# iterate over batches
for batch in dataloader:
output = network(batch) # run and save into a dict of output variables "output"
# stats.update() automatically parses the 'objective' and 'top1e' from
# the "output" dict and stores this into the db
stats.update(output)
stats.print() # prints the averages over given epoch
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
"""
def __init__(self,log_vars,verbose=False,epoch=-1,visdom_env='main',
do_plot=True, plot_file=None, visdom_server='http://localhost',
visdom_port=8097 ):
self.verbose = verbose
self.log_vars = log_vars
self.visdom_env = visdom_env
self.visdom_server = visdom_server
self.visdom_port = visdom_port
self.plot_file = plot_file
self.do_plot = do_plot
self.hard_reset(epoch=epoch)
# some sugar to be used with "with stats:" at the beginning of the epoch
def __enter__(self):
if self.do_plot and self.epoch >= 0:
self.plot_stats(self.visdom_env)
self.new_epoch()
def __exit__(self, type, value, traceback):
iserr = not(type is None ) and issubclass(type,Exception)
iserr = iserr or (type is KeyboardInterrupt)
if iserr:
print("error inside 'with' block")
return
if self.do_plot:
self.plot_stats(self.visdom_env)
def reset(self): # to be called after each epoch
stat_sets = list(self.stats.keys())
if self.verbose:
print("stats: epoch %d - reset" % self.epoch)
self.it = { k:-1 for k in stat_sets }
for stat_set in stat_sets:
for stat in self.stats[stat_set]:
self.stats[stat_set][stat].reset()
def hard_reset(self,epoch=-1): # to be called during object __init__
self.epoch = epoch
if self.verbose:
print("stats: epoch %d - hard reset" % self.epoch)
self.stats = {}
# reset
self.reset()
def new_epoch(self):
if self.verbose:
print("stats: new epoch %d" % (self.epoch+1))
self.epoch += 1
self.reset() #zero the stats + increase epoch counter
def gather_value(self,val):
if type(val)==float:
pass
else:
val = val.detach().data.cpu().numpy()
val = float(val.sum())
return val
def fill_undefined(self):
stat_sets = list(self.stats.keys())
undefined = {}
max_epoch = max( \
max(len(stat.history) for stat in self.stats[stat_set].values()) \
for stat_set in self.stats )
for stat_set in stat_sets:
undefined[stat_set] = []
# print(max_epoch)
for stat in self.stats[stat_set].keys():
had_undefined = self.stats[stat_set][\
stat].fill_undefined(max_epoch=max_epoch)
if had_undefined:
# print(stat)
undefined[stat_set].append(stat)
return undefined
def update(self,preds,time_start=None,freeze_iter=False,stat_set='train',log_vars=None):
if self.epoch==-1: # uninitialized
print("warning: epoch==-1 means uninitialized stats structure -> new_epoch() called")
self.new_epoch()
if stat_set not in self.stats:
self.stats[stat_set] = {}
self.it[stat_set] = -1
if not freeze_iter:
self.it[stat_set] += 1
epoch = self.epoch
it = self.it[stat_set]
log_vars = log_vars or self.log_vars #TODO: need it?
for stat in log_vars:
if stat not in self.stats[stat_set]:
self.stats[stat_set][stat] = AverageMeter()
if stat=='sec/it': # compute speed
if time_start is None:
elapsed = 0.
else:
elapsed = time.time() - time_start
time_per_it = float(elapsed) / float(it+1)
val = time_per_it
# self.stats[stat_set]['sec/it'].update(time_per_it,epoch=epoch,n=1)
else:
if stat in preds:
                    try:
                        val = self.gather_value(preds[stat])
                    except Exception:
                        raise ValueError(
                            "could not extract prediction %s from the prediction dictionary" % stat)
else:
val = None
if val is not None:
self.stats[stat_set][stat].update(val,epoch=epoch,n=1)
def get_epoch_averages(self, epoch=None):
stat_sets = list(self.stats.keys())
if epoch is None: epoch = self.epoch
if epoch==-1: epoch = list(range(self.epoch))
outvals = {}
for stat_set in stat_sets:
outvals[stat_set] = { 'epoch': epoch,
'it': self.it[stat_set],
'epoch_max': self.epoch }
for stat in self.stats[stat_set].keys():
if self.stats[stat_set][stat].count==0: continue
if isinstance(epoch, Iterable):
avgs = self.stats[stat_set][stat].get_epoch_averages()
avgs = [ avgs[e] for e in epoch ]
else:
avgs = self.stats[stat_set][stat].get_epoch_averages(epoch=epoch)
outvals[stat_set][stat] = avgs
return outvals
def print(self,max_it=None,stat_set='train',vars_print=None,get_str=False):
epoch = self.epoch
stats = self.stats
str_out = ""
it = self.it[stat_set]
stat_str = ""
stats_print = sorted(stats[stat_set].keys())
for stat in stats_print:
if stats[stat_set][stat].count==0: continue
stat_str += " {0:.12}: {1:1.6f} |".format( \
stat,stats[stat_set][stat].avg)
head_str = "[%s] | epoch %3d | it %5d" % (stat_set,epoch,it)
if max_it: head_str += "/ %d" % max_it
str_out = "%s | %s" % (head_str,stat_str)
if get_str:
return str_out
else:
print(str_out)
def plot_stats( self, visdom_env=None, plot_file=None, \
visdom_server=None, visdom_port=None ):
# use the cached visdom env if none supplied
if visdom_env is None: visdom_env = self.visdom_env
if visdom_server is None: visdom_server = self.visdom_server
if visdom_port is None: visdom_port = self.visdom_port
if plot_file is None: plot_file = self.plot_file
stat_sets = list(self.stats.keys())
print("printing charts to visdom env '%s' (%s:%d)" % \
(visdom_env,visdom_server,visdom_port) )
novisdom = False
viz = get_visdom_connection(server=visdom_server,port=visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping visdom plots")
novisdom = True
lines = []
# plot metrics
if not novisdom:
viz.close(env=visdom_env,win=None)
for stat in self.log_vars:
vals = []
stat_sets_now = []
for stat_set in stat_sets:
val = self.stats[stat_set][stat].get_epoch_averages()
if val is None:
                    continue
else:
val = np.array(val)[:,None]
stat_sets_now.append(stat_set)
vals.append(val)
if len(vals)==0:
continue
# pad for skipped test evals
size = np.max([val.shape[0] for val in vals])
vals = [
np.pad(val, ((0, size - val.shape[0]), (0, 0)), mode='edge')
for val in vals
]
            try:
                vals = np.concatenate(vals, axis=1)
            except Exception:
                print("can't plot %s!" % stat)
                continue
x = np.arange(vals.shape[0])
lines.append( (stat_sets_now,stat,x,vals,) )
if not novisdom:
for idx , ( tmodes, stat , x , vals ) in enumerate( lines ):
if vals.shape[1] == 1: # eval
continue
title = "%s" % stat
opts = dict(title=title,legend=list(tmodes))
try:
viz.line( Y=vals,X=x,env=visdom_env,opts=opts)
except:
print("Warning: problem adding data point", x.shape, vals.shape)
if plot_file:
print("exporting stats to %s" % plot_file)
ncol = 3
nrow = int(np.ceil(float(len(lines))/ncol))
matplotlib.rcParams.update({'font.size': 5})
color=cycle(plt.cm.tab10(np.linspace(0,1,10)))
fig = plt.figure(1); plt.clf()
for idx , ( tmodes, stat , x , vals ) in enumerate( lines ):
c=next(color)
plt.subplot(nrow,ncol,idx+1)
ax = plt.gca()
for vali,vals_ in enumerate(vals.T):
c_ = c * ( 1. - float(vali) * 0.3 )
plt.plot( x, vals_, c = c_, linewidth=1 )
plt.ylabel( stat )
plt.xlabel( "epoch" )
plt.gca().yaxis.label.set_color(c[0:3]*0.75)
plt.legend(tmodes)
gcolor = np.array(mcolors.to_rgba('lightgray'))
plt.grid(b=True, which='major', color=gcolor, linestyle='-', linewidth=0.4)
plt.grid(b=True, which='minor', color=gcolor, linestyle='--', linewidth=0.2)
plt.minorticks_on()
plt.tight_layout()
plt.show()
fig.savefig( plot_file )
def synchronize_logged_vars(self,log_vars,default_val=float('NaN')):
stat_sets = list(self.stats.keys())
# remove the additional log_vars
for stat_set in stat_sets:
for stat in self.stats[stat_set].keys():
if stat not in log_vars:
print("additional stat %s:%s -> removing" % (stat_set,stat) )
self.stats[stat_set] = {
stat: v for stat, v in self.stats[stat_set].items()
if stat in log_vars
}
self.log_vars = log_vars # !!!
for stat_set in stat_sets:
reference_stat = list(self.stats[stat_set].keys())[0]
for stat in log_vars:
if stat not in self.stats[stat_set]:
print("missing stat %s:%s -> filling with default values (%1.2f)" % \
(stat_set,stat,default_val) )
elif len(self.stats[stat_set][stat].history)!=self.epoch+1:
h = self.stats[stat_set][stat].history
if len(h)==0: # just never updated stat ... skip
continue
else:
print("padding stat %s:%s with the last value" % \
(stat_set,stat) )
self.stats[stat_set][stat].history = h + [h[-1]] * (self.epoch+1 - len(h))
assert len(self.stats[stat_set][stat].history) == self.epoch+1
continue
else:
continue
self.stats[stat_set][stat] = AverageMeter()
self.stats[stat_set][stat].reset()
lastep = self.epoch+1
for ep in range(lastep):
self.stats[stat_set][stat].update(default_val,n=1,epoch=ep)
epoch_self = self.stats[stat_set][reference_stat].get_epoch()
epoch_generated = self.stats[stat_set][stat].get_epoch()
assert epoch_self==epoch_generated, \
"bad epoch of synchronized log_var! %d vs %d" % \
(epoch_self,epoch_generated)
# ============================================================
# c3dm-main / c3dm/tools/vis_utils.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import copy
import io
import os
from matplotlib import cm
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import torch
from tools.utils import NumpySeedFix
from visdom import Visdom
import plotly.graph_objects as go
from plotly.subplots import make_subplots
viz = None
def get_visdom_env(cfg):
if len(cfg.visdom_env)==0:
visdom_env = os.path.basename(cfg.exp_dir)
else:
visdom_env = cfg.visdom_env
return visdom_env
def get_visdom_connection(server='http://localhost',port=8097):
global viz
if viz is None:
viz = Visdom(server=server,port=port)
return viz
def denorm_image_trivial(im):
im = im - im.min()
im = im / (im.max()+1e-7)
return im
def ensure_im_width(img,basewidth):
# basewidth = 300
# img = Image.open('somepic.jpg')
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
return img
def fig2data(fig, size=None):
"""Convert a Matplotlib figure to a numpy array
Based on the ICARE wiki.
Args:
fig (matplotlib.Figure): a figure to be converted
Returns:
(ndarray): an array of RGB values
"""
# TODO(samuel): convert figure to provide a tight fit in image
buf = io.BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
buf.seek(0)
im = Image.open(buf).convert('RGB')
if size:
im = im.resize(size)
# fig.canvas.draw()
# import ipdb ; ipdb.set_trace()
# # w,h = fig.canvas.get_width_height()
# # buf = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
# buf.shape = (h, w, 3)
# return buf
return np.array(im)
def get_depth_image(depth, mask=None, cmap='gray'):
cmap_ = cm.get_cmap(cmap)
clr = cmap_(depth)
clr = clr[0].transpose(2,0,1)[:3]
# if mask is not None:
# clr = clr * mask + (1-mask)
return clr
def show_flow(
viz,
env,
p,
image=None,
title='flow',
linewidth=2,
win=None,
):
fig = plt.figure(figsize=[11,11])
if image is not None:
plt.imshow(image.transpose( (1,2,0) ))
plt.axis('off')
plt.plot(p[:,0,:], p[:,1,:], '-', color='m', linewidth=linewidth, zorder=1)
if image is None:
plt.gca().invert_yaxis()
plt.axis('equal')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().set_axis_off()
# return fig
improj = np.array(fig2data(fig))
if env is not None:
win = viz.image(
np.array(improj).transpose(2,0,1),
env=env,
opts={'title': title},
win=win,
)
else:
win = None
plt.close(fig)
return improj, win
def show_projections( viz,
env,
p,
v=None,
image_path=None,
image=None,
title='projs',
cmap__='gist_ncar',
markersize=None,
sticks=None,
stickwidth=2,
stick_color=None,
plot_point_order=False,
bbox = None,
win=None ):
if image is None:
try:
im = Image.open(image_path).convert('RGB')
im = np.array(im).transpose(2,0,1)
        except Exception:
            im = None
            print("! can't load image %s" % image_path)
else:
im = image
nkp = int(p.shape[2])
    pid = np.linspace(0., 1., nkp)
if v is not None:
okp = np.where(v > 0)[0]
else:
okp = np.where(np.ones(nkp))[0]
possible_markers = ['.','*','+']
markers = [possible_markers[i%len(possible_markers)] for i in range(len(p))]
if markersize is None:
msz = 50
if nkp > 40:
msz = 5
markersizes = [msz]*nkp
else:
markersizes = [markersize]*nkp
fig = plt.figure(figsize=[11,11])
if im is not None:
plt.imshow( im.transpose( (1,2,0) ) ); plt.axis('off')
if sticks is not None:
if stick_color is not None:
linecol = stick_color
else:
linecol = [0.,0.,0.]
        for p_ in p:
            for stick in sticks:
                # default linestyle; the original left it undefined when v
                # was None, which crashed the plt.plot call below
                linestyle = '-'
                if v is not None:
                    # only draw sticks whose two endpoints are both visible
                    if not (v[stick[0]] > 0 and v[stick[1]] > 0):
                        continue
                plt.plot(p_[0, stick], p_[1, stick], linestyle,
                         color=linecol, linewidth=stickwidth, zorder=1)
for p_, marker, msz in zip(p, markers, markersizes):
plt.scatter( p_[0,okp], p_[1,okp], msz, pid[okp],
cmap=cmap__, linewidths=2, marker=marker, zorder=2, \
vmin=0., vmax=1. )
if plot_point_order:
for ii in okp:
plt.text( p_[0,ii], p_[1,ii], '%d' % ii, fontsize=int(msz*0.25) )
if bbox is not None:
import matplotlib.patches as patches
# Create a Rectangle patch
rect = patches.Rectangle((bbox[0],bbox[1]),bbox[2],bbox[3],\
linewidth=1,edgecolor='r',facecolor='none')
plt.gca().add_patch(rect)
if im is None:
plt.gca().invert_yaxis()
plt.axis('equal')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# plt.gca().set_frame_on(False)
plt.gca().set_axis_off()
else: # remove all margins
# plt.gca().axes.get_xaxis().set_visible(False)
# plt.gca().axes.get_yaxis().set_visible(False)
# plt.gca().set_frame_on(False)
# plt.gca().set_axis_off()
pass
# return fig
improj = np.array(fig2data(fig))
if env is not None:
win = viz.image( np.array(improj).transpose(2,0,1), \
env=env, opts={ 'title': title }, win=win )
else:
win = None
plt.close(fig)
return improj, win
def extend_to_3d_skeleton_simple(ptcloud,sticks,line_resol=10,rgb=None):
H36M_TO_MPII_PERM = [ 3, 2, 1, 4, 5, 6, 0, 8, 9, 10, 16, 15, 14, 11, 12, 13]
rgb_now = rgb.T if rgb is not None else None
ptcloud_now = ptcloud.T
ptcloud = ptcloud.T
rgb = rgb.T if rgb is not None else rgb
if ptcloud_now.shape[1]==16: # MPII
sticks_new = []
for stick in sticks:
if stick[0] in H36M_TO_MPII_PERM and stick[1] in H36M_TO_MPII_PERM:
s1 = H36M_TO_MPII_PERM.index(int(stick[0]))
s2 = H36M_TO_MPII_PERM.index(int(stick[1]))
sticks_new.append( [s1,s2] )
sticks = sticks_new
for sticki,stick in enumerate(sticks):
alpha = np.linspace(0,1,line_resol)[:,None]
linepoints = ptcloud[stick[0],:][None,:] * alpha + \
ptcloud[stick[1],:][None,:] * ( 1. - alpha )
ptcloud_now = np.concatenate((ptcloud_now,linepoints),axis=0)
if rgb is not None:
linergb = rgb[stick[0],:][None,:] * alpha + \
rgb[stick[1],:][None,:] * ( 1.-alpha )
rgb_now = np.concatenate((rgb_now,linergb.astype(np.int32)),axis=0)
if rgb is not None:
rgb_now = rgb_now.T
return ptcloud_now.T, rgb_now
def autocolor_point_cloud(pcl, dim=1):
d = pcl[dim]
d = d - d.mean()
d = d / d.std()
d = np.minimum(np.maximum(d,-2.),2.)
d = (d + 2.) / 4.
rgb = (cm.get_cmap('jet')(d)[:,:3]*255.).astype(np.int32)
return rgb.T
def visdom_plot_pointclouds( viz, pcl, visdom_env, title,\
plot_legend=False, markersize=2,\
nmax=5000, sticks=None, win=None, \
autocolor=False ):
if sticks is not None:
pcl = { k:extend_to_3d_skeleton_simple(v,sticks)[0] \
for k,v in pcl.items() }
legend = list(pcl.keys())
cmap = 'tab10'
npcl = len(pcl)
rgb = (cm.get_cmap(cmap)(np.linspace(0,1,10)) \
[:,:3]*255.).astype(np.int32).T
rgb = np.tile(rgb,(1,int(np.ceil(npcl/10))))[:,0:npcl]
rgb_cat = { k:np.tile(rgb[:,i:i+1],(1,p.shape[1])) for \
i,(k,p) in enumerate(pcl.items()) }
rgb_cat = np.concatenate(list(rgb_cat.values()),axis=1)
pcl_cat = np.concatenate(list(pcl.values()),axis=1)
if pcl_cat.shape[0] > 3:
rgb_cat = (pcl_cat[3:6, :] * 255).astype(np.int32)
pcl_cat = pcl_cat[0:3, :]
elif autocolor:
rgb_cat = autocolor_point_cloud(pcl_cat)
if pcl_cat.shape[1] > nmax:
with NumpySeedFix():
prm = np.random.permutation( \
pcl_cat.shape[1])[0:nmax]
pcl_cat = pcl_cat[:,prm]
rgb_cat = rgb_cat[:,prm]
win = viz.scatter( pcl_cat.T, env=visdom_env, \
opts= { 'title': title, 'markersize': markersize, \
'markercolor': rgb_cat.T }, win=win )
# legend
if plot_legend:
dummy_vals = np.tile(np.arange(npcl)[:,None],(1,2)).astype(np.float32)
title = "%s_%s" % (title,legend)
opts = dict( title=title, legend=legend, width=400, height=400 )
viz.line( dummy_vals.T,env=visdom_env,opts=opts,win=win+'_legend')
return win
def visdom_plotly_pointclouds( viz, pcl, visdom_env,
title=None,
markersize=2,
nmax=5000,
sticks=None,
win=None,
autocolor=False,
in_subplots=False,
height=500,
width=500,
normalise=False ):
if sticks is not None:
pcl = { k:extend_to_3d_skeleton_simple(v,sticks)[0] \
for k,v in pcl.items() }
npcl = len(pcl)
rgb = np.linspace(0,1,10)
rgb = np.array([rgb[i%10] for i in range(npcl)])
if in_subplots:
cols = npcl
else:
cols = 1
titles = [None]*cols; titles[0] = title
fig = make_subplots(
rows = 1, cols = cols,
specs=[[{"type": "scene"}]*cols],
subplot_titles=titles,
column_widths=[1.]*cols,
)
for pcli, ((pcl_name, pcl_data),color) in enumerate(zip(pcl.items(), rgb)):
if pcl_data.shape[1] > nmax:
with NumpySeedFix():
prm = np.random.permutation(pcl_data.shape[1])[0:nmax]
pcl_data = pcl_data[:,prm]
if pcl_data.shape[0]==6:
# we have color
pcl_color = np.minimum(np.maximum(pcl_data[3:],0.),1.)
pcl_data = pcl_data[:3]
pcl_color = [(255.*c).astype(int).tolist() for c in pcl_color.T]
marker=dict(
size=markersize,
color=pcl_color,
opacity=1.)
else:
marker=dict(
size=markersize,
color=color,
colorscale='Spectral',
opacity=1.)
if normalise:
pcl_data -= pcl_data.mean(axis=1, keepdims=True)
pcl_data /= (pcl_data.max(axis=1) - pcl_data.min(axis=1)).max()
pcl[pcl_name] = pcl_data
fig.add_trace(
go.Scatter3d(
x=pcl_data[0, :],
y=pcl_data[1, :],
z=pcl_data[2, :],
mode='markers',
name=pcl_name,
visible=True,
marker=marker,
),
row = 1,
col = pcli+1 if in_subplots else 1
)
pcl_cat = np.concatenate(list(pcl.values()),axis=1)[:3]
pcl_c = pcl_cat.mean(1)
maxextent = (pcl_cat.max(axis=1) - pcl_cat.min(axis=1)).max()
bounds = np.stack((pcl_c-maxextent, pcl_c+maxextent))
    width = width * cols
fig.update_layout(height = height, width = width,
scene = dict(
xaxis=dict(range=[bounds[0,0],bounds[1,0]]),
yaxis=dict(range=[bounds[0,1],bounds[1,1]]),
zaxis=dict(range=[bounds[0,2],bounds[1,2]]),
aspectmode='cube',
)
)
# print(win)
viz.plotlyplot(fig, env=visdom_env, win=win)
return win
def write_into_image(image_np, txt, color=(0,0,255)):
img = Image.fromarray(image_np.transpose((1,2,0)))
draw = ImageDraw.Draw(img)
draw.text((0, 0), txt, color)
image_np = np.transpose(np.array(img),(2,0,1))
return image_np
def make_match_image(
im_kps,
im_paths,
nmax=5000,
line_width=5
):
# images
ims = [np.array(Image.open(im).convert('RGB')) for im in im_paths]
_, img_width, _ = ims[0].shape
# pad smaller image height if not the same
if ims[0].shape[0] != ims[1].shape[0]:
pad_amnt = np.abs(ims[0].shape[0] - ims[1].shape[0])
if ims[0].shape[0] < ims[1].shape[0]:
im_to_pad = 0
else:
im_to_pad = 1
ims[im_to_pad] = np.pad(
ims[im_to_pad], ((0, pad_amnt), (0, 0), (0, 0)), mode='constant')
assert ims[0].shape[0] == ims[1].shape[0]
ims = np.concatenate(ims, axis=1)
ims = Image.fromarray(ims.astype(np.uint8))
if im_kps is not None:
# image keypoints
if im_kps.shape[0] > nmax:
prm = np.random.permutation(im_kps.shape[0])[0:nmax]
im_kps = im_kps[prm]
else:
im_kps = im_kps.copy()
im_kps[:,0,1] += img_width
# round for imdraw
im_kps = np.round(im_kps).astype(int)
cmap = cm.get_cmap('rainbow')
d = ImageDraw.Draw(ims)
for mi, match in enumerate(im_kps):
clr = cmap(float(mi) / im_kps.shape[0])
clr = (np.array(clr) * 255.).astype(int).tolist()
d.line((
tuple(match[:,0].tolist()),
tuple(match[:,1].tolist())
), fill=tuple(clr), width=line_width)
return ims
def visdom_show_many_image_matches(
viz,
ims_kps,
ims_paths,
visdom_env='main',
visdom_win=None,
title=None,
line_width=10,
nmax=5000,
max_im_sz=200,
):
ims = []
for im_kps, im_paths in zip(ims_kps, ims_paths):
im_ = make_match_image(
im_kps,
im_paths,
nmax=nmax,
line_width=line_width,
)
sz_ = (
np.array(im_.size) * (max_im_sz / max(im_.size))
).astype(int).tolist()
im_ = im_.resize(sz_, Image.BILINEAR)
im_ = np.array(im_).astype(float)/255.
im_ = np.transpose(im_, (2,0,1))
ims.append(im_)
# pad all images so that we can stack
max_h = max(im.shape[1] for im in ims)
max_w = max(im.shape[2] for im in ims)
for imi, im in enumerate(ims):
pad_h = max_h - im.shape[1]
pad_w = max_w - im.shape[2]
ims[imi] = np.pad(
im, ((0, 0), (0, pad_h), (0, pad_w)), mode='constant')
ims = np.stack(ims)
viz.images(ims, env=visdom_env, win=visdom_win)
def _get_camera_wireframe(scale=1.):
a = 0.5*np.array([-2, 1.5, 4])
b = 0.5*np.array([ 2, 1.5, 4])
c = 0.5*np.array([-2, -1.5, 4])
d = 0.5*np.array([ 2, -1.5, 4])
C = np.zeros(3)
F = np.array([0, 0, 3])
lines = np.array([a,b,d,c,a,C,b,d,C,c,C,F]) * scale
return lines
def visdom_plotly_cameras(
viz,
cameras,
visdom_env='main',
visdom_win=None,
title=None,
markersize=2,
nmax=5000,
in_subplots=False,
camera_scale=0.05, # in multiples of std_dev of the scene pointcloud
height=1000,
width=1000,
):
titles = [title]
fig = make_subplots(
rows = 1, cols = 1,
specs=[[{"type": "scene"}]],
subplot_titles=titles,
column_widths=[1.],
)
all_pts = []
# add cameras
R = cameras[:,:,:3]
t = cameras[:,:,3:]
C = -np.matmul(R.transpose(0, 2, 1), t)
all_pts = C[:,:,0]
scene_std = all_pts.std(0).mean()
cam_lines_canonical = _get_camera_wireframe(scale=camera_scale*scene_std)
cmap = cm.get_cmap('rainbow')
camera_colors = cmap(np.linspace(0., 1., R.shape[0]))[:, :3]
# mult by 220 here to make the colors a bit darker
camera_colors = ['rgb(%s)' % ','.join(
[str(int(c*220.)) for c in clr]
) for clr in camera_colors]
for clr_, R_, t_ in zip(camera_colors, R, t):
cam_lines_world = R_.T @ (cam_lines_canonical.T - t_)
x, y, z = cam_lines_world
fig.add_trace(
go.Scatter3d(
x=x, y=y, z=z,
marker=dict(
size=2,
# colorscale='Spectral',
color=clr_,
),
line=dict(
# colorscale='Spectral',
color=clr_,
width=2,
)
),
row=1,
col=1,
)
pcl_c = all_pts.mean(0)
maxextent = (all_pts.max(axis=0) - all_pts.min(axis=0)).max()
bounds = np.stack((pcl_c.T-maxextent, pcl_c.T+maxextent))
fig.update_layout(height = height, width = width,
showlegend=False,
scene = dict(
xaxis=dict(range=[bounds[0,0], bounds[1,0]]),
yaxis=dict(range=[bounds[0,1], bounds[1,1]]),
zaxis=dict(range=[bounds[0,2], bounds[1,2]]),
aspectmode='cube',
)
)
viz.plotlyplot(fig, env=visdom_env, win=visdom_win)
# ============================================================
# c3dm-main / c3dm/tools/visdom_plotly.py
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
from visdom import Visdom
from tools.vis_utils import get_visdom_connection, denorm_image_trivial
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import os
from PIL import Image
# NOTE: the following module-level fragment referenced undefined names
# (`title`, `pt_cloud_np`, `k`, `color`) and used 0-based subplot indices,
# so it would crash at import time; it is kept here, commented out, as
# leftover example code.
# fig = make_subplots(
#     rows=1, cols=1,
#     specs=[[{"type": "scene"}]],
#     subplot_titles=(title,),
#     column_widths=[0.5],
# )
# fig.add_trace(
#     go.Scatter3d(
#         x=-pt_cloud_np[:, 0],
#         y=-pt_cloud_np[:, 1],
#         z=-pt_cloud_np[:, 2],
#         mode='markers',
#         name=k,
#         visible=True,
#         marker=dict(
#             size=8,
#             color=color,
#             opacity=1.,
#         )), row=1, col=1)
class VisdomPlotly():
    def __init__(self, viz, visdom_env_imgs=None, win=None):
        # store the connection and target env; show() relies on these
        self.viz = viz
        self.visdom_env_imgs = visdom_env_imgs
        self.win = win
        self.camera = dict(
            up=dict(x=0, y=0, z=1),
            center=dict(x=0, y=0, z=0),
            eye=dict(x=0.0, y=2.0, z=0.0)
        )
self.scene = dict(
xaxis = dict(nticks=10, range=[-100,100],),
yaxis = dict(nticks=10, range=[-100,100],),
zaxis = dict(nticks=10, range=[-100,100],),
camera = self.camera)
def extend_to_skeleton(self, pt_cloud, skeleton, line_resolution = 25):
ptcloud_now = pt_cloud
for stick in skeleton:
alpha = np.linspace(0,1,line_resolution)[:, None]
linepoints = pt_cloud[stick[0],:][None,:] * alpha + \
pt_cloud[stick[1],:][None,:] * ( 1. - alpha )
ptcloud_now = np.concatenate((ptcloud_now,linepoints),axis=0)
return ptcloud_now
def make_fig(self, rows, cols, epoch, it, idx_image, acc_detail, percent_agree):
# fig_dict['subplot_title']
title="e%d_it%d_im%d"%(epoch, it, idx_image)
self.fig = make_subplots(
rows = rows, cols = cols,
specs=[[{"type": "xy"},{"type": "xy"},{"type": "scene"}, {"type": "xy"}, {"type": "xy"}]],
subplot_titles=(
"Input: {0}".format(title),
"Projection",
acc_detail,
'Mode Freqs',
'Mode Freqs (Flow): {0}'.format(percent_agree)
),
column_widths=[0.5] * cols,
)
# vis_plot = VisdomPlotly(visdom_env_imgs, stats.visdom_server, stats.visdom_port)
# vis_plot.make_fig(1, 5, stats.epoch, stats.it[trainmode], idx_image, "sqerr [M{0}]: {1:.2f}".format(min_mode, rmse_h36m), flow_agree)
# vis_plot.add_image(img_with_gt)
# vis_plot.add_2d_points(keypoints_2d.reshape(-1, 2), 1, 1, 'Input (Joints)', 'green')
# vis_plot.add_2d_points(keypoints_2d.reshape(-1, 2), 1, 2, 'Input (Joints)', 'green')
# vis_plot.add_3d_points(gt_sample.reshape(-1, 3) * 0.1, 1, 3, 'GT', 'green', visible='legendonly')
# vis_plot.add_3d_points(in_verts[idx_image].reshape(-1, 3) * 0.1, 1, 3, 'GT', 'green', s=1, opacity=0.5)
def add_image(self, img):
bg_image = Image.fromarray(img)
self.fig.update_layout(
images = [
go.layout.Image(
source=bg_image,
xref="x1",
yref="y1",
x=0,
y=bg_image.size[1],
sizex=bg_image.size[0],
sizey=bg_image.size[1],
sizing="stretch",
opacity=0.75,
layer="below"),
go.layout.Image(
source=bg_image,
xref="x2",
yref="y2",
x=0,
y=bg_image.size[1],
sizex=bg_image.size[0],
sizey=bg_image.size[1],
sizing="stretch",
opacity=0.75,
layer="below")
]
)
def add_3d_points(self, pt_cloud_np, row, col, name, color, opacity=1.0, s=8, visible=True):
self.fig.add_trace(
go.Scatter3d(
x=-1 * pt_cloud_np[:, 0],
y=-1 * pt_cloud_np[:, 2],
z=-1 * pt_cloud_np[:, 1],
mode='markers',
name=name,
visible=visible,
marker=dict(
size=s,
color=color,
opacity=opacity,
)), row = row, col = col)
self.fig.update_scenes(patch = self.scene, row = row, col = col)
self.add_hack_points(row, col)
# def add_mesh(self, verts, triangles, row, col, name, color):
# self.fig.add_trace(
# go.Mesh3d(
# x=verts[:, 0],
# y=verts[:, 1],
# z=verts[:, 2],
# colorbar_title='z',
# colorscale=[[0, 'gold'],
# [0.5, 'mediumturquoise'],
# [1, 'magenta']],
# # Intensity of each vertex, which will be interpolated and color-coded
# intensity=[0, 0.33, 0.66, 1],
# # i, j and k give the vertices of triangles
# i=triangles[:, 0],
# j=triangles[:, 1],
# k=triangles[:, 2],
# name=name,
# showscale=True
# )
# )
# self.fig.update_scenes(patch = self.scene, row = row, col = col)
    def add_2d_points(self, points, row, col, name, color, scale=6, opacity=1.0, im_size=224, extend=False, visible=True):
        points_npy = points
        if extend:
            # NOTE: SKELETON_2D is not defined in this file; the surrounding
            # project must provide it when extend=True is used
            points_npy = self.extend_to_skeleton(points_npy, SKELETON_2D)
self.fig.add_trace(
go.Scatter(
x=points_npy[:, 0],
y=im_size-points_npy[:, 1],
mode='markers',
name=name,
visible=visible,
marker=dict(
size=scale,
color=color, # set color to an array/list of desired values
opacity=opacity,
)), row = row, col = col)
self.fig.update_xaxes(range=[0, im_size], row=row, col=col)
self.fig.update_yaxes(range=[0, im_size], row=row, col=col)
def show(self):
raw_size = 400
self.fig.update_layout(height = raw_size, width = raw_size * 5)
self.viz.plotlyplot(self.fig, env=self.visdom_env_imgs)
def add_hack_points(self, row, col):
hack_points = np.array([
[-1000.0, -1000.0, -1000.0],
[-1000.0, -1000.0, 1000.0],
[-1000.0, 1000.0, -1000.0],
[-1000.0, 1000.0, 1000.0],
[1000.0, -1000.0, -1000.0],
[1000.0, -1000.0, 1000.0],
[1000.0, 1000.0, -1000.0],
[1000.0, 1000.0, 1000.0]])
self.fig.add_trace(
go.Scatter3d(
x=-1 * hack_points[:, 0],
y=-1 * hack_points[:, 2],
z=-1 * hack_points[:, 1],
mode='markers',
name='_fake_pts',
visible=False,
marker=dict(
size=1,
opacity = 0,
color=(0.0, 0.0, 0.0),
)), row = row, col = col)
def add_bar(self, stats, num_modes, row, col, name):
freqs = np.bincount(stats, minlength=num_modes)
fig = self.fig.add_trace(
go.Bar(
x=list(range(num_modes)),
y=freqs,
name=name), row = row, col = col)
# ============================================================
# c3dm-main / (file path not included in this excerpt)
# ============================================================
# Copyright (c) Facebook, Inc. and its affiliates.
import time
import torch
import torch.nn.functional as Fu
import numpy as np
import collections
from tools.functions import safe_sqrt
from tools.pcl_unproject import depth2pcl
def in_hull(p, hull, extendy=False):
"""
Test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
from scipy.spatial import Delaunay
if not isinstance(hull, Delaunay):
hull = Delaunay(hull, incremental=True)
if extendy:
pts = hull.points
minx = np.min(pts[:,0])
maxx = np.max(pts[:,0])
new_pts = [[minx, 0], [maxx, 0]]
hull.add_points(new_pts)
return hull.find_simplex(p)>=0
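# A minimal usage sketch for in_hull (array values illustrative only):
#   import numpy as np
#   hull_pts = np.array([[0., 0.], [1., 0.], [0., 1.]])  # triangle in 2d
#   query = np.array([[0.25, 0.25], [2., 2.]])
#   in_hull(query, hull_pts)  # -> array([ True, False])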
def get_ff_head_mask(pcl_pred, kp_loc):
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy())
kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
return torch.tensor(kpmask).float()
def cut_ff_head(pcl_pred, kp_loc, mask):
if True:
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy())
# kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
ok = np.where(kpmask.reshape(-1))[0].tolist()
else:
chin_pt = kp_loc[:, 16].long()
nose_pt = kp_loc[:, 54].long()
chin_3d_pt = pcl_pred[:, chin_pt[1], chin_pt[0]]
nose_3d_pt = pcl_pred[:, nose_pt[1], nose_pt[0]]
thr = ((nose_3d_pt - chin_3d_pt)**2).sum().sqrt()
thr *= 1.01
df = ((pcl_pred - nose_3d_pt[:,None,None])**2).sum(0, keepdim=True).sqrt()
df = df * mask + (1-mask) * thr * 1000.
ok = torch.nonzero(df.view(-1) <= thr).squeeze()
# if True:
# npix = pcl_pred[0].numel()
# nok = np.setdiff1d(np.arange(npix), ok)
# pcl_pred_nok = pcl_pred.view(3,-1)[:, nok].numpy()
# pcl_pred_raw = pcl_pred.view(3,-1).numpy()
# pcl_pred_ok = pcl_pred.view(3,-1)[:, ok].numpy()
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# visdom_plotly_pointclouds( \
# viz,
# { 'pred': pcl_pred_ok,
# 'pred_nok': pcl_pred_nok,
# 'pred_raw': pcl_pred_raw, },
# 'ff_debug',
# title='ff_debug', win='ff_debug_',
# markersize=2,
# in_subplots=True,
# )
# import pdb; pdb.set_trace()
pcl_pred = pcl_pred.view(3,-1)[:, ok]
pcl_pred = apply_pcl_pred_transform(pcl_pred)
return pcl_pred
def apply_pcl_pred_transform(pcl_pred):
a = np.pi + np.pi/2. # original
Rx = [
[ 1., 0., 0. ],
[ 0., np.cos(a), -np.sin(a) ],
[ 0., np.sin(a), np.cos(a) ],
]
pcl_pred = torch.FloatTensor(Rx) @ pcl_pred
return pcl_pred
def get_nose_loc(pcl_gt):
nose_id = np.argmin(pcl_gt[1,:])
nose_loc = pcl_gt[:, nose_id:(nose_id+1)]
return nose_loc
def cut_nose(pcl_gt, thr=100., nose_loc=None):
if nose_loc is None:
nose_loc = get_nose_loc(pcl_gt)
df = pcl_gt - nose_loc
dst = np.sqrt((df*df).sum(0))
ok = np.where(dst <= thr)[0]
pcl_gt = pcl_gt[:, ok]
return pcl_gt
def cut_ff_nose(pcl_gt, do_rotate=True):
# 2) 45 deg along x
# a = np.pi / 4. # original
a = np.pi / 4. + np.pi / 10.
Rx = [
[ 1., 0., 0. ],
[ 0., np.cos(a), -np.sin(a) ],
[ 0., np.sin(a), np.cos(a) ],
]
if do_rotate:
pcl_gt = Rx @ pcl_gt
pcl_gt = cut_nose(pcl_gt)
return pcl_gt
def re_cut_ff_nose(matrix_scl, pcl_pred, kp_loc, trans_scl, mask, mu, scl):
ok = torch.nonzero(mask.view(-1) > 0.).squeeze()
# cut off the hull
if True:
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy(), extendy=True)
# kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
okkp = np.where(kpmask.reshape(-1))[0]
ok = np.intersect1d( okkp, ok.numpy() ).tolist()
if len(ok)==0:
print('WARNING: RE-CUT results in empty face!')
return None
pcl_pred_ok = pcl_pred.view(3, -1)[:, ok]
pcl_pred_ok = apply_pcl_pred_transform(pcl_pred_ok)
pcl_pred_ok -= torch.FloatTensor(mu)
pcl_pred_ok *= scl
R = torch.FloatTensor(matrix_scl[:3,:3])
T = torch.FloatTensor(matrix_scl[:3,3:4])
pcl_pred_ok_t_t = R @ pcl_pred_ok + T
nose_loc = torch.FloatTensor(get_nose_loc(trans_scl.T))
pcl_pred_recut = cut_nose(pcl_pred_ok_t_t, nose_loc=nose_loc)
pcl_pred_recut = pcl_pred_recut.numpy()
return pcl_pred_recut
def eval_pcl_icp(pcl_pred, mesh_gt, mask, kp_loc):
import trimesh
from tools.utils import Timer
    profile = True  # note: passed as Timer(quiet=profile), so True actually silences the timers
with Timer(quiet=profile):
# sample points from the surface
pcl_gt_orig = trimesh.sample.sample_surface(mesh_gt, 60000)[0]
# cut stuff
pcl_gt_cut = cut_ff_nose(pcl_gt_orig.T)
pcl_pred_cut = cut_ff_head(pcl_pred, kp_loc, mask).numpy()
# center
pred_cut_mean = pcl_pred_cut.mean(1)[:, None]
pcl_pred_cut = pcl_pred_cut - pred_cut_mean
pcl_gt_cut = pcl_gt_cut - pcl_gt_cut.mean(1)[:, None]
# align stds
pred_std = pcl_pred_cut.std(1).mean()
gt_std = pcl_gt_cut.std(1).mean()
pcl_pred_cut = pcl_pred_cut * (gt_std / pred_std)
# matrix, transformed, _ = \
# trimesh.registration.icp( \
# pcl_pred_cut.T, pcl_gt_cut.T, \
# initial=np.identity(4), threshold=1e-5, \
# max_iterations=50, **{'scale': True})
with Timer(quiet=profile):
matrix_scl, transformed_scl, _ = \
trimesh.registration.icp( \
pcl_pred_cut.T, pcl_gt_cut.T, \
initial=np.identity(4), threshold=1e-5, \
max_iterations=30, **{'scale': False})
with Timer(quiet=profile):
pcl_pred_recut = re_cut_ff_nose( matrix_scl, pcl_pred, kp_loc,
transformed_scl, mask,
pred_cut_mean,
gt_std / pred_std )
if pcl_pred_recut is None or pcl_pred_recut.size==0:
print('WARNING: RE-CUT results in empty face!')
pcl_pred_recut = pcl_pred_cut
with Timer(quiet=profile):
matrix_scl_recut, transformed_scl_recut, _ = \
trimesh.registration.icp( \
pcl_pred_recut.T, pcl_gt_cut.T, \
initial=np.identity(4), threshold=1e-5, \
max_iterations=30, **{'scale': False})
# if True:
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# visdom_plotly_pointclouds( \
# viz,
# {
# 'pred': pcl_pred_cut,
# 'pred_align': transformed_scl.T,
# # 'pred_align_scl': transformed.T,
# 'pcl_gt': pcl_gt_cut,
# 'pred_recut': pcl_pred_recut,
# 'pred_align_recut': transformed_scl_recut.T
# },
# 'ff_debug',
# title='ff_debug',
# win='ff_debug_align',
# markersize=2,
# in_subplots=False,
# height=600,
# width=600
# )
# time.sleep(1)
# import pdb; pdb.set_trace()
# pcl distance
ft = lambda x: torch.FloatTensor(x).t().cuda()
fl = lambda x: torch.FloatTensor(x).cuda()
with Timer(quiet=profile):
# err = chamfer(ft(transformed), fl(pcl_gt_cut))
err_scl = float(chamfer(ft(transformed_scl), fl(pcl_gt_cut)).detach())
err_scl_recut = float(chamfer(ft(transformed_scl_recut), fl(pcl_gt_cut)).detach())
res = collections.OrderedDict( [
('dist_pcl', err_scl),
('dist_pcl_scl', err_scl),
('dist_pcl_scl_recut', err_scl_recut),
# ('pred_t', ft(transformed)),
('pred_t_scl', ft(transformed_scl)),
('gt', fl(pcl_gt_cut)),
] )
return res
def eval_depth( pred, gt, crop=5, masks=None,
get_best_scale=False):
# chuck out border
    gt = gt[:, :, crop:-crop, crop:-crop]
    pred = pred[:, :, crop:-crop, crop:-crop]
if masks is not None:
# mult gt by mask
masks = masks[:,:,crop:-crop,crop:-crop]
gt = gt * (masks > 0.).float()
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)),1e-4)
if get_best_scale:
# mult preds by a scalar "scale_best"
# s.t. we get best possible mse error
xy = pred * gt ; xx = pred * pred
if masks is not None:
xy *= masks ; xx *= masks
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-4)
pred = pred * scale_best[:, None, None, None]
df = gt - pred
mse_depth = (dmask*(df**2)).sum((1,2,3)) / dmask_mass
abs_depth = (dmask*df.abs()).sum((1,2,3)) / dmask_mass
res = collections.OrderedDict( [
('mse_depth', mse_depth),
('abs_depth', abs_depth),
] )
# as in https://arxiv.org/pdf/1606.00373.pdf
for thr_exp in (1.,2.,3.):
delta = (1.25**thr_exp) / 100. # to meters
lessdelta = (dmask*(df.abs()<=delta).float()).sum((1,2,3)) \
/ dmask_mass
res[ 'delta_%d'%int(thr_exp) ] = lessdelta.cpu()
# delta error for linspaced thresholds
for delta in np.linspace(0.,2.,21):
if delta <= 0.: continue
lessdelta = (dmask*(df.abs()<=delta).float()).sum((1,2,3)) \
/ dmask_mass
res[ 'delta_%03d'%int(100*delta) ] = lessdelta.cpu()
if get_best_scale:
res['scale_best'] = scale_best
return res
def set_mean_depth_to_0(x,mask=None):
x = x.copy()
if mask is not None:
x = x * mask[:,None,:]
mu_depth = (x.sum(2)/mask.sum(1)[:,None])[:,2]
else:
mu_depth = x.mean(2)[:,2]
x[:,2,:] = x[:,2,:] - mu_depth[:,None]
if mask is not None:
x = x * mask[:,None,:]
return x
def get_edm(pts,pts2=None):
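    # batched squared euclidean distance matrix, computed via the identity
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>;
    # inputs are (ba, dim, N) tensors, the output is (ba, N2, N)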
dtype = pts.data.type()
ba, dim, N = pts.shape
if pts2 is not None:
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2) # inplace saves memory
edm += fNorm1
# edm = (fNorm2.transpose(1,2) + fGram) + fNorm1
else:
fGram = torch.bmm(2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm = (fNorm1.transpose(1,2) - fGram) + fNorm1
return edm.contiguous()
def chamfer(a, b, med=False):
return 0.5 * (nn_err(a, b, med=med) + nn_err(b, a, med=med))
def nn_err(a, b, med=False):
D = get_edm(a[None].detach(), b[None].detach())
minvals, minidx = D.min(dim=1)
minvals = torch.clamp(minvals,0.).squeeze().sqrt()
if med:
assert False
errs = minvals.median()
else:
errs = minvals.mean()
# if True:
# from pykeops.torch import LazyTensor
# a = a.t().contiguous()
# b = b.t().contiguous()
# A = LazyTensor(a[:, None, :]) # (M, 1, 3)
# B = LazyTensor(b[None, :, :]) # (1, N, 3)
# D = ((A - B) ** 2).sum(2) # (M, N) symbolic matrix of squared distances
# indKNN = D.argKmin(1, dim=1).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
# errs_ = ((a - b[indKNN,:])**2).sum(1).sqrt()
# if True:
# nns = b[indKNN,:]
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# show = {
# 'in': a.t().contiguous().view(3,-1),
# 'nns': nns.t().contiguous().view(3,-1),
# }
# visdom_plotly_pointclouds( \
# viz,
# show,
# 'pcl_debug',
# title='pcl_debug',
# win='pcl_debug_nns',
# markersize=2,
# )
# import pdb; pdb.set_trace()
return errs
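# A small usage sketch for the symmetric chamfer distance above; inputs are
# (3, N) point clouds (values illustrative, cpu tensors work as well):
#   a = torch.rand(3, 100)
#   b = torch.rand(3, 120)
#   d = chamfer(a, b)  # scalar tensor: mean symmetric nearest-neighbour distance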
# def get_best_scale_cov(pcl_pred, pcl_gt):
# # compute the pcl centers
# pred_cnt, gt_cnt = [ \
# p.mean(2, keepdim=True) for p in (pcl_pred, pcl_gt) ]
# # center
# c_pred, c_gt = [ \
# p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
# cov_pred, cov_gt = [torch.bmm(c, c.permute(0,2,1)) * (1. / c.shape[2]) for c in [c_pred, c_gt]]
# import pdb; pdb.set_trace()
# det_pred = torch.stack([torch.det(c) for c in cov_pred])
# det_gt = torch.stack([torch.det(c) for c in cov_gt])
# # eigs_pred = torch.stack([torch.eig(c)[0][:,0] for c in cov_pred])
# # eigs_gt = torch.stack([torch.eig(c)[0][:,0] for c in cov_gt])
# import pdb; pdb.set_trace()
def eval_full_pcl(pcl_pred,
pcl_gt,
K=None,
scale_best=None):
# faces=None):
import trimesh
# batch size
ba = pcl_pred.shape[0]
# compute the pcl centers
pred_cnt, gt_cnt = [ \
p.mean(2, keepdim=True) for p in (pcl_pred, pcl_gt) ]
# center
c_pred, c_gt = [ \
p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
if False:
# apply the best scale
c_pred = c_pred * scale_best[:, None, None]
else:
# recompute the best scale
# scale_best = get_best_scale_cov(pcl_pred, pcl_gt)
scale_best = (c_gt.std(2) / c_pred.std(2)).mean(1)
        if not torch.isfinite(scale_best).all():
            scale_best = torch.ones_like(scale_best)
c_pred = c_pred * scale_best[:, None, None]
e = []
c_pred_align = []
for ip in range(ba):
_, transformed, _ = \
trimesh.registration.icp( \
c_pred[ip].numpy().T, c_gt[ip].numpy().T, \
initial=np.identity(4), threshold=1e-10, \
max_iterations=30, **{'scale': False})
c_pred_align.append(torch.FloatTensor(transformed.T))
e_ = chamfer(c_gt[ip].float().cuda(), c_pred[ip].float().cuda())
e_al_ = chamfer(c_gt[ip].float().cuda(), c_pred_align[ip].float().cuda())
e.append([e_, e_al_])
c_pred_align = torch.stack(c_pred_align)
e = torch.FloatTensor(e)
res = collections.OrderedDict( [
('pcl_error', e[:, 0]),
('pcl_error_align', e[:, 1]),
('scale_best', scale_best),
('pred_align', c_pred_align),
('pred_orig', pcl_pred),
('pred', c_pred),
('gt', c_gt),
] )
return res
def eval_sparse_pcl(pred, gt, rescale_factor):
# get best scale
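    # closed-form least-squares scale: minimising ||s * pred - gt||^2 over the
    # scalar s gives s* = <pred, gt> / <pred, pred>, evaluated per batch element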
xy = pred * gt ; xx = pred * pred
scale_best = xy.mean((1, 2)) / xx.mean((1, 2)).clamp(1e-4)
pred_scl = pred * scale_best[:, None, None]
err = ((pred_scl-gt)**2).sum(1).sqrt().mean(1)
err_resc = err * rescale_factor
return err_resc.mean()
def eval_depth_pcl( pred, gt, K=None, masks=None,
gt_projection_type='perspective',
pred_projection_type='orthographic',
debug=False,
lap_thr=0.3,
):
ba = gt.shape[0]
if masks is not None:
# mult gt by mask
gt = gt * (masks > 0.).float()
gt = depth_flat_filter(gt, size=5, thr=lap_thr)
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)), 1e-4)
# convert to point clouds
pcl_pred = depth2pcl(pred, K, projection_type=pred_projection_type)
pcl_gt = depth2pcl(gt, K, projection_type=gt_projection_type)
    if gt_projection_type==pred_projection_type and \
        gt_projection_type=='perspective' and False:  # branch intentionally disabled
# estimate the best scale
xy = pred * gt ; xx = pred * pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-12)
pred = pred * scale_best[:, None, None, None]
# convert to point clouds
c_pred = depth2pcl(pred, K, projection_type=pred_projection_type)
c_gt = depth2pcl(gt, K, projection_type=gt_projection_type)
# if debug:
# import pdb; pdb.set_trace()
# c_pred = c_pred * 3
else:
# debug visualisations
# pcl_pred = pcl_pred * masks
# from tools.vis_utils import get_visdom_connection, visdom_plot_pointclouds
# pcl_show = pcl_pred[0].view(3,-1)[:,masks[0].view(-1)>0.]
# viz = get_visdom_connection()
# visdom_plot_pointclouds(viz, \
# {'pcl_pred': pcl_show.cpu().detach().numpy()},
# 'pcl_debug',
# 'pcl_debug',
# win='pcl_debug',
# )
# import pdb; pdb.set_trace()
# mask the point clouds
pcl_pred, pcl_gt = [p * dmask for p in (pcl_pred, pcl_gt)]
# compute the pcl centers
pred_cnt, gt_cnt = [ \
p.sum((2,3), keepdim=True) / dmask_mass[:,None,None,None] \
for p in (pcl_pred, pcl_gt) ]
# center
c_pred, c_gt = [ \
p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
# mask the centered point clouds
c_pred, c_gt = [p * dmask for p in (c_pred, c_gt)]
# estimate the best scale
xy = c_pred * c_gt ; xx = c_pred * c_pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-4)
# apply the best scale
c_pred = c_pred * scale_best[:, None, None, None]
# translate the point clouds back to original meanxy
# xy_mask = torch.FloatTensor([1.,1.,0.])[None,:,None,None].type_as(c_pred)
# d_c_pred, d_c_gt = [ \
# p.clone() + c * xy_mask \
# for p, c in zip((c_pred, c_gt), (pred_cnt, gt_cnt)) ]
# compute the per-vertex distance
df = c_gt - c_pred
dist = torch.clamp(df**2, 0.).sum(1,keepdim=True).sqrt()
dist = (dmask * dist).sum((1,2,3)) / dmask_mass
# if float(dist) <= 1e-3:
# import pdb; pdb.set_trace()
res = collections.OrderedDict( [
('dist_pcl', dist),
('scale_best', scale_best),
('pred', c_pred),
('pred_orig', pcl_pred),
('gt', c_gt),
('dmask', dmask),
] )
return res
def depth_flat_filter(depth, size=5, thr=0.3):
mask = (depth > 0.).float()
fsz = size*2+1
w = depth.new_ones( (2,1,fsz,fsz) ) / float(fsz*fsz)
depthf = Fu.conv2d( \
torch.cat((depth, mask), dim=1), \
w,
padding=size,
groups=2)
depthf = depthf[:,0:1,:,:] / torch.clamp(depthf[:,1:2,:,:], 1e-4)
df = (depth - depthf).abs()
mask_mass = torch.clamp(mask.sum((1,2,3), keepdim=True), 1e-4)
dmean = (depth * mask) / mask_mass
dvar = (((depth - dmean) * mask) ** 2).sum((1,2,3), keepdim=True)
dstd = safe_sqrt(dvar / mask_mass)
bad = (df > dstd * thr).float()
return depth * (1-bad)
def eval_depth_scale_inv(
pred,
gt,
masks=None,
lap_thr=0.3,
):
if masks is not None:
# mult gt by mask
gt = gt * (masks > 0.).float()
gt = depth_flat_filter(gt, size=5, thr=lap_thr)
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)), 1e-4)
# estimate the best scale
xy = pred * gt ; xx = pred * pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-12)
pred = pred * scale_best[:, None, None, None]
df = pred - gt
err = (dmask * df.abs()).sum((1,2,3)) / dmask_mass
return err
|
c3dm-main
|
c3dm/tools/eval_functions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import copy
import io
import gzip
import urllib.request
from dataset.dataset_configs import (
IMAGE_ROOTS, MASK_ROOTS, DEPTH_ROOTS, DATASET_ROOT, DATASET_CFG,
IMAGE_URLS, MASK_URLS, DEPTH_URLS
)
from dataset.keypoints_dataset import KeypointsDataset
from tools import utils
def dataset_zoo( dataset_name='freicars',
sets_to_load = ['train','val'],
force_download = False,
test_on_trainset=False,
TRAIN= { 'rand_sample': 6000,
'limit_to': -1,
'limit_seq_to': [-1],
'subsample': 1,
'dilate_masks': 5,
},
VAL = { 'rand_sample': 1000,
'limit_to': -1,
'limit_seq_to': -1,
'subsample': 1,
'dilate_masks': 0,
},
TEST = { 'rand_sample': -1,
'limit_seq_to': -1,
'limit_to': -1,
'subsample': 1,
'dilate_masks': 0,
},
**kwargs ):
main_root = DATASET_ROOT
ext = '.json'
json_train = os.path.join( main_root, dataset_name + '_train' + ext )
json_val = os.path.join( main_root, dataset_name + '_val' + ext )
image_root_train, image_root_val = get_train_val_roots(dataset_name, IMAGE_ROOTS, IMAGE_URLS)
mask_root_train, mask_root_val = get_train_val_roots(dataset_name, MASK_ROOTS, MASK_URLS)
depth_root_train, depth_root_val = get_train_val_roots(dataset_name, DEPTH_ROOTS, DEPTH_URLS)
    # auto-download the dataset json files if they don't exist
for json_file in (json_train, json_val):
if not os.path.isfile(json_file) or force_download:
download_dataset_json(json_file)
dataset_train = None
dataset_val = None
dataset_test = None
if dataset_name in DATASET_CFG:
dataset_cfg = copy.deepcopy(DATASET_CFG[dataset_name])
else:
dataset_cfg = copy.deepcopy(DATASET_CFG['default'])
TRAIN, VAL, TEST = [ copy.deepcopy(set_) for set_ in (TRAIN, VAL, TEST) ]
for set_ in (TRAIN, VAL, TEST):
set_.update(dataset_cfg)
print(set_)
if 'train' in sets_to_load:
dataset_train = KeypointsDataset(\
image_root=image_root_train,
mask_root=mask_root_train,
depth_root=depth_root_train,
jsonfile=json_train, train=True, **TRAIN)
if 'val' in sets_to_load:
if dataset_name in ('celeba_ff',):
TEST['box_crop'] = True
VAL['box_crop'] = True
if test_on_trainset:
image_root_val, json_val = image_root_train, json_train
dataset_val = KeypointsDataset(\
image_root=image_root_val,
mask_root=mask_root_val,
depth_root=depth_root_val,
jsonfile=json_val, train=False, **VAL)
dataset_test = KeypointsDataset(\
image_root=image_root_val,
mask_root=mask_root_val,
depth_root=depth_root_val,
jsonfile=json_val, train=False, **TEST)
return dataset_train, dataset_val, dataset_test
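# A hedged usage sketch (argument values are assumptions, not requirements):
#   dataset_train, dataset_val, dataset_test = dataset_zoo(
#       dataset_name='freicars_clickp_filtd',
#       sets_to_load=('train', 'val'),
#   )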
def get_train_val_roots(dataset_name, image_roots, urls):
if dataset_name not in image_roots:
return None, None
for subset_idx, images_dir in enumerate(image_roots[dataset_name]):
if not os.path.exists(images_dir):
if dataset_name not in urls:
raise ValueError(
f"Images for {dataset_name} not found in {images_dir}. "
"Please download manually."
)
url = urls[dataset_name][subset_idx]
print('Downloading images to %s from %s' % (images_dir, url))
utils.untar_to_dir(url, images_dir)
image_roots = copy.copy(image_roots[dataset_name])
if len(image_roots) == 2:
return image_roots
elif len(image_roots) == 1:
return image_roots[0], image_roots[0]
else:
raise ValueError('Wrong image roots format.')
def download_dataset_json(json_file):
from dataset.dataset_configs import DATASET_URL
json_dir = '/'.join(json_file.split('/')[0:-1])
json_name = json_file.split('/')[-1].split('.')[0]
os.makedirs(json_dir, exist_ok=True)
url = DATASET_URL[json_name]
print('downloading dataset json %s from %s' % (json_name, url))
response = urllib.request.urlopen(url)
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
    try:
        with open(json_file, 'wb') as outfile:
            outfile.write(decompressed_file.read())
    except Exception:
        # clean up the partially-written file before re-raising
        if os.path.isfile(json_file):
            os.remove(json_file)
        raise
# can be zipped
# print('checking dataset')
# with open(json_file,'r') as f:
# dt = json.load(f)
# assert dt['dataset']==json_name
|
c3dm-main
|
c3dm/dataset/dataset_zoo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from tools.utils import Timer
from torch.utils.data.sampler import Sampler
from torch._six import int_classes as _int_classes
class SceneBatchSampler(Sampler):
def __init__(self, sampler, batch_size, drop_last, \
train=True, strategy='uniform_viewpoints'):
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
assert strategy == 'uniform_viewpoints'
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.strategy = strategy
self.train = train
self.restrict_seq = None
def __iter__(self):
batch = []
for idx,_ in enumerate(self.sampler):
ii = idx % self.batch_size
if ii==0:
sample_fun = {
'uniform_viewpoints': self.sample_batch_vp_diff,
}[self.strategy]
with Timer(name='batch_sample', quiet=True):
batch, seq = sample_fun(idx)
if ii==(self.batch_size-1):
yield batch
batch = []
def _get_dataset_yaws(self):
dataset = self.sampler.data_source
rots = dataset.nrsfm_model_outputs['phi']['R']
pr_axes = rots[:, -1, :]
up = torch.svd(pr_axes)[2][:, -1]
x = torch.cross(up, torch.tensor([0., 0., 1.]))
x = x / x.norm()
y = torch.cross(x, up)
y = y / y.norm()
x_c = torch.matmul(pr_axes, x)
y_c = torch.matmul(pr_axes, y)
yaw = torch.atan2(x_c, y_c)
return yaw
def sample_batch_vp_diff(self, idx):
dataset = self.sampler.data_source
# get the cached log rots
assert (
hasattr(dataset, 'nrsfm_model_outputs') and
dataset.nrsfm_model_outputs is not None
), 'make sure to set cfg.annotate_with_c3dpo_outputs=True'
yaws = self._get_dataset_yaws()
hist, edges = np.histogram(yaws, bins=16)
bins = (yaws.cpu().data.numpy().reshape(-1, 1) > edges[1:]).sum(axis=1)
weights = 1. / hist[bins]
weights /= weights.sum()
pivot = np.random.choice(np.arange(len(dataset.db)), p=weights)
seq = dataset.dbT['seq'][pivot]
rots = dataset.nrsfm_model_outputs['phi']['R']
seqs = rots.new_tensor(dataset.dbT['seq'], dtype=torch.int64)
# convert bool array to indices
okdata = (seqs != seqs[pivot]).nonzero().view(-1).tolist()
for o in okdata:
            assert o < len(dataset.db), \
'%d out of range (%d)!' % (o, len(dataset.db))
if len(okdata) >= (self.batch_size-1):
replace = False
else:
replace = True
if len(okdata)==0:
print('no samples!!')
okdata = list(range(len(dataset.db)))
if weights is not None: # cross with okdata:
weights = weights[okdata] / weights[okdata].sum()
sample = np.random.choice(okdata, \
self.batch_size-1, replace=replace, p=weights).tolist()
sample.insert(0, pivot)
for si, s in enumerate(sample):
assert s < len(dataset.db), \
'%d out of range (%d)!' % (s, len(dataset.db))
return sample, seq
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
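# A minimal wiring sketch (assumes `dataset` provides `db`, `dbT` and cached
# `nrsfm_model_outputs`, e.g. via run_c3dpo_model_on_dset):
#   base_sampler = torch.utils.data.RandomSampler(dataset)
#   batch_sampler = SceneBatchSampler(base_sampler, batch_size=8, drop_last=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)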
|
c3dm-main
|
c3dm/dataset/batch_samplers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
import copy
from model import load_nrsfm_model
from tools.cache_preds import cache_preds
def run_c3dpo_model_on_dset(dset, nrsfm_exp_dir):
print('caching c3dpo outputs')
# make a dataset copy without any random sampling
# and image/mask/depth loading
dset_copy = copy.deepcopy(dset)
dset_copy.load_images = False
dset_copy.load_masks = False
dset_copy.load_depths = False
dset_copy.rand_sample = -1
nrsfm_model, nrsfm_cfg = load_nrsfm_model(nrsfm_exp_dir, get_cfg=True)
nrsfm_model.cuda()
nrsfm_model.eval()
loader = torch.utils.data.DataLoader( \
dset_copy,
num_workers=0,
pin_memory=True,
batch_size=nrsfm_cfg.batch_size )
cache_vars = ('phi', 'image_path')
cache = cache_preds(nrsfm_model, loader,
cache_vars=cache_vars, cat=True)
dset.nrsfm_model_outputs = cache
|
c3dm-main
|
c3dm/dataset/c3dpo_annotate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import defaultdict
import json
import os
import numpy as np
import torch
import trimesh
from visdom import Visdom
from dataset.dataset_configs import IMAGE_ROOTS
from dataset.keypoints_dataset import load_depth, load_mask
from tools.eval_functions import eval_depth_pcl, eval_full_pcl, eval_sparse_pcl
from tools.pcl_unproject import depth2pcl
import torch.nn.functional as Fu
from tqdm import tqdm
import pickle
import time
def eval_zoo(dataset_name, include_debug_vars=False):
if 'freicars_clickp_filtd' in dataset_name:
eval_script = eval_freicars
cache_vars = [
'masks', 'depth_dense',
'K_orig', 'image_path',
'orig_image_size',
'depth_path', 'mask_path',
'seq_name', 'R', 'T',
'embed_db_shape_camera_coord',
'cmr_faces',
'kp_loc_3d',
'shape_image_coord_cal',
'nrsfm_shape_image_coord'
]
eval_vars = [
'EVAL_depth_scl_perspective_med',
'EVAL_pcl_scl_perspective_med',
'EVAL_pcl_corr_scl_perspective_med',
'EVAL_depth_scl_orthographic_med',
'EVAL_pcl_scl_orthographic_med',
'EVAL_pcl_corr_scl_orthographic_med',
'EVAL_depth_scl_perspective',
'EVAL_pcl_scl_perspective',
'EVAL_pcl_corr_scl_perspective',
'EVAL_depth_scl_orthographic',
'EVAL_pcl_scl_orthographic',
'EVAL_pcl_corr_scl_orthographic',
'EVAL_sparse_pcl',
'EVAL_sparse_pcl_nrsfm',
]
elif dataset_name in ('celeba_ff',):
eval_script = eval_florence
cache_vars = [ 'masks', 'depth_dense',
'K_orig', 'image_path',
'orig_image_size',
'depth_path', 'mask_path',
'seq_name',
'images',
'embed_db_shape_camera_coord',
'shape_image_coord_cal_dense',
'cmr_faces',
'kp_loc',
'mesh_path',
'shape_image_coord_best_scale',
]
eval_vars = [ \
'EVAL_pcl_scl_recut_orthographic_flip_med',
'EVAL_pcl_scl_orthographic_flip_med',
'EVAL_pcl_orthographic_flip_med',
'EVAL_pcl_scl_recut_orthographic_med',
'EVAL_pcl_scl_orthographic_med',
'EVAL_pcl_orthographic_med',
'EVAL_pcl_scl_recut_orthographic_flip',
'EVAL_pcl_scl_orthographic_flip',
'EVAL_pcl_orthographic_flip',
'EVAL_pcl_scl_recut_orthographic',
'EVAL_pcl_scl_orthographic',
'EVAL_pcl_orthographic',
]
elif 'pascal3d' in dataset_name:
eval_script = eval_p3d
cache_vars = [ 'masks', 'depth_dense',
'image_path', 'R', 'T',
'orig_image_size',
'mask_path',
'images',
'embed_db_shape_camera_coord',
'shape_image_coord_cal_dense',
'cmr_faces',
'kp_loc',
'mesh_path' ]
eval_vars = [ \
'EVAL_pcl_scl_detkp',
'EVAL_pcl_corr_scl_detkp',
'EVAL_pcl_corr_scl_detkp_med',
]
else:
eval_script = eval_dummy
cache_vars = [ 'images', ]
eval_vars = [ 'EVAL_pcl_dist_scl', ]
return eval_script, cache_vars, eval_vars
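# A hedged dispatch example (dataset name is one of the cases handled above):
#   eval_script, cache_vars, eval_vars = eval_zoo('pascal3d_clickp_all')
#   results, _ = eval_script(cached_preds, eval_vars=eval_vars)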
def eval_dummy(cached_preds, eval_vars=None):
return {'EVAL_pcl_dist_scl': -1.}, None
def load_freicar_gt_pcl():
print('loading gt freicar point clouds ...')
# load the gt point clouds
gt_pcl_dir = '.../vpdr/freicars_sfm/'
unqseq = ['037', '036', '042', '022', '034']
gt_pcl_db = {}
for seq in unqseq:
fl = os.path.join(gt_pcl_dir, seq + '.pkl')
with open(fl, 'rb') as f:
pcl_data = pickle.load(f)
pcl_data = torch.FloatTensor(pcl_data)
pcl_std = pcl_data.std(1).mean()
gt_pcl_db[seq] = {
'xyz': pcl_data,
'scale_correction': 1. / pcl_std,
}
return gt_pcl_db
def load_freicar_data(imname, seq_name):
data_root = IMAGE_ROOTS['freicars_clickp_filtd'][0]
depth_name = imname + '.jpg.half.jpg.filtdepth.tiff'
depth_path = os.path.join(data_root, seq_name, \
'undistort/stereo/filtered_depth_0.2', depth_name)
assert 'filtdepth' in depth_path
mask_name = imname + '.jpg.half.png'
mask_path = os.path.join(data_root, seq_name, \
'masks', mask_name)
depth_gt = torch.FloatTensor(load_depth({'depth_path': depth_path}))
mask = torch.FloatTensor(load_mask({'mask_path': mask_path}))
return depth_gt, mask
def load_freicar_gt_pcl_clean(cache):
print('loading clean gt freicar point clouds ...')
# load the gt point clouds
gt_pcl_dir = '.../vpdr/freicars_sfm/'
unqseq = ['037', '036', '042', '022', '034']
gt_pcl_db = {}
for seq in tqdm(unqseq):
ok = [ (1 if seq==s else 0) for s in cache['seq_name'] ]
ok = np.where(np.array(ok))[0]
if len(ok)==0:
continue
pcl_seq = []
for idx in ok:
orig_sz = cache['orig_image_size'][idx].long().tolist()
imname = cache['depth_path'][idx].split('/')[-1].split('.')[0]
depth_gt, mask = load_freicar_data(imname, seq)
mask = Fu.interpolate(mask[None], size=orig_sz, mode='nearest')[0]
depth_gt = Fu.interpolate(depth_gt[None], size=orig_sz, mode='nearest')[0]
mask = mask * (depth_gt > 0.).float()
ok = torch.nonzero(mask.view(-1)).squeeze()
if len(ok)==0: continue
K, R, T = cache['K_orig'][idx], cache['R'][idx], cache['T'][idx]
pcl = depth2pcl(depth_gt[None], K[None], image_size=orig_sz, projection_type='perspective')[0]
pcl = pcl.view(3, -1)[:, ok]
pcl = R.t() @ (pcl - T[:,None])
pcl_seq.append(pcl)
pcl_seq = torch.cat(pcl_seq, dim=1)
if pcl_seq.shape[1] > 30000:
state = torch.get_rng_state()
torch.manual_seed(0)
prm = torch.randperm(pcl_seq.shape[1])[:30000]
torch.set_rng_state(state)
pcl_seq = pcl_seq[:, prm]
pcl_std = pcl_seq.std(1).mean()
gt_pcl_db[seq] = { 'xyz': pcl_seq, 'scale_correction': 1. / pcl_std }
    outdir = './cache/vpdr/'  # keep in sync with cache_path in eval_freicars
os.makedirs(outdir, exist_ok=True)
outfile = os.path.join(outdir, 'freicars_pcl_db_eval.pth')
torch.save(gt_pcl_db, outfile)
return gt_pcl_db
def load_p3d_meshes(cached_preds, n_sample=30000):
mesh_db = {}
root = IMAGE_ROOTS['pascal3d_clickp_all'][0]
for mesh_path in cached_preds['mesh_path']:
if mesh_path not in mesh_db:
vertices, faces = load_off(os.path.join(root,mesh_path))
if vertices is None:
continue
mesh = trimesh.Trimesh( \
vertices=vertices.tolist(), \
faces=faces.tolist() )
pcl = trimesh.sample.sample_surface(mesh, n_sample)
mesh_db[mesh_path] = torch.from_numpy(pcl[0].T).float()
return mesh_db
def eval_p3d(cached_preds, eval_vars=None, visualize=False, \
dump_dir=None, skip_flip=False):
nim = len(cached_preds['masks'])
errs = []
mesh_db = load_p3d_meshes(cached_preds)
for imi in tqdm(range(nim)):
gt_pcl = mesh_db[cached_preds['mesh_path'][imi]]
gt_pcl_imcoord = (cached_preds['R'][imi] @ gt_pcl + cached_preds['T'][imi][:,None])
# pcl prediction
pcl_pred = cached_preds['embed_db_shape_camera_coord'][imi,:,:,0].clone()
errs_this_im = {}
pcl_out_this_im = {}
for flip in (False, True):
gt_pcl_test = gt_pcl_imcoord.clone()
if skip_flip and flip:
pass # use the previous result
else:
if flip: gt_pcl_test[2,:] *= -1.
errs_now_pcl = eval_full_pcl( \
pcl_pred[None].clone(),
gt_pcl_test[None].clone() )
pcl_full_err = float(errs_now_pcl['pcl_error'])
pcl_full_err_align = float(errs_now_pcl['pcl_error_align'])
errs_now = \
{ 'EVAL_pcl_scl_detkp': pcl_full_err,
'EVAL_pcl_corr_scl_detkp': pcl_full_err_align }
errs_this_im[flip] = errs_now
pcl_out_this_im[flip] = errs_now_pcl
decvar = 'EVAL_pcl_corr_scl_detkp' # decide whether we flip based on this
flip_better = errs_this_im[True][decvar] < errs_this_im[False][decvar]
# take the better one in case of flipping
pcl_out_this_im = pcl_out_this_im[flip_better]
errs_this_im = errs_this_im[flip_better]
if False:
from tools.vis_utils import get_visdom_connection, \
visdom_plotly_pointclouds
viz = get_visdom_connection()
from PIL import Image
im = Image.open(cached_preds['image_path'][imi]).convert('RGB')
im = torch.FloatTensor(np.array(im)).permute(2,0,1)
viz.image(im, env='pcl_debug', win='im')
pcl_gt = pcl_out_this_im['gt']
pcl_pred = pcl_out_this_im['pred']
pcl_pred_orig = pcl_out_this_im['pred_orig']
pcl_pred_align = pcl_out_this_im['pred_align']
for imii in (0,):
show = {
'gt': pcl_gt[imii].view(3, -1),
# 'pred': pcl_pred[imii].view(3, -1),
'pred_orig': pcl_pred_orig[imii].view(3, -1),
'pred_align': pcl_pred_align[imii].view(3, -1),
}
visdom_plotly_pointclouds( \
viz,
show,
'pcl_debug',
title='pcl_debug',
win='pcl_debug',
markersize=2,
height=800,
width=800,
)
import pdb; pdb.set_trace()
errs.append(errs_this_im)
results = {}
    for med in (False, True):  # report both the mean and the median
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
res = float(res.median()) if med else float(res.mean())
results[(k+'_med') if med else k] = res
print('P3D evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % eval_var
print('eval vars check ok!')
# if TGT_NIMS==None:
# results = { k+'_DBG':v for k, v in results.items() }
return results, None
def eval_freicars(
cached_preds, eval_vars=None, visualize=True,
TGT_NIMS=1427, dump_dir=None
):
from dataset.dataset_configs import FREIBURG_VAL_IMAGES
cache_path = './cache/vpdr/freicars_pcl_db_eval.pth'
if not os.path.isfile(cache_path):
gt_pcl_db = load_freicar_gt_pcl_clean(cached_preds)
else:
gt_pcl_db = torch.load(cache_path)
nim = len(cached_preds['depth_path'])
if TGT_NIMS is None:
print('\n\n\n!!!! DEBUG MODE !!!!\n\n\n')
errs = []
for imi in tqdm(range(nim)):
seq_name = cached_preds['seq_name'][imi]
gt_pcl = gt_pcl_db[seq_name]['xyz']
gt_pcl_imcoord = (cached_preds['R'][imi] @ gt_pcl + \
cached_preds['T'][imi][:,None])
scale_correction = gt_pcl_db[seq_name]['scale_correction']
orig_sz = cached_preds[
'orig_image_size'][imi].type(torch.int32).tolist()
imname = cached_preds['depth_path'][imi].split('/')[-1].split('.')[0]
depth_gt, mask = load_freicar_data(imname, seq_name)
depth_gt = Fu.interpolate(depth_gt[None], size=orig_sz, mode='nearest' )[0]
mask = Fu.interpolate(mask[None], size=orig_sz, mode='nearest')[0]
# check we have a correct size
for s, s_ in zip(orig_sz, depth_gt.shape[1:]): assert s==s_
depth_pred = cached_preds['depth_dense'][imi].clone()
minscale = min(depth_pred.shape[i] / orig_sz[i-1] for i in [1, 2])
newsz = np.ceil(np.array(depth_pred.shape[1:])/minscale).astype(int).tolist()
depth_pred_up = Fu.interpolate( \
depth_pred[None], \
size=newsz, \
mode='bilinear' )[0]
depth_pred_up = depth_pred_up[:,:depth_gt.shape[1],:depth_gt.shape[2]]
depth_pred_up /= minscale
K = cached_preds['K_orig'][imi:imi+1].clone()
errs_this_im = {}
for pred_projection_type in ( 'perspective', 'orthographic'):
errs_now = eval_depth_pcl(depth_pred_up[None].clone(),
depth_gt[None].clone(),
K=K.clone(),
pred_projection_type=pred_projection_type,
gt_projection_type='perspective',
masks=mask[None],
lap_thr=0.01)
pcl_err_corrected = scale_correction * float(errs_now['dist_pcl'])
errs_this_im.update( \
{ 'EVAL_depth_scl_'+pred_projection_type: pcl_err_corrected} )
if True:
pcl_pred = cached_preds['embed_db_shape_camera_coord'][imi,:,:,0].clone()
            pcl_pred /= minscale  # same rescaling to original resolution as depth_pred_up
errs_now_pcl = eval_full_pcl( \
pcl_pred[None].clone(),
gt_pcl_imcoord[None].clone(),
K=K.clone(),
scale_best=errs_now['scale_best'], )
pcl_full_err_corrected = \
scale_correction * float(errs_now_pcl['pcl_error'])
pcl_full_err_align_corrected = \
scale_correction * float(errs_now_pcl['pcl_error_align'])
for pred_projection_type in ('perspective', 'orthographic'):
errs_this_im.update( \
{ 'EVAL_pcl_scl_'+pred_projection_type: \
pcl_full_err_corrected,
'EVAL_pcl_corr_scl_'+pred_projection_type: \
pcl_full_err_align_corrected} )
errs.append(errs_this_im)
results = {}
for med in (True, False):
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
res = float(res.median()) if med else float(res.mean())
results[(k+'_med') if med else k] = res
if True: # eval sparse kps
gt_kp_loc_3d = cached_preds['kp_loc_3d']
pred_kp_loc_3d = cached_preds['shape_image_coord_cal']
nrsfm_kp_loc_3d = cached_preds['nrsfm_shape_image_coord']
scale_corrs = torch.stack([
gt_pcl_db[cached_preds['seq_name'][imi]]['scale_correction']
for imi in range(nim)
])
results['EVAL_sparse_pcl'] = float(eval_sparse_pcl(
pred_kp_loc_3d, gt_kp_loc_3d, scale_corrs))
results['EVAL_sparse_pcl_nrsfm'] = float(eval_sparse_pcl(
nrsfm_kp_loc_3d, gt_kp_loc_3d, scale_corrs))
print('Freiburg Cars evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % eval_var
print('eval vars check ok!')
    if TGT_NIMS is None:
results = { k+'_DBG':v for k, v in results.items() }
return results, None
def load_off(obj_path):
if not os.path.isfile(obj_path):
print('%s does not exist!' % obj_path)
return None, None
with open(obj_path, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
lines = [ l.strip() for l in lines ]
nv, nf, _ = [int(x) for x in lines[1].split(' ')]
entries = lines[2:]
for vertface in ('v', 'f'):
if vertface=='v':
vertices = [ [float(v_) for v_ in v.split(' ')] for v in entries[:nv]]
vertices = torch.FloatTensor(vertices).float()
entries = entries[nv:]
elif vertface=='f':
faces = [ [int(v_) for v_ in v.split(' ')[1:]] for v in entries]
faces = torch.LongTensor(faces)
assert faces.shape[0]==nf
else:
raise ValueError()
return vertices, faces
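# For reference, a tiny OFF file in the layout parsed above (illustrative):
#   OFF
#   3 1 0
#   0.0 0.0 0.0
#   1.0 0.0 0.0
#   0.0 1.0 0.0
#   3 0 1 2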
def load_ff_obj(obj_path):
with open(obj_path, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
lines = [ l.strip() for l in lines ]
for vertface in ('v', 'f'):
entries = [ [ v for v in l.split(' ')[1:4] ] \
for l in lines if l.split(' ')[0]==vertface ]
if vertface=='v':
entries = [ [float(v_) for v_ in v] for v in entries ]
entries = torch.FloatTensor(entries)
elif vertface=='f':
entries = [ [ int(v_.split('/')[0]) for v_ in v ] \
for v in entries ]
entries = torch.LongTensor(entries)
else:
raise ValueError()
if vertface=='v':
vertices = entries.float()
else:
faces = (entries-1).long()
return vertices, faces
def eval_florence(cached_preds, eval_vars=None, TGT_NIMS=1427, visualize=False):
from tools.pcl_unproject import depth2pcl
from tools.eval_functions import eval_pcl_icp
root = IMAGE_ROOTS['celeba_ff'][1]
nim = len(cached_preds['mesh_path'])
errs = []
for imi in tqdm(range(nim)):
# if imi <= 775:
# continue
# get the ff mesh
mesh_path = cached_preds['mesh_path'][imi]
if len(mesh_path)==0: continue
mesh_path = os.path.join(root, mesh_path)
vertices, faces = load_ff_obj(mesh_path)
mesh_gt = trimesh.Trimesh(
vertices=vertices.tolist(),
faces=faces.tolist()
)
# get our prediction
kp_loc = cached_preds['kp_loc'][imi]
# image_size = list(cached_preds['images'][imi].shape[1:])
mask = cached_preds['masks'][imi]
if mask.sum()<=1:
print('Empty mask!!!')
continue
image_size = list(mask.shape[1:])
# mask = Fu.interpolate(mask[None], size=image_size)[0]
pcl_pred = cached_preds['shape_image_coord_best_scale'][imi]
pcl_pred = Fu.interpolate(pcl_pred[None], size=image_size)[0]
err_now = {}
for flip in (True, False):
pcl_pred_now = pcl_pred.clone()
if flip: pcl_pred_now[2,:] = -pcl_pred_now[2,:]
# compute icp error
err = eval_pcl_icp(pcl_pred_now, mesh_gt, mask, kp_loc)
err = {
'EVAL_pcl_scl_recut_orthographic': err['dist_pcl_scl_recut'],
'EVAL_pcl_scl_orthographic': err['dist_pcl_scl'],
'EVAL_pcl_orthographic': err['dist_pcl'],
}
if flip: err = {k+'_flip':v for k, v in err.items()}
err_now.update(err)
errs.append(err_now)
print('<EVAL_STATE>')
print(f'IMAGE={imi}')
print(err_now)
        print('</EVAL_STATE>')
results = {}
for med in (True, False):
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
if med:
res = float(res.median())
else:
res = float(res.mean())
results[(k+'_med') if med else k] = res
print('Florence Face evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % str(eval_var)
print('eval vars check ok!')
return results, None
|
c3dm-main
|
c3dm/dataset/eval_zoo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
import sys
import json
import copy
import glob
import pickle, gzip
import numpy as np
import torch
from PIL import Image
from torch.utils import data
from tools.utils import NumpySeedFix, auto_init_args
class KeypointsDataset(data.Dataset):
"""
This is a generalized class suitable for storing object keypoint annotations
The input jsonfile needs to be a list of dictionaries
(one dictionary per pose annotation) of the form:
{
# REQUIRED FIELDS #
"kp_loc" : 2 x N list of keypoints
"kp_vis" : 1 x N list of 1/0 boolean indicators
# OPTIONAL FIELDS #
"file_name": name of file from image_root
"kp_loc_3d": 3 x N list of 3D keypoint locations in camera coords
}
"""
def __init__( self,
jsonfile=None,
train=True,
limit_to=0,
limit_seq_to=-1,
rand_sample=0,
image_root=None,
mask_root=None,
depth_root=None,
refresh_db=False,
min_visible=0,
subsample=1,
load_images=True,
load_depths=True,
load_masks=True,
image_height=9*20*2,
image_width=9*20*2,
dilate_masks=5,
max_frame_diff = -1.,
max_angle_diff = 4.,
kp_conf_thr = 0.,
nrsfm_model_outputs = None,
box_crop_context=1.,
box_crop=False,
):
auto_init_args(self)
self.load_db_file()
has_classes = 'class_mask' in self.db[0]
if has_classes:
self.class_db = self.get_class_db()
else:
self.class_db = None
self.get_transposed_db()
def get_transposed_db(self):
print('getting transposed db ...')
self.dbT = {}
self.dbT['unqseq'] = sorted(list(set([e['seq'] for e in self.db])))
self.dbT['seq_dict'] = {}
self.dbT['seq'] = [e['seq'] for e in self.db]
dict_seq = {s:i for i,s in enumerate(self.dbT['seq'])}
for i in range(len(self.db)):
# seq_ = self.dbT['unqseq'].index(self.db[i]['seq'])
seq = dict_seq[self.db[i]['seq']]
# assert seq_==seq
# print('%d==%d' % (seq_,seq))
if seq not in self.dbT['seq_dict']:
self.dbT['seq_dict'][seq] = []
self.dbT['seq_dict'][seq].append(i)
def load_db_file(self):
print("loading data from %s" % self.jsonfile)
ext = self.jsonfile.split('.')[-1]
if ext=='json':
with open(self.jsonfile,'r') as data_file:
db = json.load(data_file)
elif ext=='pkl':
with open(self.jsonfile,'rb') as data_file:
db = pickle.load(data_file)
elif ext=='pgz':
with gzip.GzipFile(self.jsonfile, 'r') as data_file:
db = pickle.load(data_file)
else:
raise ValueError('bad extension %s' % ext)
if 'seq' not in db[0]:
print('no sequence numbers! => filling with unique seq per image')
for ei, e in enumerate(db):
e['seq_name'] = str(ei)
e['seq'] = ei
unqseq = list(range(len(db)))
else:
unqseq = sorted(list(set([e['seq'] for e in db])))
for e in db:
e['seq_name'] = copy.deepcopy(e['seq'])
e['seq'] = unqseq.index(e['seq'])
print("data train=%d , n frames = %d, n seq = %d" % \
(self.train, len(db), len(unqseq)))
self.db = db
self.restrict_images()
def get_class_db(self):
print('parsing class db ...')
masks = np.stack([np.array(e['class_mask']) for e in self.db])
unq_masks = np.unique(masks, axis=0)
n_cls = unq_masks.shape[0]
class_db = {tuple(m.tolist()):[] for m in unq_masks}
for ei,e in enumerate(self.db):
class_db[tuple(e['class_mask'])].append(ei)
class_db = list(class_db.values())
for eis in class_db: # sanity check
cls_array = np.stack([self.db[ei]['class_mask'] for ei in eis])
assert ((cls_array - cls_array[0:1,:])**2).sum()<=1e-6
return class_db
def restrict_images(self):
print( "limitting dataset to seqs: " + str(self.limit_seq_to) )
if type(self.limit_seq_to) in (tuple,list):
if len(self.limit_seq_to) > 1 or self.limit_seq_to[0] >= 0:
self.db = [f for f in self.db if f['seq'] in self.limit_seq_to ]
elif type(self.limit_seq_to)==int:
if self.limit_seq_to > 0:
self.db = [f for f in self.db if f['seq'] < self.limit_seq_to ]
else:
assert False, "bad seq limit type"
if self.limit_to > 0:
tgtnum = min( self.limit_to, len(self.db) )
prm = list(range(len(self.db)))[0:tgtnum]
# with NumpySeedFix():
# prm = np.random.permutation( \
# len(self.db))[0:tgtnum]
print( "limitting dataset to %d samples" % tgtnum )
self.db = [self.db[i] for i in prm]
if self.subsample > 1:
orig_len = len(self.db)
self.db = [self.db[i] for i in range(0, len(self.db), self.subsample)]
print('db subsampled %d -> %d' % (orig_len, len(self.db)))
if self.kp_conf_thr > 0. and 'kp_conf' in self.db[0]:
for e in self.db:
v = torch.FloatTensor(e['kp_vis'])
c = torch.FloatTensor(e['kp_conf'])
e['kp_vis'] = (c > self.kp_conf_thr).float().tolist()
if self.min_visible > 0:
len_orig = len(self.db)
self.db = [ e for e in self.db \
if (torch.FloatTensor(e['kp_vis'])>0).float().sum()>self.min_visible]
print('kept %3.1f %% entries' % (100.*len(self.db)/float(len_orig)) )
assert len(self.db) > 10
def resize_image(self, image, mode='bilinear'):
image_size = [self.image_height, self.image_width]
minscale = min(image_size[i] / image.shape[i+1] for i in [0, 1])
imre = torch.nn.functional.interpolate( \
image[None], scale_factor=minscale, mode=mode)[0]
imre_ = torch.zeros(image.shape[0],image_size[0],image_size[1])
imre_[:,0:imre.shape[1],0:imre.shape[2]] = imre
return imre_, minscale
def load_image(self, entry):
im = np.array(Image.open(entry['image_path']).convert('RGB'))
im = im.transpose((2,0,1))
im = im.astype(np.float32) / 255.
return im
def crop_around_box(self, entry, box_context=1.):
bbox = entry['bbox'].clone() # [xmin, ymin, w, h]
# increase box size
c = box_context
bbox[0] -= bbox[2]*c/2
bbox[1] -= bbox[3]*c/2
bbox[2] += bbox[2]*c
bbox[3] += bbox[3]*c
bbox = bbox.long()
# assert bbox[2] >= 2, 'weird box!'
# assert bbox[3] >= 2, 'weird box!'
bbox[2:4] = torch.clamp(bbox[2:4], 2)
entry['orig_image_size'] = bbox[[3,2]].float()
bbox[2:4] += bbox[0:2]+1 # convert to [xmin, ymin, xmax, ymax]
for k in ['images', 'masks', 'depths']:
if getattr(self, 'load_'+k) and k in entry:
crop_tensor = entry[k]
bbox[[0,2]] = torch.clamp(bbox[[0,2]], 0., crop_tensor.shape[2])
bbox[[1,3]] = torch.clamp(bbox[[1,3]], 0., crop_tensor.shape[1])
crop_tensor = crop_tensor[:, bbox[1]:bbox[3], bbox[0]:bbox[2]]
assert all(c>0 for c in crop_tensor.shape), 'squashed image'
entry[k] = crop_tensor
entry['kp_loc'] = entry['kp_loc'] - bbox[0:2,None].float()
return entry
def __len__(self):
if self.rand_sample > 0:
return self.rand_sample
else:
return len(self.db)
def __getitem__(self, index):
assert index < len(self.db), \
'index %d out of range (%d)' % (index, len(self.db))
entry = copy.deepcopy(self.db[index])
if self.image_root is not None and 'image_path' in entry:
entry['image_path'] = os.path.join(self.image_root,entry['image_path'])
if self.mask_root is not None and 'mask_path' in entry:
entry['mask_path'] = os.path.join(self.mask_root,entry['mask_path'])
if self.depth_root is not None and 'depth_path' in entry:
entry['depth_path'] = os.path.join(self.depth_root,entry['depth_path'])
if self.load_images:
entry['images'] = self.load_image(entry)
entry['orig_image_size'] = list(entry['images'].shape[1:])
if self.load_depths:
entry['depths'] = load_depth(entry)
if self.load_masks:
entry['masks'] = load_mask(entry)
if entry['masks'] is None:
entry['masks'] = np.zeros(entry['images'].shape[1:3] \
)[None].astype(np.float32)
else:
# assert entry['masks'].shape[1:3]==entry['images'].shape[1:3]
if self.load_images and \
entry['masks'].shape[1:3] != entry['images'].shape[1:3]:
                    # print(entry['mask_path']); print(entry['image_path'])
                    print('bad mask size!!!!')
entry['masks'] = np.zeros(entry['images'].shape[1:3] \
)[None].astype(np.float32)
# convert to torch Tensors where possible
for fld in ( 'kp_loc', 'kp_vis', 'kp_loc_3d',
'class_mask', 'kp_defined', 'images',
'orig_image_size', 'masks', 'K', 'depths', 'bbox',
'kp_conf', 'R', 'T'):
if fld in entry:
entry[fld] = torch.FloatTensor(entry[fld])
# first crop if needed, then resize
if self.box_crop and self.load_images:
entry = self.crop_around_box(entry, self.box_crop_context)
if 'sfm_model' not in entry:
entry['sfm_model'] = '<NO_MODEL>'
entry['K_orig'] = entry['K'].clone()
if self.load_images:
# resize image
entry['images'], scale = self.resize_image(entry['images'],
mode='bilinear')
for fld in ('kp_loc', 'kp_loc_3d', 'K'):
if fld in entry:
entry[fld] *= scale
if fld=='K':
entry[fld][2,2] = 1.
else:
scale = 1.
if self.load_masks:
entry['masks'], _ = self.resize_image(entry['masks'],
mode='nearest')
if self.dilate_masks > 0:
#print('mask dilation')
entry['masks'] = torch.nn.functional.max_pool2d(
entry['masks'],
self.dilate_masks*2+1,
stride=1,
padding=self.dilate_masks )
elif self.dilate_masks < 0:
imask_dil = torch.nn.functional.max_pool2d(
1-entry['masks'],
abs(self.dilate_masks)*2+1,
stride=1,
padding=abs(self.dilate_masks) )
entry['masks'] = torch.clamp(entry['masks'] - imask_dil, 0.)
if self.load_depths:
entry['depths'], _ = self.resize_image(entry['depths'],
mode='nearest')
entry['depths'] *= scale
if 'p3d_info' in entry: # filter the kp out of bbox
bbox = torch.FloatTensor(entry['p3d_info']['bbox'])
bbox_vis, bbox_err = bbox_kp_visibility( \
bbox, entry['kp_loc'], entry['kp_vis'])
entry['kp_vis'] = entry['kp_vis'] * bbox_vis.float()
# mask out invisible
entry['kp_loc'] = entry['kp_loc'] * entry['kp_vis'][None]
return entry
def bbox_kp_visibility(bbox, keypoints, vis):
bx,by,bw,bh = bbox
x = keypoints[0]; y = keypoints[1]
ctx_ = 0.1
in_box = (x>=bx-ctx_*bw) * (x<=bx+bw*(1+ctx_)) * \
(y>=by-ctx_*bh) * (y<=by+bh*(1+ctx_))
in_box = in_box * (vis==1)
err = torch.stack( [ (bx-ctx_*bw)-x,
x-(bx+bw*(1+ctx_)),
(by-ctx_*bh)-y,
y-(by+bh*(1+ctx_)) ] )
err = torch.relu(err) * vis[None].float()
err = torch.stack( ( torch.max( err[0],err[1] ),
torch.max( err[2],err[3] ) ) ).max(dim=1)[0]
return in_box, err
def read_colmap_depth(path):
with open(path, "rb") as fid:
width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
usecols=(0, 1, 2), dtype=int)
fid.seek(0)
num_delimiter = 0
byte = fid.read(1)
while True:
if byte == b"&":
num_delimiter += 1
if num_delimiter >= 3:
break
byte = fid.read(1)
array = np.fromfile(fid, np.float32)
array = array.reshape((width, height, channels), order="F")
return np.transpose(array, (1, 0, 2)).squeeze()
def load_depth(entry):
if entry['depth_path'].endswith('<NO_DEPTH>'):
        # we don't have a depth map; fill in a dummy one
d = np.ones(entry['images'].shape[1:]).astype(float)[None]
else:
ext = os.path.splitext(entry['depth_path'])[-1]
if ext=='.bin': # colmap binary format
d = read_colmap_depth(entry['depth_path'])
# clamp the values
min_depth, max_depth = np.percentile(d, [1, 95])
d[d < min_depth] = min_depth
d[d > max_depth] = max_depth
d = d.astype(np.float32)[None]
elif ext=='.png': # ldos depth
postfixl = len('081276300.rgb.jpg')
dpath_corrected = glob.glob(entry['depth_path'][0:-postfixl]+'*')
assert len(dpath_corrected)==1
d = np.array(Image.open(dpath_corrected[0])).astype(float)[None]
d /= 1000. # to meters
elif ext=='.tiff': # sparse colmap depth
d = np.array(Image.open(entry['depth_path'])).astype(float)[None]
else:
raise ValueError('unsupported depth ext "%s"' % ext)
return d
def load_mask(entry):
# fix for birds
if not os.path.isfile(entry['mask_path']):
for ext in ('.png', '.jpg'):
new_path = os.path.splitext(entry['mask_path'])[0] + ext
if os.path.isfile(new_path):
entry['mask_path'] = new_path
if not os.path.isfile(entry['mask_path']):
print('no mask!')
print(entry['mask_path'])
mask = None
else:
mask = np.array(Image.open(entry['mask_path']))
mask = mask.astype(np.float32)[None]
return mask
|
c3dm-main
|
c3dm/dataset/keypoints_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
# list of root folders containing the dataset images
IMAGE_ROOTS = {
'freicars_clickp_filtd': ('./dataset_root/freicars/',),
'freicars_clickp_filtd_dbg': ('./dataset_root/freicars/',),
'cub_birds_hrnet_v2': ('./dataset_root/cub_birds/',),
'celeba_ff': ('./dataset_root/celeba/',
'./dataset_root/florence/'),
'pascal3d_clickp_all': ('./dataset_root/PASCAL3D+_release1.1',),
}
MASK_ROOTS = copy.deepcopy(IMAGE_ROOTS)
DEPTH_ROOTS = copy.deepcopy(IMAGE_ROOTS)
MASK_ROOTS['cub_birds_hrnet_v2'] = ('./dataset_root/cub_birds/',)
DATASET_ROOT = './dataset_root'
DATASET_URL = {
'freicars_clickp_filtd_train': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_train.json.gz',
'freicars_clickp_filtd_val': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_val.json.gz',
}
IMAGE_URLS = {
'cub_birds_hrnet_v2': ('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz',),
'pascal3d_clickp_all': ('ftp://cs.stanford.edu/cs/cvgl/PASCAL3D+_release1.1.zip',),
}
MASK_URLS = {
'cub_birds_hrnet_v2': ('',),
}
DEPTH_URLS = {
'cub_birds_hrnet_v2': ('',),
}
C3DM_URLS = {
'freicars_clickp_filtd': 'https://dl.fbaipublicfiles.com/c3dm/c3dm_freicars.tar.gz',
}
C3DPO_MODELS = {
'cub_birds_hrnet_orth_b50': './dataset_root/c3dpo_cub',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': './dataset_root/c3dpo_freicars',
}
C3DPO_URLS = {
'cub_birds_hrnet_orth_b50': '',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': 'https://dl.fbaipublicfiles.com/c3dm/c3dpo_freicars.tar.gz',
}
# ----- connectivity patterns for visualizing the stick-men
STICKS = {
'pose_track': [ [2, 0],[0, 1],[1, 5],[5, 7],
[9, 7],[1, 6],[6, 8],[10, 8],
[1, 12],[12, 11],[11, 1],[14, 12],
[11, 13],[15, 13],[16, 14]] ,
'h36m': [ [10, 9], [9, 8], [8, 14],
[14, 15], [15, 16], [8, 11],
[11, 12], [12, 13], [8, 7],
[7, 0], [1, 0], [1, 2],
[2, 3], [0, 4], [4, 5], [5, 6] ],
'cub_birds': [ [1, 5], [5, 4], [4, 9],
[9, 0], [0, 13], [0, 12],
[0, 8], [12, 13], [1, 14],
[14, 3], [3, 2], [2, 7],
[1, 10], [1, 6], [2, 11],
[2, 7], [8, 13] ],
'coco': [ [13,15], [14,16], [12,14], [11,12,], [11,13],
[0,12], [0,11], [8,10], [6,8],
[7,9], [5,7], [0,5], [0,6],
[0,3], [0,4], [0,2], [0,1] ],
'freicars': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'pascal3d': {
'car': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'aeroplane': [[2, 5], [1, 4], [5, 3], [3, 7],
[7, 0], [0, 5], [5, 7], [5, 6],
[6, 0], [6, 3], [2, 4], [2, 1]],
'motorbike': [[6, 2],
[2, 9],
[2, 3],
[3, 8],
[5, 8],
[3, 5],
[2, 1],
[1, 0],
[0, 7],
[0, 4],
[4, 7],
[1, 4],
[1, 7],
[1, 5],
[1, 8]],
'sofa': [[1, 5],
[5, 4],
[4, 6],
[6, 2],
[2, 0],
[1, 0],
[0, 4],
[1, 3],
[7, 5],
[2, 3],
[3, 7],
[9, 7],
[7, 6],
[6, 8],
[8, 9]],
'chair': [[7, 3],
[6, 2],
[9, 5],
[8, 4],
[7, 9],
[8, 6],
[6, 7],
[9, 8],
[9, 1],
[8, 0],
[1, 0]],
},
}
STICKS['cub_birds_hrnet'] = STICKS['cub_birds']
H36M_ACTIONS = [ 'Directions','Discussion','Eating','Greeting',
'Phoning','Photo','Posing','Purchases','Sitting',
'SittingDown','Smoking','Waiting','WalkDog',
'Walking','WalkTogether' ]
P3D_NUM_KEYPOINTS = {\
'aeroplane': 8,
'car': 12,
'tvmonitor': 8,
'sofa': 10,
'motorbike': 10,
'diningtable': 12,
'chair': 10,
'bus': 12,
'bottle': 7,
'boat': 7,
'bicycle': 11,
'train': 17 }
P3D_CLASSES = list(P3D_NUM_KEYPOINTS.keys())
# add the per-class p3d db paths
for cls_ in P3D_CLASSES:
IMAGE_ROOTS['pascal3d_clickp_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
IMAGE_ROOTS['pascal3d_clickp_mesh_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
IMAGE_ROOTS['pascal3d_clickp_clean_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
P3D_NUM_IMAGES={
'train':{"aeroplane": 1953, "car": 5627,
"tvmonitor": 1374,"sofa": 669,
"motorbike": 725,"diningtable": 751,
"chair": 1186,"bus": 1185,
"bottle": 1601,"boat": 2046,
"bicycle": 904,"train": 1113,},
'val': {"aeroplane": 269,"car": 294,
"tvmonitor": 206,"sofa": 37,
"motorbike": 116,"diningtable": 12,
"chair": 227,"bus": 153,
"bottle": 249,"boat": 163,
"bicycle": 115,"train": 109}}
DATASET_CFG = {
'freicars_clickp_filtd':
{
'image_height': 9*40,
'image_width': 16*40,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'celeba':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
'subsample': 4,
},
'ldos_chairs':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'kp_conf_thr': 0.8,
'box_crop': False,
},
'ldos_chairs_armchair':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 4,
'kp_conf_thr': 0.6,
'box_crop': False,
},
'pascal3d_clickp':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'box_crop': True,
},
'pascal3d_clickp_clean':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
# 'min_visible': 4,
'box_crop': True,
'dilate_masks': 0,
'box_crop_context': 0.2,
},
'h36m_sparse':
{
'image_height': 25*20,
'image_width': 15*20,
'max_angle_diff': 3.14/2,
# 'max_frame_diff': 0.33,
# 'min_visible': 6,
'subsample': 10,
'box_crop': True,
'box_crop_context': 0.2,
'dilate_masks': 0,
},
'cub_birds_hrnet_v2':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'default':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'box_crop': False,
}
}
for cls_ in P3D_CLASSES:
DATASET_CFG['pascal3d_clickp_'+cls_] = DATASET_CFG['pascal3d_clickp']
DATASET_CFG['pascal3d_clickp_clean_'+cls_] = DATASET_CFG['pascal3d_clickp_clean']
FILTER_DB_SETTINGS = {
'freicars_clickp_filtd': {
'nn': 1e-3,
'perc_keep': 0.95,
'sig': 0.02,
'lap_size': 5e-4,
'lap_alpha': 0.9,
},
'default': {
'nn': 1e-3,
'perc_keep': 0.9,
'sig': 0.01,
'lap_size': 1e-3,
'lap_alpha': 0.9,
}
}
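# FILTER_DB_SETTINGS presumably follows the same default-fallback pattern as
# DATASET_CFG, e.g.
# FILTER_DB_SETTINGS.get(dataset_name, FILTER_DB_SETTINGS['default']).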
# Validation frames of the Freiburg cars ('freicars') dataset: half-resolution
# undistorted frames from sequences 022, 034, 036 and 037.
FREIBURG_VAL_IMAGES = [
"022/undistort/images/frame_0000001.jpg.half.jpg",
"022/undistort/images/frame_0000002.jpg.half.jpg",
"022/undistort/images/frame_0000003.jpg.half.jpg",
"022/undistort/images/frame_0000004.jpg.half.jpg",
"022/undistort/images/frame_0000005.jpg.half.jpg",
"022/undistort/images/frame_0000006.jpg.half.jpg",
"022/undistort/images/frame_0000007.jpg.half.jpg",
"022/undistort/images/frame_0000008.jpg.half.jpg",
"022/undistort/images/frame_0000009.jpg.half.jpg",
"022/undistort/images/frame_0000010.jpg.half.jpg",
"022/undistort/images/frame_0000011.jpg.half.jpg",
"022/undistort/images/frame_0000012.jpg.half.jpg",
"022/undistort/images/frame_0000013.jpg.half.jpg",
"022/undistort/images/frame_0000014.jpg.half.jpg",
"022/undistort/images/frame_0000015.jpg.half.jpg",
"022/undistort/images/frame_0000016.jpg.half.jpg",
"022/undistort/images/frame_0000017.jpg.half.jpg",
"022/undistort/images/frame_0000018.jpg.half.jpg",
"022/undistort/images/frame_0000019.jpg.half.jpg",
"022/undistort/images/frame_0000020.jpg.half.jpg",
"022/undistort/images/frame_0000021.jpg.half.jpg",
"022/undistort/images/frame_0000022.jpg.half.jpg",
"022/undistort/images/frame_0000023.jpg.half.jpg",
"022/undistort/images/frame_0000024.jpg.half.jpg",
"022/undistort/images/frame_0000025.jpg.half.jpg",
"022/undistort/images/frame_0000026.jpg.half.jpg",
"022/undistort/images/frame_0000027.jpg.half.jpg",
"022/undistort/images/frame_0000030.jpg.half.jpg",
"022/undistort/images/frame_0000031.jpg.half.jpg",
"022/undistort/images/frame_0000032.jpg.half.jpg",
"022/undistort/images/frame_0000033.jpg.half.jpg",
"022/undistort/images/frame_0000034.jpg.half.jpg",
"022/undistort/images/frame_0000035.jpg.half.jpg",
"022/undistort/images/frame_0000036.jpg.half.jpg",
"022/undistort/images/frame_0000037.jpg.half.jpg",
"022/undistort/images/frame_0000038.jpg.half.jpg",
"022/undistort/images/frame_0000039.jpg.half.jpg",
"022/undistort/images/frame_0000040.jpg.half.jpg",
"022/undistort/images/frame_0000041.jpg.half.jpg",
"022/undistort/images/frame_0000042.jpg.half.jpg",
"022/undistort/images/frame_0000043.jpg.half.jpg",
"022/undistort/images/frame_0000044.jpg.half.jpg",
"022/undistort/images/frame_0000045.jpg.half.jpg",
"022/undistort/images/frame_0000046.jpg.half.jpg",
"022/undistort/images/frame_0000047.jpg.half.jpg",
"022/undistort/images/frame_0000048.jpg.half.jpg",
"022/undistort/images/frame_0000049.jpg.half.jpg",
"022/undistort/images/frame_0000050.jpg.half.jpg",
"022/undistort/images/frame_0000051.jpg.half.jpg",
"022/undistort/images/frame_0000052.jpg.half.jpg",
"022/undistort/images/frame_0000053.jpg.half.jpg",
"022/undistort/images/frame_0000054.jpg.half.jpg",
"022/undistort/images/frame_0000055.jpg.half.jpg",
"022/undistort/images/frame_0000056.jpg.half.jpg",
"022/undistort/images/frame_0000057.jpg.half.jpg",
"022/undistort/images/frame_0000058.jpg.half.jpg",
"022/undistort/images/frame_0000059.jpg.half.jpg",
"022/undistort/images/frame_0000060.jpg.half.jpg",
"022/undistort/images/frame_0000061.jpg.half.jpg",
"022/undistort/images/frame_0000062.jpg.half.jpg",
"022/undistort/images/frame_0000063.jpg.half.jpg",
"022/undistort/images/frame_0000064.jpg.half.jpg",
"022/undistort/images/frame_0000065.jpg.half.jpg",
"022/undistort/images/frame_0000066.jpg.half.jpg",
"022/undistort/images/frame_0000067.jpg.half.jpg",
"022/undistort/images/frame_0000068.jpg.half.jpg",
"022/undistort/images/frame_0000069.jpg.half.jpg",
"022/undistort/images/frame_0000070.jpg.half.jpg",
"022/undistort/images/frame_0000071.jpg.half.jpg",
"022/undistort/images/frame_0000072.jpg.half.jpg",
"022/undistort/images/frame_0000073.jpg.half.jpg",
"022/undistort/images/frame_0000074.jpg.half.jpg",
"022/undistort/images/frame_0000075.jpg.half.jpg",
"022/undistort/images/frame_0000076.jpg.half.jpg",
"022/undistort/images/frame_0000077.jpg.half.jpg",
"022/undistort/images/frame_0000078.jpg.half.jpg",
"022/undistort/images/frame_0000079.jpg.half.jpg",
"022/undistort/images/frame_0000080.jpg.half.jpg",
"022/undistort/images/frame_0000081.jpg.half.jpg",
"022/undistort/images/frame_0000082.jpg.half.jpg",
"022/undistort/images/frame_0000083.jpg.half.jpg",
"022/undistort/images/frame_0000084.jpg.half.jpg",
"022/undistort/images/frame_0000085.jpg.half.jpg",
"022/undistort/images/frame_0000086.jpg.half.jpg",
"022/undistort/images/frame_0000087.jpg.half.jpg",
"022/undistort/images/frame_0000088.jpg.half.jpg",
"022/undistort/images/frame_0000089.jpg.half.jpg",
"022/undistort/images/frame_0000090.jpg.half.jpg",
"022/undistort/images/frame_0000091.jpg.half.jpg",
"022/undistort/images/frame_0000092.jpg.half.jpg",
"022/undistort/images/frame_0000093.jpg.half.jpg",
"022/undistort/images/frame_0000094.jpg.half.jpg",
"022/undistort/images/frame_0000095.jpg.half.jpg",
"022/undistort/images/frame_0000096.jpg.half.jpg",
"022/undistort/images/frame_0000097.jpg.half.jpg",
"022/undistort/images/frame_0000098.jpg.half.jpg",
"022/undistort/images/frame_0000099.jpg.half.jpg",
"022/undistort/images/frame_0000101.jpg.half.jpg",
"022/undistort/images/frame_0000104.jpg.half.jpg",
"022/undistort/images/frame_0000105.jpg.half.jpg",
"022/undistort/images/frame_0000106.jpg.half.jpg",
"022/undistort/images/frame_0000107.jpg.half.jpg",
"022/undistort/images/frame_0000108.jpg.half.jpg",
"022/undistort/images/frame_0000109.jpg.half.jpg",
"022/undistort/images/frame_0000110.jpg.half.jpg",
"022/undistort/images/frame_0000111.jpg.half.jpg",
"022/undistort/images/frame_0000112.jpg.half.jpg",
"022/undistort/images/frame_0000113.jpg.half.jpg",
"022/undistort/images/frame_0000114.jpg.half.jpg",
"022/undistort/images/frame_0000115.jpg.half.jpg",
"022/undistort/images/frame_0000116.jpg.half.jpg",
"022/undistort/images/frame_0000117.jpg.half.jpg",
"022/undistort/images/frame_0000118.jpg.half.jpg",
"022/undistort/images/frame_0000119.jpg.half.jpg",
"022/undistort/images/frame_0000120.jpg.half.jpg",
"022/undistort/images/frame_0000121.jpg.half.jpg",
"022/undistort/images/frame_0000122.jpg.half.jpg",
"022/undistort/images/frame_0000123.jpg.half.jpg",
"022/undistort/images/frame_0000124.jpg.half.jpg",
"022/undistort/images/frame_0000125.jpg.half.jpg",
"022/undistort/images/frame_0000126.jpg.half.jpg",
"022/undistort/images/frame_0000127.jpg.half.jpg",
"022/undistort/images/frame_0000128.jpg.half.jpg",
"022/undistort/images/frame_0000129.jpg.half.jpg",
"022/undistort/images/frame_0000130.jpg.half.jpg",
"022/undistort/images/frame_0000131.jpg.half.jpg",
"022/undistort/images/frame_0000132.jpg.half.jpg",
"022/undistort/images/frame_0000133.jpg.half.jpg",
"022/undistort/images/frame_0000134.jpg.half.jpg",
"022/undistort/images/frame_0000135.jpg.half.jpg",
"022/undistort/images/frame_0000136.jpg.half.jpg",
"022/undistort/images/frame_0000137.jpg.half.jpg",
"022/undistort/images/frame_0000138.jpg.half.jpg",
"022/undistort/images/frame_0000139.jpg.half.jpg",
"022/undistort/images/frame_0000140.jpg.half.jpg",
"022/undistort/images/frame_0000141.jpg.half.jpg",
"022/undistort/images/frame_0000142.jpg.half.jpg",
"022/undistort/images/frame_0000143.jpg.half.jpg",
"022/undistort/images/frame_0000144.jpg.half.jpg",
"022/undistort/images/frame_0000145.jpg.half.jpg",
"022/undistort/images/frame_0000146.jpg.half.jpg",
"022/undistort/images/frame_0000147.jpg.half.jpg",
"022/undistort/images/frame_0000148.jpg.half.jpg",
"022/undistort/images/frame_0000149.jpg.half.jpg",
"022/undistort/images/frame_0000150.jpg.half.jpg",
"022/undistort/images/frame_0000151.jpg.half.jpg",
"022/undistort/images/frame_0000152.jpg.half.jpg",
"022/undistort/images/frame_0000153.jpg.half.jpg",
"022/undistort/images/frame_0000154.jpg.half.jpg",
"022/undistort/images/frame_0000155.jpg.half.jpg",
"022/undistort/images/frame_0000156.jpg.half.jpg",
"022/undistort/images/frame_0000157.jpg.half.jpg",
"022/undistort/images/frame_0000158.jpg.half.jpg",
"022/undistort/images/frame_0000159.jpg.half.jpg",
"022/undistort/images/frame_0000160.jpg.half.jpg",
"022/undistort/images/frame_0000161.jpg.half.jpg",
"022/undistort/images/frame_0000162.jpg.half.jpg",
"022/undistort/images/frame_0000163.jpg.half.jpg",
"022/undistort/images/frame_0000164.jpg.half.jpg",
"022/undistort/images/frame_0000165.jpg.half.jpg",
"022/undistort/images/frame_0000166.jpg.half.jpg",
"022/undistort/images/frame_0000167.jpg.half.jpg",
"022/undistort/images/frame_0000168.jpg.half.jpg",
"022/undistort/images/frame_0000169.jpg.half.jpg",
"022/undistort/images/frame_0000170.jpg.half.jpg",
"022/undistort/images/frame_0000171.jpg.half.jpg",
"022/undistort/images/frame_0000172.jpg.half.jpg",
"022/undistort/images/frame_0000173.jpg.half.jpg",
"022/undistort/images/frame_0000174.jpg.half.jpg",
"022/undistort/images/frame_0000176.jpg.half.jpg",
"022/undistort/images/frame_0000177.jpg.half.jpg",
"022/undistort/images/frame_0000178.jpg.half.jpg",
"022/undistort/images/frame_0000179.jpg.half.jpg",
"022/undistort/images/frame_0000180.jpg.half.jpg",
"022/undistort/images/frame_0000181.jpg.half.jpg",
"022/undistort/images/frame_0000182.jpg.half.jpg",
"022/undistort/images/frame_0000183.jpg.half.jpg",
"022/undistort/images/frame_0000184.jpg.half.jpg",
"022/undistort/images/frame_0000185.jpg.half.jpg",
"022/undistort/images/frame_0000186.jpg.half.jpg",
"022/undistort/images/frame_0000187.jpg.half.jpg",
"022/undistort/images/frame_0000188.jpg.half.jpg",
"022/undistort/images/frame_0000189.jpg.half.jpg",
"022/undistort/images/frame_0000190.jpg.half.jpg",
"022/undistort/images/frame_0000191.jpg.half.jpg",
"022/undistort/images/frame_0000192.jpg.half.jpg",
"022/undistort/images/frame_0000193.jpg.half.jpg",
"022/undistort/images/frame_0000194.jpg.half.jpg",
"022/undistort/images/frame_0000195.jpg.half.jpg",
"022/undistort/images/frame_0000196.jpg.half.jpg",
"022/undistort/images/frame_0000197.jpg.half.jpg",
"022/undistort/images/frame_0000198.jpg.half.jpg",
"022/undistort/images/frame_0000199.jpg.half.jpg",
"022/undistort/images/frame_0000200.jpg.half.jpg",
"022/undistort/images/frame_0000201.jpg.half.jpg",
"022/undistort/images/frame_0000202.jpg.half.jpg",
"022/undistort/images/frame_0000203.jpg.half.jpg",
"022/undistort/images/frame_0000204.jpg.half.jpg",
"022/undistort/images/frame_0000205.jpg.half.jpg",
"022/undistort/images/frame_0000206.jpg.half.jpg",
"022/undistort/images/frame_0000207.jpg.half.jpg",
"022/undistort/images/frame_0000208.jpg.half.jpg",
"022/undistort/images/frame_0000209.jpg.half.jpg",
"022/undistort/images/frame_0000210.jpg.half.jpg",
"022/undistort/images/frame_0000211.jpg.half.jpg",
"022/undistort/images/frame_0000212.jpg.half.jpg",
"022/undistort/images/frame_0000213.jpg.half.jpg",
"022/undistort/images/frame_0000214.jpg.half.jpg",
"022/undistort/images/frame_0000215.jpg.half.jpg",
"022/undistort/images/frame_0000216.jpg.half.jpg",
"022/undistort/images/frame_0000217.jpg.half.jpg",
"022/undistort/images/frame_0000218.jpg.half.jpg",
"022/undistort/images/frame_0000219.jpg.half.jpg",
"022/undistort/images/frame_0000220.jpg.half.jpg",
"022/undistort/images/frame_0000221.jpg.half.jpg",
"022/undistort/images/frame_0000222.jpg.half.jpg",
"022/undistort/images/frame_0000223.jpg.half.jpg",
"022/undistort/images/frame_0000224.jpg.half.jpg",
"022/undistort/images/frame_0000225.jpg.half.jpg",
"022/undistort/images/frame_0000226.jpg.half.jpg",
"022/undistort/images/frame_0000227.jpg.half.jpg",
"022/undistort/images/frame_0000228.jpg.half.jpg",
"022/undistort/images/frame_0000229.jpg.half.jpg",
"022/undistort/images/frame_0000230.jpg.half.jpg",
"022/undistort/images/frame_0000231.jpg.half.jpg",
"022/undistort/images/frame_0000232.jpg.half.jpg",
"022/undistort/images/frame_0000233.jpg.half.jpg",
"022/undistort/images/frame_0000234.jpg.half.jpg",
"022/undistort/images/frame_0000235.jpg.half.jpg",
"022/undistort/images/frame_0000236.jpg.half.jpg",
"022/undistort/images/frame_0000237.jpg.half.jpg",
"022/undistort/images/frame_0000238.jpg.half.jpg",
"022/undistort/images/frame_0000239.jpg.half.jpg",
"022/undistort/images/frame_0000240.jpg.half.jpg",
"022/undistort/images/frame_0000241.jpg.half.jpg",
"022/undistort/images/frame_0000242.jpg.half.jpg",
"022/undistort/images/frame_0000243.jpg.half.jpg",
"022/undistort/images/frame_0000244.jpg.half.jpg",
"022/undistort/images/frame_0000245.jpg.half.jpg",
"022/undistort/images/frame_0000246.jpg.half.jpg",
"022/undistort/images/frame_0000247.jpg.half.jpg",
"022/undistort/images/frame_0000248.jpg.half.jpg",
"022/undistort/images/frame_0000249.jpg.half.jpg",
"022/undistort/images/frame_0000250.jpg.half.jpg",
"022/undistort/images/frame_0000251.jpg.half.jpg",
"022/undistort/images/frame_0000252.jpg.half.jpg",
"022/undistort/images/frame_0000253.jpg.half.jpg",
"022/undistort/images/frame_0000254.jpg.half.jpg",
"022/undistort/images/frame_0000255.jpg.half.jpg",
"022/undistort/images/frame_0000256.jpg.half.jpg",
"022/undistort/images/frame_0000257.jpg.half.jpg",
"022/undistort/images/frame_0000258.jpg.half.jpg",
"022/undistort/images/frame_0000259.jpg.half.jpg",
"022/undistort/images/frame_0000260.jpg.half.jpg",
"022/undistort/images/frame_0000261.jpg.half.jpg",
"022/undistort/images/frame_0000262.jpg.half.jpg",
"022/undistort/images/frame_0000263.jpg.half.jpg",
"022/undistort/images/frame_0000264.jpg.half.jpg",
"022/undistort/images/frame_0000265.jpg.half.jpg",
"022/undistort/images/frame_0000266.jpg.half.jpg",
"022/undistort/images/frame_0000267.jpg.half.jpg",
"022/undistort/images/frame_0000268.jpg.half.jpg",
"022/undistort/images/frame_0000269.jpg.half.jpg",
"022/undistort/images/frame_0000270.jpg.half.jpg",
"022/undistort/images/frame_0000271.jpg.half.jpg",
"022/undistort/images/frame_0000272.jpg.half.jpg",
"022/undistort/images/frame_0000273.jpg.half.jpg",
"022/undistort/images/frame_0000274.jpg.half.jpg",
"022/undistort/images/frame_0000275.jpg.half.jpg",
"022/undistort/images/frame_0000276.jpg.half.jpg",
"022/undistort/images/frame_0000277.jpg.half.jpg",
"022/undistort/images/frame_0000278.jpg.half.jpg",
"022/undistort/images/frame_0000279.jpg.half.jpg",
"022/undistort/images/frame_0000280.jpg.half.jpg",
"022/undistort/images/frame_0000281.jpg.half.jpg",
"022/undistort/images/frame_0000283.jpg.half.jpg",
"022/undistort/images/frame_0000284.jpg.half.jpg",
"022/undistort/images/frame_0000285.jpg.half.jpg",
"022/undistort/images/frame_0000286.jpg.half.jpg",
"022/undistort/images/frame_0000287.jpg.half.jpg",
"022/undistort/images/frame_0000288.jpg.half.jpg",
"022/undistort/images/frame_0000289.jpg.half.jpg",
"022/undistort/images/frame_0000290.jpg.half.jpg",
"022/undistort/images/frame_0000291.jpg.half.jpg",
"022/undistort/images/frame_0000292.jpg.half.jpg",
"022/undistort/images/frame_0000293.jpg.half.jpg",
"022/undistort/images/frame_0000294.jpg.half.jpg",
"022/undistort/images/frame_0000295.jpg.half.jpg",
"022/undistort/images/frame_0000296.jpg.half.jpg",
"022/undistort/images/frame_0000297.jpg.half.jpg",
"022/undistort/images/frame_0000298.jpg.half.jpg",
"022/undistort/images/frame_0000299.jpg.half.jpg",
"022/undistort/images/frame_0000300.jpg.half.jpg",
"022/undistort/images/frame_0000301.jpg.half.jpg",
"022/undistort/images/frame_0000302.jpg.half.jpg",
"022/undistort/images/frame_0000303.jpg.half.jpg",
"022/undistort/images/frame_0000304.jpg.half.jpg",
"022/undistort/images/frame_0000305.jpg.half.jpg",
"022/undistort/images/frame_0000306.jpg.half.jpg",
"022/undistort/images/frame_0000307.jpg.half.jpg",
"022/undistort/images/frame_0000308.jpg.half.jpg",
"022/undistort/images/frame_0000309.jpg.half.jpg",
"022/undistort/images/frame_0000310.jpg.half.jpg",
"022/undistort/images/frame_0000311.jpg.half.jpg",
"022/undistort/images/frame_0000312.jpg.half.jpg",
"022/undistort/images/frame_0000313.jpg.half.jpg",
"022/undistort/images/frame_0000314.jpg.half.jpg",
"022/undistort/images/frame_0000315.jpg.half.jpg",
"022/undistort/images/frame_0000316.jpg.half.jpg",
"022/undistort/images/frame_0000317.jpg.half.jpg",
"022/undistort/images/frame_0000318.jpg.half.jpg",
"022/undistort/images/frame_0000319.jpg.half.jpg",
"022/undistort/images/frame_0000320.jpg.half.jpg",
"022/undistort/images/frame_0000321.jpg.half.jpg",
"022/undistort/images/frame_0000322.jpg.half.jpg",
"022/undistort/images/frame_0000323.jpg.half.jpg",
"022/undistort/images/frame_0000324.jpg.half.jpg",
"022/undistort/images/frame_0000325.jpg.half.jpg",
"022/undistort/images/frame_0000326.jpg.half.jpg",
"022/undistort/images/frame_0000327.jpg.half.jpg",
"022/undistort/images/frame_0000328.jpg.half.jpg",
"022/undistort/images/frame_0000329.jpg.half.jpg",
"022/undistort/images/frame_0000330.jpg.half.jpg",
"022/undistort/images/frame_0000331.jpg.half.jpg",
"022/undistort/images/frame_0000332.jpg.half.jpg",
"022/undistort/images/frame_0000333.jpg.half.jpg",
"022/undistort/images/frame_0000334.jpg.half.jpg",
"022/undistort/images/frame_0000335.jpg.half.jpg",
"022/undistort/images/frame_0000336.jpg.half.jpg",
"022/undistort/images/frame_0000337.jpg.half.jpg",
"022/undistort/images/frame_0000338.jpg.half.jpg",
"022/undistort/images/frame_0000339.jpg.half.jpg",
"022/undistort/images/frame_0000340.jpg.half.jpg",
"022/undistort/images/frame_0000341.jpg.half.jpg",
"022/undistort/images/frame_0000342.jpg.half.jpg",
"022/undistort/images/frame_0000343.jpg.half.jpg",
"022/undistort/images/frame_0000344.jpg.half.jpg",
"022/undistort/images/frame_0000345.jpg.half.jpg",
"022/undistort/images/frame_0000346.jpg.half.jpg",
"022/undistort/images/frame_0000347.jpg.half.jpg",
"022/undistort/images/frame_0000348.jpg.half.jpg",
"022/undistort/images/frame_0000349.jpg.half.jpg",
"022/undistort/images/frame_0000350.jpg.half.jpg",
"022/undistort/images/frame_0000351.jpg.half.jpg",
"022/undistort/images/frame_0000352.jpg.half.jpg",
"022/undistort/images/frame_0000353.jpg.half.jpg",
"034/undistort/images/frame_0000001.jpg.half.jpg",
"034/undistort/images/frame_0000002.jpg.half.jpg",
"034/undistort/images/frame_0000003.jpg.half.jpg",
"034/undistort/images/frame_0000004.jpg.half.jpg",
"034/undistort/images/frame_0000005.jpg.half.jpg",
"034/undistort/images/frame_0000006.jpg.half.jpg",
"034/undistort/images/frame_0000007.jpg.half.jpg",
"034/undistort/images/frame_0000008.jpg.half.jpg",
"034/undistort/images/frame_0000009.jpg.half.jpg",
"034/undistort/images/frame_0000010.jpg.half.jpg",
"034/undistort/images/frame_0000011.jpg.half.jpg",
"034/undistort/images/frame_0000013.jpg.half.jpg",
"034/undistort/images/frame_0000014.jpg.half.jpg",
"034/undistort/images/frame_0000015.jpg.half.jpg",
"034/undistort/images/frame_0000016.jpg.half.jpg",
"034/undistort/images/frame_0000017.jpg.half.jpg",
"034/undistort/images/frame_0000018.jpg.half.jpg",
"034/undistort/images/frame_0000019.jpg.half.jpg",
"034/undistort/images/frame_0000020.jpg.half.jpg",
"034/undistort/images/frame_0000021.jpg.half.jpg",
"034/undistort/images/frame_0000022.jpg.half.jpg",
"034/undistort/images/frame_0000023.jpg.half.jpg",
"034/undistort/images/frame_0000024.jpg.half.jpg",
"034/undistort/images/frame_0000025.jpg.half.jpg",
"034/undistort/images/frame_0000027.jpg.half.jpg",
"034/undistort/images/frame_0000028.jpg.half.jpg",
"034/undistort/images/frame_0000029.jpg.half.jpg",
"034/undistort/images/frame_0000031.jpg.half.jpg",
"034/undistort/images/frame_0000032.jpg.half.jpg",
"034/undistort/images/frame_0000033.jpg.half.jpg",
"034/undistort/images/frame_0000036.jpg.half.jpg",
"034/undistort/images/frame_0000037.jpg.half.jpg",
"034/undistort/images/frame_0000038.jpg.half.jpg",
"034/undistort/images/frame_0000039.jpg.half.jpg",
"034/undistort/images/frame_0000040.jpg.half.jpg",
"034/undistort/images/frame_0000041.jpg.half.jpg",
"034/undistort/images/frame_0000043.jpg.half.jpg",
"034/undistort/images/frame_0000044.jpg.half.jpg",
"034/undistort/images/frame_0000045.jpg.half.jpg",
"034/undistort/images/frame_0000049.jpg.half.jpg",
"034/undistort/images/frame_0000106.jpg.half.jpg",
"034/undistort/images/frame_0000107.jpg.half.jpg",
"034/undistort/images/frame_0000108.jpg.half.jpg",
"034/undistort/images/frame_0000109.jpg.half.jpg",
"034/undistort/images/frame_0000110.jpg.half.jpg",
"034/undistort/images/frame_0000111.jpg.half.jpg",
"034/undistort/images/frame_0000112.jpg.half.jpg",
"034/undistort/images/frame_0000113.jpg.half.jpg",
"034/undistort/images/frame_0000114.jpg.half.jpg",
"034/undistort/images/frame_0000115.jpg.half.jpg",
"034/undistort/images/frame_0000116.jpg.half.jpg",
"034/undistort/images/frame_0000117.jpg.half.jpg",
"034/undistort/images/frame_0000118.jpg.half.jpg",
"034/undistort/images/frame_0000119.jpg.half.jpg",
"034/undistort/images/frame_0000120.jpg.half.jpg",
"034/undistort/images/frame_0000121.jpg.half.jpg",
"034/undistort/images/frame_0000122.jpg.half.jpg",
"034/undistort/images/frame_0000123.jpg.half.jpg",
"034/undistort/images/frame_0000124.jpg.half.jpg",
"034/undistort/images/frame_0000125.jpg.half.jpg",
"034/undistort/images/frame_0000126.jpg.half.jpg",
"034/undistort/images/frame_0000127.jpg.half.jpg",
"034/undistort/images/frame_0000128.jpg.half.jpg",
"034/undistort/images/frame_0000129.jpg.half.jpg",
"034/undistort/images/frame_0000130.jpg.half.jpg",
"034/undistort/images/frame_0000131.jpg.half.jpg",
"034/undistort/images/frame_0000132.jpg.half.jpg",
"034/undistort/images/frame_0000133.jpg.half.jpg",
"034/undistort/images/frame_0000134.jpg.half.jpg",
"034/undistort/images/frame_0000135.jpg.half.jpg",
"034/undistort/images/frame_0000136.jpg.half.jpg",
"034/undistort/images/frame_0000137.jpg.half.jpg",
"034/undistort/images/frame_0000138.jpg.half.jpg",
"034/undistort/images/frame_0000139.jpg.half.jpg",
"034/undistort/images/frame_0000140.jpg.half.jpg",
"034/undistort/images/frame_0000141.jpg.half.jpg",
"034/undistort/images/frame_0000142.jpg.half.jpg",
"034/undistort/images/frame_0000143.jpg.half.jpg",
"034/undistort/images/frame_0000144.jpg.half.jpg",
"034/undistort/images/frame_0000145.jpg.half.jpg",
"034/undistort/images/frame_0000146.jpg.half.jpg",
"034/undistort/images/frame_0000147.jpg.half.jpg",
"034/undistort/images/frame_0000148.jpg.half.jpg",
"034/undistort/images/frame_0000149.jpg.half.jpg",
"034/undistort/images/frame_0000150.jpg.half.jpg",
"034/undistort/images/frame_0000151.jpg.half.jpg",
"034/undistort/images/frame_0000152.jpg.half.jpg",
"034/undistort/images/frame_0000153.jpg.half.jpg",
"034/undistort/images/frame_0000154.jpg.half.jpg",
"034/undistort/images/frame_0000155.jpg.half.jpg",
"034/undistort/images/frame_0000156.jpg.half.jpg",
"034/undistort/images/frame_0000157.jpg.half.jpg",
"034/undistort/images/frame_0000158.jpg.half.jpg",
"034/undistort/images/frame_0000159.jpg.half.jpg",
"034/undistort/images/frame_0000160.jpg.half.jpg",
"034/undistort/images/frame_0000161.jpg.half.jpg",
"034/undistort/images/frame_0000162.jpg.half.jpg",
"034/undistort/images/frame_0000163.jpg.half.jpg",
"034/undistort/images/frame_0000164.jpg.half.jpg",
"034/undistort/images/frame_0000165.jpg.half.jpg",
"034/undistort/images/frame_0000166.jpg.half.jpg",
"034/undistort/images/frame_0000167.jpg.half.jpg",
"034/undistort/images/frame_0000168.jpg.half.jpg",
"034/undistort/images/frame_0000169.jpg.half.jpg",
"034/undistort/images/frame_0000170.jpg.half.jpg",
"034/undistort/images/frame_0000171.jpg.half.jpg",
"034/undistort/images/frame_0000172.jpg.half.jpg",
"034/undistort/images/frame_0000173.jpg.half.jpg",
"034/undistort/images/frame_0000174.jpg.half.jpg",
"034/undistort/images/frame_0000175.jpg.half.jpg",
"034/undistort/images/frame_0000176.jpg.half.jpg",
"034/undistort/images/frame_0000177.jpg.half.jpg",
"034/undistort/images/frame_0000178.jpg.half.jpg",
"034/undistort/images/frame_0000179.jpg.half.jpg",
"034/undistort/images/frame_0000180.jpg.half.jpg",
"034/undistort/images/frame_0000181.jpg.half.jpg",
"034/undistort/images/frame_0000182.jpg.half.jpg",
"034/undistort/images/frame_0000184.jpg.half.jpg",
"034/undistort/images/frame_0000185.jpg.half.jpg",
"034/undistort/images/frame_0000186.jpg.half.jpg",
"034/undistort/images/frame_0000187.jpg.half.jpg",
"034/undistort/images/frame_0000188.jpg.half.jpg",
"034/undistort/images/frame_0000189.jpg.half.jpg",
"034/undistort/images/frame_0000190.jpg.half.jpg",
"034/undistort/images/frame_0000191.jpg.half.jpg",
"034/undistort/images/frame_0000192.jpg.half.jpg",
"034/undistort/images/frame_0000193.jpg.half.jpg",
"034/undistort/images/frame_0000194.jpg.half.jpg",
"034/undistort/images/frame_0000195.jpg.half.jpg",
"034/undistort/images/frame_0000196.jpg.half.jpg",
"034/undistort/images/frame_0000197.jpg.half.jpg",
"034/undistort/images/frame_0000198.jpg.half.jpg",
"034/undistort/images/frame_0000199.jpg.half.jpg",
"034/undistort/images/frame_0000200.jpg.half.jpg",
"034/undistort/images/frame_0000201.jpg.half.jpg",
"034/undistort/images/frame_0000202.jpg.half.jpg",
"034/undistort/images/frame_0000203.jpg.half.jpg",
"034/undistort/images/frame_0000204.jpg.half.jpg",
"034/undistort/images/frame_0000205.jpg.half.jpg",
"034/undistort/images/frame_0000206.jpg.half.jpg",
"034/undistort/images/frame_0000207.jpg.half.jpg",
"034/undistort/images/frame_0000208.jpg.half.jpg",
"034/undistort/images/frame_0000209.jpg.half.jpg",
"034/undistort/images/frame_0000210.jpg.half.jpg",
"034/undistort/images/frame_0000211.jpg.half.jpg",
"034/undistort/images/frame_0000213.jpg.half.jpg",
"034/undistort/images/frame_0000214.jpg.half.jpg",
"034/undistort/images/frame_0000215.jpg.half.jpg",
"034/undistort/images/frame_0000216.jpg.half.jpg",
"034/undistort/images/frame_0000218.jpg.half.jpg",
"034/undistort/images/frame_0000219.jpg.half.jpg",
"034/undistort/images/frame_0000220.jpg.half.jpg",
"034/undistort/images/frame_0000221.jpg.half.jpg",
"034/undistort/images/frame_0000222.jpg.half.jpg",
"034/undistort/images/frame_0000223.jpg.half.jpg",
"034/undistort/images/frame_0000224.jpg.half.jpg",
"034/undistort/images/frame_0000225.jpg.half.jpg",
"034/undistort/images/frame_0000226.jpg.half.jpg",
"034/undistort/images/frame_0000227.jpg.half.jpg",
"034/undistort/images/frame_0000228.jpg.half.jpg",
"034/undistort/images/frame_0000229.jpg.half.jpg",
"034/undistort/images/frame_0000232.jpg.half.jpg",
"034/undistort/images/frame_0000233.jpg.half.jpg",
"034/undistort/images/frame_0000234.jpg.half.jpg",
"034/undistort/images/frame_0000236.jpg.half.jpg",
"034/undistort/images/frame_0000237.jpg.half.jpg",
"034/undistort/images/frame_0000239.jpg.half.jpg",
"034/undistort/images/frame_0000240.jpg.half.jpg",
"034/undistort/images/frame_0000241.jpg.half.jpg",
"034/undistort/images/frame_0000242.jpg.half.jpg",
"034/undistort/images/frame_0000243.jpg.half.jpg",
"034/undistort/images/frame_0000247.jpg.half.jpg",
"034/undistort/images/frame_0000248.jpg.half.jpg",
"034/undistort/images/frame_0000249.jpg.half.jpg",
"034/undistort/images/frame_0000250.jpg.half.jpg",
"034/undistort/images/frame_0000254.jpg.half.jpg",
"034/undistort/images/frame_0000255.jpg.half.jpg",
"034/undistort/images/frame_0000256.jpg.half.jpg",
"034/undistort/images/frame_0000257.jpg.half.jpg",
"034/undistort/images/frame_0000259.jpg.half.jpg",
"034/undistort/images/frame_0000260.jpg.half.jpg",
"034/undistort/images/frame_0000261.jpg.half.jpg",
"034/undistort/images/frame_0000262.jpg.half.jpg",
"034/undistort/images/frame_0000263.jpg.half.jpg",
"034/undistort/images/frame_0000264.jpg.half.jpg",
"034/undistort/images/frame_0000265.jpg.half.jpg",
"034/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000001.jpg.half.jpg",
"036/undistort/images/frame_0000002.jpg.half.jpg",
"036/undistort/images/frame_0000003.jpg.half.jpg",
"036/undistort/images/frame_0000004.jpg.half.jpg",
"036/undistort/images/frame_0000005.jpg.half.jpg",
"036/undistort/images/frame_0000006.jpg.half.jpg",
"036/undistort/images/frame_0000007.jpg.half.jpg",
"036/undistort/images/frame_0000008.jpg.half.jpg",
"036/undistort/images/frame_0000009.jpg.half.jpg",
"036/undistort/images/frame_0000010.jpg.half.jpg",
"036/undistort/images/frame_0000011.jpg.half.jpg",
"036/undistort/images/frame_0000012.jpg.half.jpg",
"036/undistort/images/frame_0000013.jpg.half.jpg",
"036/undistort/images/frame_0000014.jpg.half.jpg",
"036/undistort/images/frame_0000015.jpg.half.jpg",
"036/undistort/images/frame_0000016.jpg.half.jpg",
"036/undistort/images/frame_0000017.jpg.half.jpg",
"036/undistort/images/frame_0000018.jpg.half.jpg",
"036/undistort/images/frame_0000019.jpg.half.jpg",
"036/undistort/images/frame_0000020.jpg.half.jpg",
"036/undistort/images/frame_0000021.jpg.half.jpg",
"036/undistort/images/frame_0000022.jpg.half.jpg",
"036/undistort/images/frame_0000023.jpg.half.jpg",
"036/undistort/images/frame_0000024.jpg.half.jpg",
"036/undistort/images/frame_0000025.jpg.half.jpg",
"036/undistort/images/frame_0000026.jpg.half.jpg",
"036/undistort/images/frame_0000027.jpg.half.jpg",
"036/undistort/images/frame_0000028.jpg.half.jpg",
"036/undistort/images/frame_0000029.jpg.half.jpg",
"036/undistort/images/frame_0000030.jpg.half.jpg",
"036/undistort/images/frame_0000031.jpg.half.jpg",
"036/undistort/images/frame_0000032.jpg.half.jpg",
"036/undistort/images/frame_0000033.jpg.half.jpg",
"036/undistort/images/frame_0000034.jpg.half.jpg",
"036/undistort/images/frame_0000035.jpg.half.jpg",
"036/undistort/images/frame_0000036.jpg.half.jpg",
"036/undistort/images/frame_0000037.jpg.half.jpg",
"036/undistort/images/frame_0000038.jpg.half.jpg",
"036/undistort/images/frame_0000039.jpg.half.jpg",
"036/undistort/images/frame_0000041.jpg.half.jpg",
"036/undistort/images/frame_0000042.jpg.half.jpg",
"036/undistort/images/frame_0000043.jpg.half.jpg",
"036/undistort/images/frame_0000044.jpg.half.jpg",
"036/undistort/images/frame_0000045.jpg.half.jpg",
"036/undistort/images/frame_0000046.jpg.half.jpg",
"036/undistort/images/frame_0000047.jpg.half.jpg",
"036/undistort/images/frame_0000048.jpg.half.jpg",
"036/undistort/images/frame_0000049.jpg.half.jpg",
"036/undistort/images/frame_0000050.jpg.half.jpg",
"036/undistort/images/frame_0000051.jpg.half.jpg",
"036/undistort/images/frame_0000052.jpg.half.jpg",
"036/undistort/images/frame_0000053.jpg.half.jpg",
"036/undistort/images/frame_0000054.jpg.half.jpg",
"036/undistort/images/frame_0000055.jpg.half.jpg",
"036/undistort/images/frame_0000056.jpg.half.jpg",
"036/undistort/images/frame_0000057.jpg.half.jpg",
"036/undistort/images/frame_0000058.jpg.half.jpg",
"036/undistort/images/frame_0000059.jpg.half.jpg",
"036/undistort/images/frame_0000060.jpg.half.jpg",
"036/undistort/images/frame_0000061.jpg.half.jpg",
"036/undistort/images/frame_0000062.jpg.half.jpg",
"036/undistort/images/frame_0000063.jpg.half.jpg",
"036/undistort/images/frame_0000064.jpg.half.jpg",
"036/undistort/images/frame_0000065.jpg.half.jpg",
"036/undistort/images/frame_0000066.jpg.half.jpg",
"036/undistort/images/frame_0000067.jpg.half.jpg",
"036/undistort/images/frame_0000068.jpg.half.jpg",
"036/undistort/images/frame_0000069.jpg.half.jpg",
"036/undistort/images/frame_0000070.jpg.half.jpg",
"036/undistort/images/frame_0000071.jpg.half.jpg",
"036/undistort/images/frame_0000072.jpg.half.jpg",
"036/undistort/images/frame_0000073.jpg.half.jpg",
"036/undistort/images/frame_0000074.jpg.half.jpg",
"036/undistort/images/frame_0000075.jpg.half.jpg",
"036/undistort/images/frame_0000076.jpg.half.jpg",
"036/undistort/images/frame_0000077.jpg.half.jpg",
"036/undistort/images/frame_0000078.jpg.half.jpg",
"036/undistort/images/frame_0000079.jpg.half.jpg",
"036/undistort/images/frame_0000080.jpg.half.jpg",
"036/undistort/images/frame_0000081.jpg.half.jpg",
"036/undistort/images/frame_0000082.jpg.half.jpg",
"036/undistort/images/frame_0000083.jpg.half.jpg",
"036/undistort/images/frame_0000084.jpg.half.jpg",
"036/undistort/images/frame_0000085.jpg.half.jpg",
"036/undistort/images/frame_0000086.jpg.half.jpg",
"036/undistort/images/frame_0000087.jpg.half.jpg",
"036/undistort/images/frame_0000088.jpg.half.jpg",
"036/undistort/images/frame_0000089.jpg.half.jpg",
"036/undistort/images/frame_0000090.jpg.half.jpg",
"036/undistort/images/frame_0000091.jpg.half.jpg",
"036/undistort/images/frame_0000092.jpg.half.jpg",
"036/undistort/images/frame_0000093.jpg.half.jpg",
"036/undistort/images/frame_0000095.jpg.half.jpg",
"036/undistort/images/frame_0000096.jpg.half.jpg",
"036/undistort/images/frame_0000097.jpg.half.jpg",
"036/undistort/images/frame_0000098.jpg.half.jpg",
"036/undistort/images/frame_0000099.jpg.half.jpg",
"036/undistort/images/frame_0000100.jpg.half.jpg",
"036/undistort/images/frame_0000101.jpg.half.jpg",
"036/undistort/images/frame_0000102.jpg.half.jpg",
"036/undistort/images/frame_0000103.jpg.half.jpg",
"036/undistort/images/frame_0000104.jpg.half.jpg",
"036/undistort/images/frame_0000105.jpg.half.jpg",
"036/undistort/images/frame_0000106.jpg.half.jpg",
"036/undistort/images/frame_0000107.jpg.half.jpg",
"036/undistort/images/frame_0000108.jpg.half.jpg",
"036/undistort/images/frame_0000109.jpg.half.jpg",
"036/undistort/images/frame_0000110.jpg.half.jpg",
"036/undistort/images/frame_0000111.jpg.half.jpg",
"036/undistort/images/frame_0000112.jpg.half.jpg",
"036/undistort/images/frame_0000113.jpg.half.jpg",
"036/undistort/images/frame_0000114.jpg.half.jpg",
"036/undistort/images/frame_0000115.jpg.half.jpg",
"036/undistort/images/frame_0000116.jpg.half.jpg",
"036/undistort/images/frame_0000117.jpg.half.jpg",
"036/undistort/images/frame_0000118.jpg.half.jpg",
"036/undistort/images/frame_0000121.jpg.half.jpg",
"036/undistort/images/frame_0000122.jpg.half.jpg",
"036/undistort/images/frame_0000123.jpg.half.jpg",
"036/undistort/images/frame_0000124.jpg.half.jpg",
"036/undistort/images/frame_0000125.jpg.half.jpg",
"036/undistort/images/frame_0000126.jpg.half.jpg",
"036/undistort/images/frame_0000127.jpg.half.jpg",
"036/undistort/images/frame_0000128.jpg.half.jpg",
"036/undistort/images/frame_0000129.jpg.half.jpg",
"036/undistort/images/frame_0000130.jpg.half.jpg",
"036/undistort/images/frame_0000131.jpg.half.jpg",
"036/undistort/images/frame_0000132.jpg.half.jpg",
"036/undistort/images/frame_0000133.jpg.half.jpg",
"036/undistort/images/frame_0000134.jpg.half.jpg",
"036/undistort/images/frame_0000135.jpg.half.jpg",
"036/undistort/images/frame_0000136.jpg.half.jpg",
"036/undistort/images/frame_0000137.jpg.half.jpg",
"036/undistort/images/frame_0000138.jpg.half.jpg",
"036/undistort/images/frame_0000139.jpg.half.jpg",
"036/undistort/images/frame_0000140.jpg.half.jpg",
"036/undistort/images/frame_0000141.jpg.half.jpg",
"036/undistort/images/frame_0000142.jpg.half.jpg",
"036/undistort/images/frame_0000143.jpg.half.jpg",
"036/undistort/images/frame_0000144.jpg.half.jpg",
"036/undistort/images/frame_0000145.jpg.half.jpg",
"036/undistort/images/frame_0000146.jpg.half.jpg",
"036/undistort/images/frame_0000147.jpg.half.jpg",
"036/undistort/images/frame_0000148.jpg.half.jpg",
"036/undistort/images/frame_0000149.jpg.half.jpg",
"036/undistort/images/frame_0000150.jpg.half.jpg",
"036/undistort/images/frame_0000151.jpg.half.jpg",
"036/undistort/images/frame_0000152.jpg.half.jpg",
"036/undistort/images/frame_0000153.jpg.half.jpg",
"036/undistort/images/frame_0000154.jpg.half.jpg",
"036/undistort/images/frame_0000155.jpg.half.jpg",
"036/undistort/images/frame_0000156.jpg.half.jpg",
"036/undistort/images/frame_0000157.jpg.half.jpg",
"036/undistort/images/frame_0000158.jpg.half.jpg",
"036/undistort/images/frame_0000159.jpg.half.jpg",
"036/undistort/images/frame_0000160.jpg.half.jpg",
"036/undistort/images/frame_0000161.jpg.half.jpg",
"036/undistort/images/frame_0000162.jpg.half.jpg",
"036/undistort/images/frame_0000163.jpg.half.jpg",
"036/undistort/images/frame_0000164.jpg.half.jpg",
"036/undistort/images/frame_0000165.jpg.half.jpg",
"036/undistort/images/frame_0000166.jpg.half.jpg",
"036/undistort/images/frame_0000167.jpg.half.jpg",
"036/undistort/images/frame_0000168.jpg.half.jpg",
"036/undistort/images/frame_0000169.jpg.half.jpg",
"036/undistort/images/frame_0000170.jpg.half.jpg",
"036/undistort/images/frame_0000171.jpg.half.jpg",
"036/undistort/images/frame_0000172.jpg.half.jpg",
"036/undistort/images/frame_0000173.jpg.half.jpg",
"036/undistort/images/frame_0000174.jpg.half.jpg",
"036/undistort/images/frame_0000175.jpg.half.jpg",
"036/undistort/images/frame_0000176.jpg.half.jpg",
"036/undistort/images/frame_0000177.jpg.half.jpg",
"036/undistort/images/frame_0000178.jpg.half.jpg",
"036/undistort/images/frame_0000179.jpg.half.jpg",
"036/undistort/images/frame_0000180.jpg.half.jpg",
"036/undistort/images/frame_0000181.jpg.half.jpg",
"036/undistort/images/frame_0000182.jpg.half.jpg",
"036/undistort/images/frame_0000183.jpg.half.jpg",
"036/undistort/images/frame_0000184.jpg.half.jpg",
"036/undistort/images/frame_0000185.jpg.half.jpg",
"036/undistort/images/frame_0000186.jpg.half.jpg",
"036/undistort/images/frame_0000187.jpg.half.jpg",
"036/undistort/images/frame_0000188.jpg.half.jpg",
"036/undistort/images/frame_0000189.jpg.half.jpg",
"036/undistort/images/frame_0000190.jpg.half.jpg",
"036/undistort/images/frame_0000191.jpg.half.jpg",
"036/undistort/images/frame_0000192.jpg.half.jpg",
"036/undistort/images/frame_0000193.jpg.half.jpg",
"036/undistort/images/frame_0000194.jpg.half.jpg",
"036/undistort/images/frame_0000195.jpg.half.jpg",
"036/undistort/images/frame_0000196.jpg.half.jpg",
"036/undistort/images/frame_0000197.jpg.half.jpg",
"036/undistort/images/frame_0000198.jpg.half.jpg",
"036/undistort/images/frame_0000199.jpg.half.jpg",
"036/undistort/images/frame_0000200.jpg.half.jpg",
"036/undistort/images/frame_0000201.jpg.half.jpg",
"036/undistort/images/frame_0000202.jpg.half.jpg",
"036/undistort/images/frame_0000203.jpg.half.jpg",
"036/undistort/images/frame_0000204.jpg.half.jpg",
"036/undistort/images/frame_0000205.jpg.half.jpg",
"036/undistort/images/frame_0000206.jpg.half.jpg",
"036/undistort/images/frame_0000207.jpg.half.jpg",
"036/undistort/images/frame_0000208.jpg.half.jpg",
"036/undistort/images/frame_0000209.jpg.half.jpg",
"036/undistort/images/frame_0000210.jpg.half.jpg",
"036/undistort/images/frame_0000211.jpg.half.jpg",
"036/undistort/images/frame_0000212.jpg.half.jpg",
"036/undistort/images/frame_0000213.jpg.half.jpg",
"036/undistort/images/frame_0000214.jpg.half.jpg",
"036/undistort/images/frame_0000215.jpg.half.jpg",
"036/undistort/images/frame_0000216.jpg.half.jpg",
"036/undistort/images/frame_0000217.jpg.half.jpg",
"036/undistort/images/frame_0000218.jpg.half.jpg",
"036/undistort/images/frame_0000219.jpg.half.jpg",
"036/undistort/images/frame_0000220.jpg.half.jpg",
"036/undistort/images/frame_0000221.jpg.half.jpg",
"036/undistort/images/frame_0000222.jpg.half.jpg",
"036/undistort/images/frame_0000223.jpg.half.jpg",
"036/undistort/images/frame_0000224.jpg.half.jpg",
"036/undistort/images/frame_0000225.jpg.half.jpg",
"036/undistort/images/frame_0000226.jpg.half.jpg",
"036/undistort/images/frame_0000227.jpg.half.jpg",
"036/undistort/images/frame_0000228.jpg.half.jpg",
"036/undistort/images/frame_0000229.jpg.half.jpg",
"036/undistort/images/frame_0000230.jpg.half.jpg",
"036/undistort/images/frame_0000231.jpg.half.jpg",
"036/undistort/images/frame_0000232.jpg.half.jpg",
"036/undistort/images/frame_0000233.jpg.half.jpg",
"036/undistort/images/frame_0000234.jpg.half.jpg",
"036/undistort/images/frame_0000235.jpg.half.jpg",
"036/undistort/images/frame_0000236.jpg.half.jpg",
"036/undistort/images/frame_0000237.jpg.half.jpg",
"036/undistort/images/frame_0000238.jpg.half.jpg",
"036/undistort/images/frame_0000239.jpg.half.jpg",
"036/undistort/images/frame_0000240.jpg.half.jpg",
"036/undistort/images/frame_0000241.jpg.half.jpg",
"036/undistort/images/frame_0000242.jpg.half.jpg",
"036/undistort/images/frame_0000243.jpg.half.jpg",
"036/undistort/images/frame_0000244.jpg.half.jpg",
"036/undistort/images/frame_0000245.jpg.half.jpg",
"036/undistort/images/frame_0000246.jpg.half.jpg",
"036/undistort/images/frame_0000247.jpg.half.jpg",
"036/undistort/images/frame_0000248.jpg.half.jpg",
"036/undistort/images/frame_0000249.jpg.half.jpg",
"036/undistort/images/frame_0000250.jpg.half.jpg",
"036/undistort/images/frame_0000251.jpg.half.jpg",
"036/undistort/images/frame_0000252.jpg.half.jpg",
"036/undistort/images/frame_0000253.jpg.half.jpg",
"036/undistort/images/frame_0000254.jpg.half.jpg",
"036/undistort/images/frame_0000255.jpg.half.jpg",
"036/undistort/images/frame_0000256.jpg.half.jpg",
"036/undistort/images/frame_0000257.jpg.half.jpg",
"036/undistort/images/frame_0000258.jpg.half.jpg",
"036/undistort/images/frame_0000259.jpg.half.jpg",
"036/undistort/images/frame_0000260.jpg.half.jpg",
"036/undistort/images/frame_0000261.jpg.half.jpg",
"036/undistort/images/frame_0000262.jpg.half.jpg",
"036/undistort/images/frame_0000263.jpg.half.jpg",
"036/undistort/images/frame_0000264.jpg.half.jpg",
"036/undistort/images/frame_0000265.jpg.half.jpg",
"036/undistort/images/frame_0000266.jpg.half.jpg",
"036/undistort/images/frame_0000267.jpg.half.jpg",
"036/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000269.jpg.half.jpg",
"036/undistort/images/frame_0000270.jpg.half.jpg",
"036/undistort/images/frame_0000271.jpg.half.jpg",
"036/undistort/images/frame_0000272.jpg.half.jpg",
"036/undistort/images/frame_0000273.jpg.half.jpg",
"036/undistort/images/frame_0000274.jpg.half.jpg",
"036/undistort/images/frame_0000275.jpg.half.jpg",
"036/undistort/images/frame_0000276.jpg.half.jpg",
"036/undistort/images/frame_0000277.jpg.half.jpg",
"036/undistort/images/frame_0000278.jpg.half.jpg",
"036/undistort/images/frame_0000279.jpg.half.jpg",
"036/undistort/images/frame_0000280.jpg.half.jpg",
"036/undistort/images/frame_0000281.jpg.half.jpg",
"036/undistort/images/frame_0000282.jpg.half.jpg",
"036/undistort/images/frame_0000283.jpg.half.jpg",
"036/undistort/images/frame_0000284.jpg.half.jpg",
"036/undistort/images/frame_0000285.jpg.half.jpg",
"036/undistort/images/frame_0000286.jpg.half.jpg",
"036/undistort/images/frame_0000287.jpg.half.jpg",
"036/undistort/images/frame_0000288.jpg.half.jpg",
"036/undistort/images/frame_0000289.jpg.half.jpg",
"036/undistort/images/frame_0000290.jpg.half.jpg",
"036/undistort/images/frame_0000291.jpg.half.jpg",
"036/undistort/images/frame_0000292.jpg.half.jpg",
"036/undistort/images/frame_0000293.jpg.half.jpg",
"036/undistort/images/frame_0000294.jpg.half.jpg",
"036/undistort/images/frame_0000295.jpg.half.jpg",
"036/undistort/images/frame_0000296.jpg.half.jpg",
"036/undistort/images/frame_0000297.jpg.half.jpg",
"036/undistort/images/frame_0000298.jpg.half.jpg",
"036/undistort/images/frame_0000299.jpg.half.jpg",
"036/undistort/images/frame_0000300.jpg.half.jpg",
"036/undistort/images/frame_0000301.jpg.half.jpg",
"036/undistort/images/frame_0000302.jpg.half.jpg",
"036/undistort/images/frame_0000303.jpg.half.jpg",
"036/undistort/images/frame_0000304.jpg.half.jpg",
"036/undistort/images/frame_0000305.jpg.half.jpg",
"036/undistort/images/frame_0000306.jpg.half.jpg",
"036/undistort/images/frame_0000307.jpg.half.jpg",
"036/undistort/images/frame_0000308.jpg.half.jpg",
"036/undistort/images/frame_0000309.jpg.half.jpg",
"036/undistort/images/frame_0000310.jpg.half.jpg",
"036/undistort/images/frame_0000311.jpg.half.jpg",
"036/undistort/images/frame_0000312.jpg.half.jpg",
"036/undistort/images/frame_0000313.jpg.half.jpg",
"036/undistort/images/frame_0000314.jpg.half.jpg",
"036/undistort/images/frame_0000315.jpg.half.jpg",
"036/undistort/images/frame_0000316.jpg.half.jpg",
"036/undistort/images/frame_0000317.jpg.half.jpg",
"036/undistort/images/frame_0000318.jpg.half.jpg",
"036/undistort/images/frame_0000319.jpg.half.jpg",
"036/undistort/images/frame_0000320.jpg.half.jpg",
"036/undistort/images/frame_0000321.jpg.half.jpg",
"036/undistort/images/frame_0000322.jpg.half.jpg",
"036/undistort/images/frame_0000323.jpg.half.jpg",
"036/undistort/images/frame_0000324.jpg.half.jpg",
"036/undistort/images/frame_0000325.jpg.half.jpg",
"036/undistort/images/frame_0000326.jpg.half.jpg",
"036/undistort/images/frame_0000327.jpg.half.jpg",
"036/undistort/images/frame_0000328.jpg.half.jpg",
"036/undistort/images/frame_0000329.jpg.half.jpg",
"036/undistort/images/frame_0000330.jpg.half.jpg",
"036/undistort/images/frame_0000331.jpg.half.jpg",
"036/undistort/images/frame_0000332.jpg.half.jpg",
"036/undistort/images/frame_0000334.jpg.half.jpg",
"036/undistort/images/frame_0000335.jpg.half.jpg",
"036/undistort/images/frame_0000336.jpg.half.jpg",
"036/undistort/images/frame_0000337.jpg.half.jpg",
"036/undistort/images/frame_0000338.jpg.half.jpg",
"036/undistort/images/frame_0000339.jpg.half.jpg",
"036/undistort/images/frame_0000340.jpg.half.jpg",
"036/undistort/images/frame_0000341.jpg.half.jpg",
"036/undistort/images/frame_0000342.jpg.half.jpg",
"036/undistort/images/frame_0000343.jpg.half.jpg",
"036/undistort/images/frame_0000344.jpg.half.jpg",
"036/undistort/images/frame_0000345.jpg.half.jpg",
"036/undistort/images/frame_0000346.jpg.half.jpg",
"036/undistort/images/frame_0000347.jpg.half.jpg",
"036/undistort/images/frame_0000348.jpg.half.jpg",
"036/undistort/images/frame_0000349.jpg.half.jpg",
"036/undistort/images/frame_0000350.jpg.half.jpg",
"036/undistort/images/frame_0000351.jpg.half.jpg",
"036/undistort/images/frame_0000352.jpg.half.jpg",
"036/undistort/images/frame_0000353.jpg.half.jpg",
"036/undistort/images/frame_0000354.jpg.half.jpg",
"036/undistort/images/frame_0000355.jpg.half.jpg",
"036/undistort/images/frame_0000356.jpg.half.jpg",
"036/undistort/images/frame_0000357.jpg.half.jpg",
"036/undistort/images/frame_0000358.jpg.half.jpg",
"036/undistort/images/frame_0000359.jpg.half.jpg",
"036/undistort/images/frame_0000360.jpg.half.jpg",
"036/undistort/images/frame_0000361.jpg.half.jpg",
"036/undistort/images/frame_0000362.jpg.half.jpg",
"036/undistort/images/frame_0000363.jpg.half.jpg",
"036/undistort/images/frame_0000364.jpg.half.jpg",
"036/undistort/images/frame_0000365.jpg.half.jpg",
"036/undistort/images/frame_0000366.jpg.half.jpg",
"036/undistort/images/frame_0000367.jpg.half.jpg",
"036/undistort/images/frame_0000368.jpg.half.jpg",
"036/undistort/images/frame_0000369.jpg.half.jpg",
"036/undistort/images/frame_0000370.jpg.half.jpg",
"036/undistort/images/frame_0000371.jpg.half.jpg",
"036/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000001.jpg.half.jpg",
"037/undistort/images/frame_0000002.jpg.half.jpg",
"037/undistort/images/frame_0000003.jpg.half.jpg",
"037/undistort/images/frame_0000004.jpg.half.jpg",
"037/undistort/images/frame_0000005.jpg.half.jpg",
"037/undistort/images/frame_0000006.jpg.half.jpg",
"037/undistort/images/frame_0000007.jpg.half.jpg",
"037/undistort/images/frame_0000008.jpg.half.jpg",
"037/undistort/images/frame_0000009.jpg.half.jpg",
"037/undistort/images/frame_0000010.jpg.half.jpg",
"037/undistort/images/frame_0000011.jpg.half.jpg",
"037/undistort/images/frame_0000012.jpg.half.jpg",
"037/undistort/images/frame_0000013.jpg.half.jpg",
"037/undistort/images/frame_0000014.jpg.half.jpg",
"037/undistort/images/frame_0000015.jpg.half.jpg",
"037/undistort/images/frame_0000016.jpg.half.jpg",
"037/undistort/images/frame_0000017.jpg.half.jpg",
"037/undistort/images/frame_0000018.jpg.half.jpg",
"037/undistort/images/frame_0000019.jpg.half.jpg",
"037/undistort/images/frame_0000020.jpg.half.jpg",
"037/undistort/images/frame_0000021.jpg.half.jpg",
"037/undistort/images/frame_0000022.jpg.half.jpg",
"037/undistort/images/frame_0000023.jpg.half.jpg",
"037/undistort/images/frame_0000024.jpg.half.jpg",
"037/undistort/images/frame_0000025.jpg.half.jpg",
"037/undistort/images/frame_0000026.jpg.half.jpg",
"037/undistort/images/frame_0000027.jpg.half.jpg",
"037/undistort/images/frame_0000028.jpg.half.jpg",
"037/undistort/images/frame_0000029.jpg.half.jpg",
"037/undistort/images/frame_0000030.jpg.half.jpg",
"037/undistort/images/frame_0000031.jpg.half.jpg",
"037/undistort/images/frame_0000032.jpg.half.jpg",
"037/undistort/images/frame_0000033.jpg.half.jpg",
"037/undistort/images/frame_0000034.jpg.half.jpg",
"037/undistort/images/frame_0000035.jpg.half.jpg",
"037/undistort/images/frame_0000036.jpg.half.jpg",
"037/undistort/images/frame_0000037.jpg.half.jpg",
"037/undistort/images/frame_0000038.jpg.half.jpg",
"037/undistort/images/frame_0000039.jpg.half.jpg",
"037/undistort/images/frame_0000040.jpg.half.jpg",
"037/undistort/images/frame_0000041.jpg.half.jpg",
"037/undistort/images/frame_0000042.jpg.half.jpg",
"037/undistort/images/frame_0000043.jpg.half.jpg",
"037/undistort/images/frame_0000044.jpg.half.jpg",
"037/undistort/images/frame_0000045.jpg.half.jpg",
"037/undistort/images/frame_0000046.jpg.half.jpg",
"037/undistort/images/frame_0000047.jpg.half.jpg",
"037/undistort/images/frame_0000048.jpg.half.jpg",
"037/undistort/images/frame_0000049.jpg.half.jpg",
"037/undistort/images/frame_0000050.jpg.half.jpg",
"037/undistort/images/frame_0000051.jpg.half.jpg",
"037/undistort/images/frame_0000052.jpg.half.jpg",
"037/undistort/images/frame_0000053.jpg.half.jpg",
"037/undistort/images/frame_0000054.jpg.half.jpg",
"037/undistort/images/frame_0000055.jpg.half.jpg",
"037/undistort/images/frame_0000056.jpg.half.jpg",
"037/undistort/images/frame_0000057.jpg.half.jpg",
"037/undistort/images/frame_0000058.jpg.half.jpg",
"037/undistort/images/frame_0000059.jpg.half.jpg",
"037/undistort/images/frame_0000060.jpg.half.jpg",
"037/undistort/images/frame_0000061.jpg.half.jpg",
"037/undistort/images/frame_0000062.jpg.half.jpg",
"037/undistort/images/frame_0000063.jpg.half.jpg",
"037/undistort/images/frame_0000064.jpg.half.jpg",
"037/undistort/images/frame_0000065.jpg.half.jpg",
"037/undistort/images/frame_0000066.jpg.half.jpg",
"037/undistort/images/frame_0000067.jpg.half.jpg",
"037/undistort/images/frame_0000068.jpg.half.jpg",
"037/undistort/images/frame_0000069.jpg.half.jpg",
"037/undistort/images/frame_0000070.jpg.half.jpg",
"037/undistort/images/frame_0000071.jpg.half.jpg",
"037/undistort/images/frame_0000072.jpg.half.jpg",
"037/undistort/images/frame_0000073.jpg.half.jpg",
"037/undistort/images/frame_0000074.jpg.half.jpg",
"037/undistort/images/frame_0000075.jpg.half.jpg",
"037/undistort/images/frame_0000076.jpg.half.jpg",
"037/undistort/images/frame_0000077.jpg.half.jpg",
"037/undistort/images/frame_0000078.jpg.half.jpg",
"037/undistort/images/frame_0000079.jpg.half.jpg",
"037/undistort/images/frame_0000080.jpg.half.jpg",
"037/undistort/images/frame_0000081.jpg.half.jpg",
"037/undistort/images/frame_0000082.jpg.half.jpg",
"037/undistort/images/frame_0000083.jpg.half.jpg",
"037/undistort/images/frame_0000084.jpg.half.jpg",
"037/undistort/images/frame_0000085.jpg.half.jpg",
"037/undistort/images/frame_0000086.jpg.half.jpg",
"037/undistort/images/frame_0000087.jpg.half.jpg",
"037/undistort/images/frame_0000088.jpg.half.jpg",
"037/undistort/images/frame_0000089.jpg.half.jpg",
"037/undistort/images/frame_0000090.jpg.half.jpg",
"037/undistort/images/frame_0000091.jpg.half.jpg",
"037/undistort/images/frame_0000092.jpg.half.jpg",
"037/undistort/images/frame_0000093.jpg.half.jpg",
"037/undistort/images/frame_0000094.jpg.half.jpg",
"037/undistort/images/frame_0000095.jpg.half.jpg",
"037/undistort/images/frame_0000096.jpg.half.jpg",
"037/undistort/images/frame_0000097.jpg.half.jpg",
"037/undistort/images/frame_0000098.jpg.half.jpg",
"037/undistort/images/frame_0000099.jpg.half.jpg",
"037/undistort/images/frame_0000100.jpg.half.jpg",
"037/undistort/images/frame_0000101.jpg.half.jpg",
"037/undistort/images/frame_0000102.jpg.half.jpg",
"037/undistort/images/frame_0000103.jpg.half.jpg",
"037/undistort/images/frame_0000104.jpg.half.jpg",
"037/undistort/images/frame_0000105.jpg.half.jpg",
"037/undistort/images/frame_0000106.jpg.half.jpg",
"037/undistort/images/frame_0000107.jpg.half.jpg",
"037/undistort/images/frame_0000108.jpg.half.jpg",
"037/undistort/images/frame_0000109.jpg.half.jpg",
"037/undistort/images/frame_0000110.jpg.half.jpg",
"037/undistort/images/frame_0000111.jpg.half.jpg",
"037/undistort/images/frame_0000112.jpg.half.jpg",
"037/undistort/images/frame_0000113.jpg.half.jpg",
"037/undistort/images/frame_0000114.jpg.half.jpg",
"037/undistort/images/frame_0000115.jpg.half.jpg",
"037/undistort/images/frame_0000116.jpg.half.jpg",
"037/undistort/images/frame_0000117.jpg.half.jpg",
"037/undistort/images/frame_0000118.jpg.half.jpg",
"037/undistort/images/frame_0000119.jpg.half.jpg",
"037/undistort/images/frame_0000120.jpg.half.jpg",
"037/undistort/images/frame_0000121.jpg.half.jpg",
"037/undistort/images/frame_0000122.jpg.half.jpg",
"037/undistort/images/frame_0000123.jpg.half.jpg",
"037/undistort/images/frame_0000124.jpg.half.jpg",
"037/undistort/images/frame_0000125.jpg.half.jpg",
"037/undistort/images/frame_0000126.jpg.half.jpg",
"037/undistort/images/frame_0000127.jpg.half.jpg",
"037/undistort/images/frame_0000128.jpg.half.jpg",
"037/undistort/images/frame_0000129.jpg.half.jpg",
"037/undistort/images/frame_0000130.jpg.half.jpg",
"037/undistort/images/frame_0000131.jpg.half.jpg",
"037/undistort/images/frame_0000132.jpg.half.jpg",
"037/undistort/images/frame_0000133.jpg.half.jpg",
"037/undistort/images/frame_0000134.jpg.half.jpg",
"037/undistort/images/frame_0000135.jpg.half.jpg",
"037/undistort/images/frame_0000136.jpg.half.jpg",
"037/undistort/images/frame_0000137.jpg.half.jpg",
"037/undistort/images/frame_0000138.jpg.half.jpg",
"037/undistort/images/frame_0000139.jpg.half.jpg",
"037/undistort/images/frame_0000140.jpg.half.jpg",
"037/undistort/images/frame_0000141.jpg.half.jpg",
"037/undistort/images/frame_0000142.jpg.half.jpg",
"037/undistort/images/frame_0000143.jpg.half.jpg",
"037/undistort/images/frame_0000144.jpg.half.jpg",
"037/undistort/images/frame_0000145.jpg.half.jpg",
"037/undistort/images/frame_0000146.jpg.half.jpg",
"037/undistort/images/frame_0000147.jpg.half.jpg",
"037/undistort/images/frame_0000148.jpg.half.jpg",
"037/undistort/images/frame_0000149.jpg.half.jpg",
"037/undistort/images/frame_0000150.jpg.half.jpg",
"037/undistort/images/frame_0000151.jpg.half.jpg",
"037/undistort/images/frame_0000152.jpg.half.jpg",
"037/undistort/images/frame_0000153.jpg.half.jpg",
"037/undistort/images/frame_0000154.jpg.half.jpg",
"037/undistort/images/frame_0000155.jpg.half.jpg",
"037/undistort/images/frame_0000156.jpg.half.jpg",
"037/undistort/images/frame_0000157.jpg.half.jpg",
"037/undistort/images/frame_0000158.jpg.half.jpg",
"037/undistort/images/frame_0000159.jpg.half.jpg",
"037/undistort/images/frame_0000160.jpg.half.jpg",
"037/undistort/images/frame_0000161.jpg.half.jpg",
"037/undistort/images/frame_0000162.jpg.half.jpg",
"037/undistort/images/frame_0000163.jpg.half.jpg",
"037/undistort/images/frame_0000164.jpg.half.jpg",
"037/undistort/images/frame_0000165.jpg.half.jpg",
"037/undistort/images/frame_0000166.jpg.half.jpg",
"037/undistort/images/frame_0000167.jpg.half.jpg",
"037/undistort/images/frame_0000168.jpg.half.jpg",
"037/undistort/images/frame_0000169.jpg.half.jpg",
"037/undistort/images/frame_0000170.jpg.half.jpg",
"037/undistort/images/frame_0000171.jpg.half.jpg",
"037/undistort/images/frame_0000172.jpg.half.jpg",
"037/undistort/images/frame_0000173.jpg.half.jpg",
"037/undistort/images/frame_0000174.jpg.half.jpg",
"037/undistort/images/frame_0000175.jpg.half.jpg",
"037/undistort/images/frame_0000176.jpg.half.jpg",
"037/undistort/images/frame_0000177.jpg.half.jpg",
"037/undistort/images/frame_0000178.jpg.half.jpg",
"037/undistort/images/frame_0000179.jpg.half.jpg",
"037/undistort/images/frame_0000180.jpg.half.jpg",
"037/undistort/images/frame_0000181.jpg.half.jpg",
"037/undistort/images/frame_0000182.jpg.half.jpg",
"037/undistort/images/frame_0000183.jpg.half.jpg",
"037/undistort/images/frame_0000184.jpg.half.jpg",
"037/undistort/images/frame_0000185.jpg.half.jpg",
"037/undistort/images/frame_0000186.jpg.half.jpg",
"037/undistort/images/frame_0000187.jpg.half.jpg",
"037/undistort/images/frame_0000188.jpg.half.jpg",
"037/undistort/images/frame_0000189.jpg.half.jpg",
"037/undistort/images/frame_0000190.jpg.half.jpg",
"037/undistort/images/frame_0000191.jpg.half.jpg",
"037/undistort/images/frame_0000192.jpg.half.jpg",
"037/undistort/images/frame_0000193.jpg.half.jpg",
"037/undistort/images/frame_0000194.jpg.half.jpg",
"037/undistort/images/frame_0000195.jpg.half.jpg",
"037/undistort/images/frame_0000196.jpg.half.jpg",
"037/undistort/images/frame_0000197.jpg.half.jpg",
"037/undistort/images/frame_0000198.jpg.half.jpg",
"037/undistort/images/frame_0000199.jpg.half.jpg",
"037/undistort/images/frame_0000200.jpg.half.jpg",
"037/undistort/images/frame_0000201.jpg.half.jpg",
"037/undistort/images/frame_0000202.jpg.half.jpg",
"037/undistort/images/frame_0000203.jpg.half.jpg",
"037/undistort/images/frame_0000204.jpg.half.jpg",
"037/undistort/images/frame_0000205.jpg.half.jpg",
"037/undistort/images/frame_0000206.jpg.half.jpg",
"037/undistort/images/frame_0000207.jpg.half.jpg",
"037/undistort/images/frame_0000208.jpg.half.jpg",
"037/undistort/images/frame_0000209.jpg.half.jpg",
"037/undistort/images/frame_0000210.jpg.half.jpg",
"037/undistort/images/frame_0000211.jpg.half.jpg",
"037/undistort/images/frame_0000212.jpg.half.jpg",
"037/undistort/images/frame_0000213.jpg.half.jpg",
"037/undistort/images/frame_0000214.jpg.half.jpg",
"037/undistort/images/frame_0000215.jpg.half.jpg",
"037/undistort/images/frame_0000216.jpg.half.jpg",
"037/undistort/images/frame_0000217.jpg.half.jpg",
"037/undistort/images/frame_0000218.jpg.half.jpg",
"037/undistort/images/frame_0000219.jpg.half.jpg",
"037/undistort/images/frame_0000220.jpg.half.jpg",
"037/undistort/images/frame_0000221.jpg.half.jpg",
"037/undistort/images/frame_0000222.jpg.half.jpg",
"037/undistort/images/frame_0000223.jpg.half.jpg",
"037/undistort/images/frame_0000224.jpg.half.jpg",
"037/undistort/images/frame_0000225.jpg.half.jpg",
"037/undistort/images/frame_0000226.jpg.half.jpg",
"037/undistort/images/frame_0000227.jpg.half.jpg",
"037/undistort/images/frame_0000228.jpg.half.jpg",
"037/undistort/images/frame_0000229.jpg.half.jpg",
"037/undistort/images/frame_0000230.jpg.half.jpg",
"037/undistort/images/frame_0000231.jpg.half.jpg",
"037/undistort/images/frame_0000232.jpg.half.jpg",
"037/undistort/images/frame_0000233.jpg.half.jpg",
"037/undistort/images/frame_0000234.jpg.half.jpg",
"037/undistort/images/frame_0000235.jpg.half.jpg",
"037/undistort/images/frame_0000236.jpg.half.jpg",
"037/undistort/images/frame_0000237.jpg.half.jpg",
"037/undistort/images/frame_0000238.jpg.half.jpg",
"037/undistort/images/frame_0000239.jpg.half.jpg",
"037/undistort/images/frame_0000240.jpg.half.jpg",
"037/undistort/images/frame_0000241.jpg.half.jpg",
"037/undistort/images/frame_0000242.jpg.half.jpg",
"037/undistort/images/frame_0000243.jpg.half.jpg",
"037/undistort/images/frame_0000244.jpg.half.jpg",
"037/undistort/images/frame_0000245.jpg.half.jpg",
"037/undistort/images/frame_0000246.jpg.half.jpg",
"037/undistort/images/frame_0000247.jpg.half.jpg",
"037/undistort/images/frame_0000248.jpg.half.jpg",
"037/undistort/images/frame_0000249.jpg.half.jpg",
"037/undistort/images/frame_0000250.jpg.half.jpg",
"037/undistort/images/frame_0000252.jpg.half.jpg",
"037/undistort/images/frame_0000253.jpg.half.jpg",
"037/undistort/images/frame_0000254.jpg.half.jpg",
"037/undistort/images/frame_0000255.jpg.half.jpg",
"037/undistort/images/frame_0000257.jpg.half.jpg",
"037/undistort/images/frame_0000260.jpg.half.jpg",
"037/undistort/images/frame_0000261.jpg.half.jpg",
"037/undistort/images/frame_0000262.jpg.half.jpg",
"037/undistort/images/frame_0000263.jpg.half.jpg",
"037/undistort/images/frame_0000264.jpg.half.jpg",
"037/undistort/images/frame_0000265.jpg.half.jpg",
"037/undistort/images/frame_0000266.jpg.half.jpg",
"037/undistort/images/frame_0000267.jpg.half.jpg",
"037/undistort/images/frame_0000268.jpg.half.jpg",
"037/undistort/images/frame_0000269.jpg.half.jpg",
"037/undistort/images/frame_0000270.jpg.half.jpg",
"037/undistort/images/frame_0000271.jpg.half.jpg",
"037/undistort/images/frame_0000272.jpg.half.jpg",
"037/undistort/images/frame_0000273.jpg.half.jpg",
"037/undistort/images/frame_0000274.jpg.half.jpg",
"037/undistort/images/frame_0000275.jpg.half.jpg",
"037/undistort/images/frame_0000276.jpg.half.jpg",
"037/undistort/images/frame_0000277.jpg.half.jpg",
"037/undistort/images/frame_0000278.jpg.half.jpg",
"037/undistort/images/frame_0000279.jpg.half.jpg",
"037/undistort/images/frame_0000280.jpg.half.jpg",
"037/undistort/images/frame_0000281.jpg.half.jpg",
"037/undistort/images/frame_0000282.jpg.half.jpg",
"037/undistort/images/frame_0000283.jpg.half.jpg",
"037/undistort/images/frame_0000284.jpg.half.jpg",
"037/undistort/images/frame_0000285.jpg.half.jpg",
"037/undistort/images/frame_0000286.jpg.half.jpg",
"037/undistort/images/frame_0000287.jpg.half.jpg",
"037/undistort/images/frame_0000288.jpg.half.jpg",
"037/undistort/images/frame_0000289.jpg.half.jpg",
"037/undistort/images/frame_0000290.jpg.half.jpg",
"037/undistort/images/frame_0000291.jpg.half.jpg",
"037/undistort/images/frame_0000292.jpg.half.jpg",
"037/undistort/images/frame_0000293.jpg.half.jpg",
"037/undistort/images/frame_0000294.jpg.half.jpg",
"037/undistort/images/frame_0000295.jpg.half.jpg",
"037/undistort/images/frame_0000296.jpg.half.jpg",
"037/undistort/images/frame_0000297.jpg.half.jpg",
"037/undistort/images/frame_0000298.jpg.half.jpg",
"037/undistort/images/frame_0000299.jpg.half.jpg",
"037/undistort/images/frame_0000300.jpg.half.jpg",
"037/undistort/images/frame_0000301.jpg.half.jpg",
"037/undistort/images/frame_0000302.jpg.half.jpg",
"037/undistort/images/frame_0000303.jpg.half.jpg",
"037/undistort/images/frame_0000304.jpg.half.jpg",
"037/undistort/images/frame_0000305.jpg.half.jpg",
"037/undistort/images/frame_0000306.jpg.half.jpg",
"037/undistort/images/frame_0000307.jpg.half.jpg",
"037/undistort/images/frame_0000308.jpg.half.jpg",
"037/undistort/images/frame_0000309.jpg.half.jpg",
"037/undistort/images/frame_0000310.jpg.half.jpg",
"037/undistort/images/frame_0000311.jpg.half.jpg",
"037/undistort/images/frame_0000312.jpg.half.jpg",
"037/undistort/images/frame_0000313.jpg.half.jpg",
"037/undistort/images/frame_0000314.jpg.half.jpg",
"037/undistort/images/frame_0000315.jpg.half.jpg",
"037/undistort/images/frame_0000316.jpg.half.jpg",
"037/undistort/images/frame_0000317.jpg.half.jpg",
"037/undistort/images/frame_0000318.jpg.half.jpg",
"037/undistort/images/frame_0000319.jpg.half.jpg",
"037/undistort/images/frame_0000320.jpg.half.jpg",
"037/undistort/images/frame_0000321.jpg.half.jpg",
"037/undistort/images/frame_0000322.jpg.half.jpg",
"037/undistort/images/frame_0000323.jpg.half.jpg",
"037/undistort/images/frame_0000324.jpg.half.jpg",
"037/undistort/images/frame_0000325.jpg.half.jpg",
"037/undistort/images/frame_0000326.jpg.half.jpg",
"037/undistort/images/frame_0000327.jpg.half.jpg",
"037/undistort/images/frame_0000328.jpg.half.jpg",
"037/undistort/images/frame_0000329.jpg.half.jpg",
"037/undistort/images/frame_0000330.jpg.half.jpg",
"037/undistort/images/frame_0000331.jpg.half.jpg",
"037/undistort/images/frame_0000332.jpg.half.jpg",
"037/undistort/images/frame_0000333.jpg.half.jpg",
"037/undistort/images/frame_0000334.jpg.half.jpg",
"037/undistort/images/frame_0000335.jpg.half.jpg",
"037/undistort/images/frame_0000336.jpg.half.jpg",
"037/undistort/images/frame_0000337.jpg.half.jpg",
"037/undistort/images/frame_0000338.jpg.half.jpg",
"037/undistort/images/frame_0000339.jpg.half.jpg",
"037/undistort/images/frame_0000340.jpg.half.jpg",
"037/undistort/images/frame_0000341.jpg.half.jpg",
"037/undistort/images/frame_0000342.jpg.half.jpg",
"037/undistort/images/frame_0000343.jpg.half.jpg",
"037/undistort/images/frame_0000344.jpg.half.jpg",
"037/undistort/images/frame_0000345.jpg.half.jpg",
"037/undistort/images/frame_0000346.jpg.half.jpg",
"037/undistort/images/frame_0000347.jpg.half.jpg",
"037/undistort/images/frame_0000348.jpg.half.jpg",
"037/undistort/images/frame_0000349.jpg.half.jpg",
"037/undistort/images/frame_0000350.jpg.half.jpg",
"037/undistort/images/frame_0000351.jpg.half.jpg",
"037/undistort/images/frame_0000352.jpg.half.jpg",
"037/undistort/images/frame_0000353.jpg.half.jpg",
"037/undistort/images/frame_0000354.jpg.half.jpg",
"037/undistort/images/frame_0000355.jpg.half.jpg",
"037/undistort/images/frame_0000356.jpg.half.jpg",
"037/undistort/images/frame_0000357.jpg.half.jpg",
"037/undistort/images/frame_0000358.jpg.half.jpg",
"037/undistort/images/frame_0000359.jpg.half.jpg",
"037/undistort/images/frame_0000360.jpg.half.jpg",
"037/undistort/images/frame_0000361.jpg.half.jpg",
"037/undistort/images/frame_0000362.jpg.half.jpg",
"037/undistort/images/frame_0000363.jpg.half.jpg",
"037/undistort/images/frame_0000364.jpg.half.jpg",
"037/undistort/images/frame_0000365.jpg.half.jpg",
"037/undistort/images/frame_0000366.jpg.half.jpg",
"037/undistort/images/frame_0000367.jpg.half.jpg",
"037/undistort/images/frame_0000368.jpg.half.jpg",
"037/undistort/images/frame_0000369.jpg.half.jpg",
"037/undistort/images/frame_0000370.jpg.half.jpg",
"037/undistort/images/frame_0000371.jpg.half.jpg",
"037/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000373.jpg.half.jpg",
"037/undistort/images/frame_0000374.jpg.half.jpg",
"037/undistort/images/frame_0000375.jpg.half.jpg",
"037/undistort/images/frame_0000376.jpg.half.jpg",
"037/undistort/images/frame_0000377.jpg.half.jpg",
"037/undistort/images/frame_0000378.jpg.half.jpg",
"037/undistort/images/frame_0000379.jpg.half.jpg",
"037/undistort/images/frame_0000380.jpg.half.jpg",
"037/undistort/images/frame_0000381.jpg.half.jpg",
"037/undistort/images/frame_0000382.jpg.half.jpg",
"037/undistort/images/frame_0000383.jpg.half.jpg",
"037/undistort/images/frame_0000384.jpg.half.jpg",
"037/undistort/images/frame_0000385.jpg.half.jpg",
"037/undistort/images/frame_0000386.jpg.half.jpg",
"042/undistort/images/frame_0000001.jpg.half.jpg",
"042/undistort/images/frame_0000002.jpg.half.jpg",
"042/undistort/images/frame_0000003.jpg.half.jpg",
"042/undistort/images/frame_0000004.jpg.half.jpg",
"042/undistort/images/frame_0000005.jpg.half.jpg",
"042/undistort/images/frame_0000006.jpg.half.jpg",
"042/undistort/images/frame_0000008.jpg.half.jpg",
"042/undistort/images/frame_0000009.jpg.half.jpg",
"042/undistort/images/frame_0000010.jpg.half.jpg",
"042/undistort/images/frame_0000011.jpg.half.jpg",
"042/undistort/images/frame_0000013.jpg.half.jpg",
"042/undistort/images/frame_0000014.jpg.half.jpg",
"042/undistort/images/frame_0000015.jpg.half.jpg",
"042/undistort/images/frame_0000016.jpg.half.jpg",
"042/undistort/images/frame_0000017.jpg.half.jpg",
"042/undistort/images/frame_0000018.jpg.half.jpg",
"042/undistort/images/frame_0000019.jpg.half.jpg",
"042/undistort/images/frame_0000020.jpg.half.jpg",
"042/undistort/images/frame_0000021.jpg.half.jpg",
"042/undistort/images/frame_0000022.jpg.half.jpg",
"042/undistort/images/frame_0000023.jpg.half.jpg",
"042/undistort/images/frame_0000024.jpg.half.jpg",
"042/undistort/images/frame_0000025.jpg.half.jpg",
"042/undistort/images/frame_0000026.jpg.half.jpg",
"042/undistort/images/frame_0000027.jpg.half.jpg",
"042/undistort/images/frame_0000029.jpg.half.jpg",
"042/undistort/images/frame_0000031.jpg.half.jpg",
"042/undistort/images/frame_0000032.jpg.half.jpg",
"042/undistort/images/frame_0000033.jpg.half.jpg",
"042/undistort/images/frame_0000034.jpg.half.jpg",
"042/undistort/images/frame_0000035.jpg.half.jpg",
"042/undistort/images/frame_0000037.jpg.half.jpg",
"042/undistort/images/frame_0000040.jpg.half.jpg",
"042/undistort/images/frame_0000042.jpg.half.jpg",
"042/undistort/images/frame_0000043.jpg.half.jpg",
"042/undistort/images/frame_0000045.jpg.half.jpg",
"042/undistort/images/frame_0000046.jpg.half.jpg",
"042/undistort/images/frame_0000047.jpg.half.jpg",
"042/undistort/images/frame_0000048.jpg.half.jpg",
"042/undistort/images/frame_0000050.jpg.half.jpg",
"042/undistort/images/frame_0000051.jpg.half.jpg",
"042/undistort/images/frame_0000052.jpg.half.jpg",
"042/undistort/images/frame_0000053.jpg.half.jpg",
"042/undistort/images/frame_0000054.jpg.half.jpg",
"042/undistort/images/frame_0000056.jpg.half.jpg",
"042/undistort/images/frame_0000057.jpg.half.jpg",
"042/undistort/images/frame_0000058.jpg.half.jpg",
"042/undistort/images/frame_0000061.jpg.half.jpg",
"042/undistort/images/frame_0000126.jpg.half.jpg",
"042/undistort/images/frame_0000127.jpg.half.jpg",
"042/undistort/images/frame_0000129.jpg.half.jpg",
"042/undistort/images/frame_0000133.jpg.half.jpg",
"042/undistort/images/frame_0000134.jpg.half.jpg",
"042/undistort/images/frame_0000135.jpg.half.jpg",
"042/undistort/images/frame_0000136.jpg.half.jpg",
"042/undistort/images/frame_0000137.jpg.half.jpg",
"042/undistort/images/frame_0000138.jpg.half.jpg",
"042/undistort/images/frame_0000139.jpg.half.jpg",
"042/undistort/images/frame_0000140.jpg.half.jpg",
"042/undistort/images/frame_0000141.jpg.half.jpg",
"042/undistort/images/frame_0000143.jpg.half.jpg",
"042/undistort/images/frame_0000144.jpg.half.jpg",
"042/undistort/images/frame_0000146.jpg.half.jpg",
"042/undistort/images/frame_0000147.jpg.half.jpg",
"042/undistort/images/frame_0000148.jpg.half.jpg",
"042/undistort/images/frame_0000150.jpg.half.jpg",
"042/undistort/images/frame_0000151.jpg.half.jpg",
"042/undistort/images/frame_0000152.jpg.half.jpg",
"042/undistort/images/frame_0000153.jpg.half.jpg",
"042/undistort/images/frame_0000154.jpg.half.jpg",
"042/undistort/images/frame_0000158.jpg.half.jpg",
"042/undistort/images/frame_0000159.jpg.half.jpg",
"042/undistort/images/frame_0000161.jpg.half.jpg",
"042/undistort/images/frame_0000163.jpg.half.jpg",
"042/undistort/images/frame_0000164.jpg.half.jpg",
"042/undistort/images/frame_0000165.jpg.half.jpg",
"042/undistort/images/frame_0000166.jpg.half.jpg",
"042/undistort/images/frame_0000167.jpg.half.jpg",
"042/undistort/images/frame_0000168.jpg.half.jpg",
"042/undistort/images/frame_0000169.jpg.half.jpg",
"042/undistort/images/frame_0000170.jpg.half.jpg",
"042/undistort/images/frame_0000172.jpg.half.jpg",
"042/undistort/images/frame_0000173.jpg.half.jpg",
"042/undistort/images/frame_0000174.jpg.half.jpg",
"042/undistort/images/frame_0000175.jpg.half.jpg",
"042/undistort/images/frame_0000176.jpg.half.jpg",
"042/undistort/images/frame_0000177.jpg.half.jpg",
"042/undistort/images/frame_0000178.jpg.half.jpg",
"042/undistort/images/frame_0000179.jpg.half.jpg",
"042/undistort/images/frame_0000180.jpg.half.jpg",
"042/undistort/images/frame_0000181.jpg.half.jpg",
"042/undistort/images/frame_0000182.jpg.half.jpg",
"042/undistort/images/frame_0000183.jpg.half.jpg",
"042/undistort/images/frame_0000184.jpg.half.jpg",
"042/undistort/images/frame_0000185.jpg.half.jpg",
"042/undistort/images/frame_0000186.jpg.half.jpg",
"042/undistort/images/frame_0000187.jpg.half.jpg",
"042/undistort/images/frame_0000188.jpg.half.jpg",
"042/undistort/images/frame_0000189.jpg.half.jpg",
"042/undistort/images/frame_0000190.jpg.half.jpg",
"042/undistort/images/frame_0000191.jpg.half.jpg",
"042/undistort/images/frame_0000192.jpg.half.jpg",
"042/undistort/images/frame_0000193.jpg.half.jpg",
"042/undistort/images/frame_0000194.jpg.half.jpg",
"042/undistort/images/frame_0000195.jpg.half.jpg",
"042/undistort/images/frame_0000196.jpg.half.jpg",
"042/undistort/images/frame_0000197.jpg.half.jpg",
"042/undistort/images/frame_0000198.jpg.half.jpg",
"042/undistort/images/frame_0000199.jpg.half.jpg",
"042/undistort/images/frame_0000200.jpg.half.jpg",
"042/undistort/images/frame_0000201.jpg.half.jpg",
"042/undistort/images/frame_0000202.jpg.half.jpg",
"042/undistort/images/frame_0000203.jpg.half.jpg",
"042/undistort/images/frame_0000204.jpg.half.jpg",
"042/undistort/images/frame_0000205.jpg.half.jpg",
"042/undistort/images/frame_0000207.jpg.half.jpg",
"042/undistort/images/frame_0000208.jpg.half.jpg",
"042/undistort/images/frame_0000209.jpg.half.jpg",
"042/undistort/images/frame_0000210.jpg.half.jpg",
"042/undistort/images/frame_0000211.jpg.half.jpg",
"042/undistort/images/frame_0000214.jpg.half.jpg",
"042/undistort/images/frame_0000225.jpg.half.jpg",
"042/undistort/images/frame_0000231.jpg.half.jpg",
"042/undistort/images/frame_0000232.jpg.half.jpg",
"042/undistort/images/frame_0000233.jpg.half.jpg",
"042/undistort/images/frame_0000234.jpg.half.jpg",
"042/undistort/images/frame_0000235.jpg.half.jpg",
"042/undistort/images/frame_0000237.jpg.half.jpg",
"042/undistort/images/frame_0000238.jpg.half.jpg",
"042/undistort/images/frame_0000239.jpg.half.jpg",
"042/undistort/images/frame_0000241.jpg.half.jpg",
"042/undistort/images/frame_0000242.jpg.half.jpg",
"042/undistort/images/frame_0000243.jpg.half.jpg",
"042/undistort/images/frame_0000244.jpg.half.jpg",
"042/undistort/images/frame_0000245.jpg.half.jpg",
"042/undistort/images/frame_0000246.jpg.half.jpg",
"042/undistort/images/frame_0000247.jpg.half.jpg",
"042/undistort/images/frame_0000248.jpg.half.jpg",
"042/undistort/images/frame_0000251.jpg.half.jpg",
"042/undistort/images/frame_0000252.jpg.half.jpg",
"042/undistort/images/frame_0000253.jpg.half.jpg",
"042/undistort/images/frame_0000254.jpg.half.jpg",
"042/undistort/images/frame_0000255.jpg.half.jpg",
"042/undistort/images/frame_0000256.jpg.half.jpg",
"042/undistort/images/frame_0000257.jpg.half.jpg",]
|
c3dm-main
|
c3dm/dataset/dataset_configs.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from setuptools import find_packages, setup
install_requires = [
"numpy",
"pandas",
"Pillow",
"pytorch-lightning",
"pyyaml",
"scipy",
"torch",
"torchvision",
"tqdm",
]
setup(
name="covidprognosis",
author="Facebook AI Research",
author_email="mmuckley@fb.com",
version="0.1",
packages=find_packages(exclude=["tests", "cp_examples", "configs"]),
setup_requires=["wheel"],
install_requires=install_requires,
)
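# Hedged note (added for illustration; not part of the original file): with
# this setup.py, the package and its listed dependencies install in editable
# mode via `pip install -e .` run from the repository root.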
|
CovidPrognosis-main
|
setup.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from pathlib import Path
import numpy as np
import pytest
import yaml
from covidprognosis.data import (
CheXpertDataset,
CombinedXrayDataset,
MimicCxrJpgDataset,
NIHChestDataset,
)
from PIL import Image
DATA_CONFIG = "configs/data.yaml"
def create_input(shape, label_count=12):
image = np.arange(np.prod(shape)).reshape(shape).astype(np.uint8)
image = Image.fromarray(image)
labels = []
for _ in range(label_count):
if np.random.normal() < 0.1:
labels.append(np.nan)
elif np.random.normal() < 0.2:
labels.append(-1)
elif np.random.normal() < 0.6:
labels.append(0)
else:
labels.append(1)
labels = np.array(labels)
return {"image": image, "labels": labels, "metadata": {}}
def fetch_dataset(dataset_name, transform):
dataset_name, split = dataset_name.split("_")
with open(DATA_CONFIG, "r") as f:
paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]
if dataset_name == "combined":
data_path = [paths["chexpert"], paths["nih"], paths["mimic"]]
split = [split, split, split]
transform = [transform, transform, transform]
else:
data_path = paths[dataset_name]
if dataset_name == "combined":
for path in data_path:
if not Path(path).exists():
pytest.skip()
elif not Path(data_path).exists():
return None
if dataset_name == "nih":
dataset = NIHChestDataset(directory=data_path, split=split, transform=transform)
elif dataset_name == "chexpert":
dataset = CheXpertDataset(directory=data_path, split=split, transform=transform)
elif dataset_name == "mimic":
dataset = MimicCxrJpgDataset(
directory=data_path, split=split, transform=transform
)
elif dataset_name == "combined":
dataset = CombinedXrayDataset(
directory_list=data_path,
dataset_list=["chexpert_v1", "nih-chest-xrays", "mimic-cxr"],
split_list=split,
transform_list=transform,
)
return dataset
@pytest.fixture
def dataset_length_dict():
datalengths = {
"nih_train": 112120,
"nih_all": 112120,
"chexpert_train": 223414,
"chexpert_val": 234,
"chexpert_all": 223648,
"mimic_train": 368960,
"mimic_val": 2991,
"mimic_test": 5159,
"mimic_all": 377110,
"combined_train": 704494,
"combined_val": 3225,
"combined_test": 5159,
"combined_all": 712878,
}
return datalengths
|
CovidPrognosis-main
|
tests/conftest.py
|
CovidPrognosis-main
|
tests/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
import torchvision.transforms as tvt
from covidprognosis.data.transforms import Compose
from .conftest import fetch_dataset
@pytest.mark.parametrize(
"dataset_name",
[
"nih_train",
"nih_all",
"chexpert_train",
"chexpert_val",
"chexpert_all",
"mimic_train",
"mimic_val",
"mimic_test",
"mimic_all",
"combined_train",
"combined_val",
"combined_test",
"combined_all",
],
)
def test_dataset_lengths(dataset_name, dataset_length_dict):
transform = Compose([tvt.ToTensor()])
dataset = fetch_dataset(dataset_name, transform)
if dataset is None:
pytest.skip()
else:
assert len(dataset) == dataset_length_dict[dataset_name]
@pytest.mark.parametrize(
"dataset_name",
[
"nih_train",
"nih_all",
"chexpert_train",
"chexpert_val",
"chexpert_all",
"mimic_train",
"mimic_val",
"mimic_test",
"mimic_all",
"combined_train",
"combined_val",
"combined_test",
"combined_all",
],
)
def test_dataset_getitem(dataset_name):
transform = Compose([tvt.ToTensor()])
dataset = fetch_dataset(dataset_name, transform)
if dataset is None:
pytest.skip()
else:
item1 = dataset[0]
item2 = dataset[-1]
assert item1 is not None
assert item2 is not None
def test_combined_loader():
transform = Compose([tvt.ToTensor()])
dataset = fetch_dataset("combined_all", transform=transform)
sample = dataset[0]
assert "CheXpert" in str(sample["metadata"]["filename"])
sample = dataset[300000]
assert "nih-chest-xrays" in str(sample["metadata"]["filename"])
sample = dataset[600000]
assert "mimic-cxr-jpg" in str(sample["metadata"]["filename"])
|
CovidPrognosis-main
|
tests/test_xray_datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import covidprognosis.data.transforms as cpt
import numpy as np
import pytest
import torch
import torchvision.transforms as tvt
from scipy.ndimage import gaussian_filter
from .conftest import create_input
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_compose(shape):
sample = create_input(shape)
transform = cpt.Compose(
[tvt.RandomHorizontalFlip(), tvt.ToTensor(), cpt.RandomGaussianBlur()]
)
sample = transform(sample)
assert sample["image"] is not None
@pytest.mark.parametrize("shape, label_idx", [[[32, 32, 3], 0], [[45, 16, 3], 5]])
def test_nan_to_int(shape, label_idx):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.NanToInt(5)])
sample["labels"][label_idx] = np.nan
sample = transform(sample)
assert sample["labels"][label_idx] == 5
@pytest.mark.parametrize(
"shape, label_idx, start_label, end_label",
[[[32, 32, 3], 2, -1, 0], [[45, 16, 3], 10, 1, 0]],
)
def test_remap_label(shape, label_idx, start_label, end_label):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RemapLabel(start_label, end_label)])
sample["labels"][label_idx] = start_label
sample = transform(sample)
assert sample["labels"][label_idx] == end_label
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_histnorm(shape):
"""Test this to guard against an implementation change."""
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.HistogramNormalize()])
image = np.transpose(
torch.tensor(np.array(sample["image"]), dtype=torch.float).numpy(), (2, 0, 1)
)
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), transform.transforms[1].number_bins, density=True
)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
image = torch.tensor(image_equalized.reshape(image.shape)).to(torch.float)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_rand_gauss_blur(shape):
"""Test this to guard against an implementation change."""
seed = 123
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RandomGaussianBlur(p=1)])
# run the custom blur
np.random.seed(seed)
image = tvt.functional.to_tensor(sample["image"]) * 1
sigma = np.random.uniform(
transform.transforms[1].sigma_range[0], transform.transforms[1].sigma_range[1]
)
image = torch.tensor(gaussian_filter(image.numpy(), sigma), dtype=image.dtype,)
# transform blur
transform = cpt.Compose(
[tvt.ToTensor(), cpt.RandomGaussianBlur(p=1, sigma_range=(sigma, sigma))]
)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
# retest for 0 probability
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RandomGaussianBlur(p=-0.1)])
# run the custom blur
image = tvt.functional.to_tensor(sample["image"]) * 1
# transform blur
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_add_noise(shape):
"""Test this to guard against an implementation change."""
seed = 456
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.AddGaussianNoise(p=1)])
# run the custom noise
np.random.seed(seed)
image = tvt.functional.to_tensor(sample["image"]) * 1
np.random.uniform()  # consume the p-draw that AddGaussianNoise.__call__ makes first
snr_level = np.random.uniform(
low=transform.transforms[1].snr_range[0],
high=transform.transforms[1].snr_range[1],
)
signal_level = np.mean(image.numpy())
image = image + (signal_level / snr_level) * torch.tensor(
np.random.normal(size=tuple(image.shape)), dtype=image.dtype,
)
# transform blur
np.random.seed(seed)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
# retest for 0 probability
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.AddGaussianNoise(p=-0.1)])
# run the custom blur
image = tvt.functional.to_tensor(sample["image"]) * 1
# transform blur
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_tensor_to_rgb(shape):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.TensorToRGB()])
image = tvt.functional.to_tensor(sample["image"]) * 1
expands = list()
for i in range(image.ndim):
if i == 0:
expands.append(3)
else:
expands.append(-1)
image = image.expand(*expands)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
|
CovidPrognosis-main
|
tests/test_transforms.py
|
import covidprognosis.data
import covidprognosis.models
import covidprognosis.plmodules
|
CovidPrognosis-main
|
covidprognosis/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Code adapted from https://github.com/facebookresearch/moco
from typing import Tuple
import torch
import torch.nn as nn
from torch import Tensor
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(
self,
encoder_q: nn.Module,
encoder_k: nn.Module,
dim: int = 128,
K: int = 65536,
m: float = 0.999,
T: float = 0.07,
mlp: bool = False,
):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super().__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = encoder_q
self.encoder_k = encoder_k
if mlp: # hack: brute-force replacement
if hasattr(self.encoder_q, "fc"): # ResNet models
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc
)
self.encoder_k.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc
)
elif hasattr(self.encoder_q, "classifier"): # Densenet models
dim_mlp = self.encoder_q.classifier.weight.shape[1]
self.encoder_q.classifier = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.classifier
)
self.encoder_k.classifier = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.classifier
)
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer(
"queue", nn.functional.normalize(torch.randn(dim, K), dim=0)
)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys: Tensor):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
assert isinstance(self.queue_ptr, Tensor)
ptr = int(self.queue_ptr)
assert (
self.K % batch_size == 0
), f"batch_size={batch_size}, K={self.K}" # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x: Tensor) -> Tuple[Tensor, Tensor]:
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q: Tensor, im_k: Tensor) -> Tuple[Tensor, Tensor]:
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits, labels
# utils
@torch.no_grad()
def concat_all_gather(tensor: Tensor) -> Tensor:
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
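# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# file). It shows how MoCo is typically instantiated with two identical
# torchvision encoders. `im_q`/`im_k` are placeholder batches, and a real
# run needs torch.distributed initialized because forward() relies on the
# DDP-only shuffle and all_gather helpers above.
#
# import torchvision.models as models
# encoder_q = models.resnet50(num_classes=128)
# encoder_k = models.resnet50(num_classes=128)
# moco = MoCo(encoder_q, encoder_k, dim=128, K=65536, m=0.999, T=0.07, mlp=True)
# logits, labels = moco(im_q, im_k)  # two augmentations of the same batch
# loss = torch.nn.functional.cross_entropy(logits, labels)
# ---------------------------------------------------------------------------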
|
CovidPrognosis-main
|
covidprognosis/models/moco_model.py
|
from .moco_model import MoCo
|
CovidPrognosis-main
|
covidprognosis/models/__init__.py
|
from .xray_datamodule import XrayDataModule
|
CovidPrognosis-main
|
covidprognosis/plmodules/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from argparse import ArgumentParser
from typing import Callable, List, Optional, Union
import covidprognosis as cp
import numpy as np
import pytorch_lightning as pl
import torch
class TwoImageDataset(torch.utils.data.Dataset):
"""
Wrapper for returning two augmentations of the same image.
Args:
dataset: Pre-initialized data set to return multiple samples from.
"""
def __init__(self, dataset: cp.data.BaseDataset):
assert isinstance(dataset, cp.data.BaseDataset)
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# randomness handled via the transform objects
# this requires the transforms to sample randomness from the process
# generator
item0 = self.dataset[idx]
item1 = self.dataset[idx]
sample = {
"image0": item0["image"],
"image1": item1["image"],
"label": item0["labels"],
}
return sample
def fetch_dataset(
dataset_name: str,
dataset_dir: Union[List[Union[str, os.PathLike]], Union[str, os.PathLike]],
split: str,
transform: Optional[Callable],
two_image: bool = False,
label_list="all",
):
"""Dataset fetcher for config handling."""
assert split in ("train", "val", "test")
dataset: Union[cp.data.BaseDataset, TwoImageDataset]
# determine the dataset
if dataset_name == "nih":
assert not isinstance(dataset_dir, list)
dataset = cp.data.NIHChestDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
resplit=True,
)
if dataset_name == "mimic":
assert not isinstance(dataset_dir, list)
dataset = cp.data.MimicCxrJpgDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
)
elif dataset_name == "chexpert":
assert not isinstance(dataset_dir, list)
dataset = cp.data.CheXpertDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
)
elif dataset_name == "mimic-chexpert":
assert isinstance(dataset_dir, list)
dataset = cp.data.CombinedXrayDataset(
dataset_list=["chexpert_v1", "mimic-cxr"],
directory_list=dataset_dir,
transform_list=[transform, transform],
label_list=[label_list, label_list],
split_list=[split, split],
)
else:
raise ValueError(f"dataset {dataset_name} not recognized")
if two_image is True:
dataset = TwoImageDataset(dataset)
return dataset
def worker_init_fn(worker_id):
"""Handle random seeding."""
worker_info = torch.utils.data.get_worker_info()
seed = worker_info.seed % (2 ** 32 - 1) # pylint: disable=no-member
np.random.seed(seed)
class XrayDataModule(pl.LightningDataModule):
"""
X-ray data module for training models with PyTorch Lightning.
Args:
dataset_name: Name of the dataset.
dataset_dir: Location of the data.
label_list: Labels to load for training.
batch_size: Training batch size.
num_workers: Number of workers for dataloaders.
use_two_images: Whether to return two augmentations of same image from
dataset (for MoCo pretraining).
train_transform: Transform for training loop.
val_transform: Transform for validation loop.
test_transform: Transform for test loop.
"""
def __init__(
self,
dataset_name: str,
dataset_dir: Union[List[Union[str, os.PathLike]], Union[str, os.PathLike]],
label_list: Union[str, List[str]] = "all",
batch_size: int = 1,
num_workers: int = 4,
use_two_images: bool = False,
train_transform: Optional[Callable] = None,
val_transform: Optional[Callable] = None,
test_transform: Optional[Callable] = None,
):
super().__init__()
self.dataset_name = dataset_name
self.dataset_dir = dataset_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.train_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"train",
train_transform,
label_list=label_list,
two_image=use_two_images,
)
self.val_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"val",
val_transform,
label_list=label_list,
two_image=use_two_images,
)
self.test_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"test",
test_transform,
label_list=label_list,
two_image=use_two_images,
)
if isinstance(self.train_dataset, TwoImageDataset):
self.label_list = None
else:
self.label_list = self.train_dataset.label_list
def __dataloader(self, split: str) -> torch.utils.data.DataLoader:
assert split in ("train", "val", "test")
shuffle = False
if split == "train":
dataset = self.train_dataset
shuffle = True
elif split == "val":
dataset = self.val_dataset
else:
dataset = self.test_dataset
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
drop_last=True,
shuffle=shuffle,
worker_init_fn=worker_init_fn,
)
return loader
def train_dataloader(self):
return self.__dataloader(split="train")
def val_dataloader(self):
return self.__dataloader(split="val")
def test_dataloader(self):
return self.__dataloader(split="test")
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--dataset_name", default="mimic", type=str)
parser.add_argument("--dataset_dir", default=None, type=str)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--num_workers", default=4, type=int)
return parser
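# Hedged usage sketch (added for illustration; not part of the original
# module). The directory below is a placeholder, and `transform` stands for
# any Compose pipeline such as the one in covidprognosis/data/transforms.py.
#
# dm = XrayDataModule(
#     dataset_name="mimic",
#     dataset_dir="/path/to/mimic-cxr-jpg",  # placeholder path
#     batch_size=64,
#     num_workers=4,
#     use_two_images=True,  # yields {"image0", "image1", "label"} for MoCo
#     train_transform=transform,
#     val_transform=transform,
#     test_transform=transform,
# )
# train_loader = dm.train_dataloader()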
|
CovidPrognosis-main
|
covidprognosis/plmodules/xray_datamodule.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Callable, Dict, List, Tuple, Union
import numpy as np
import torch
from scipy.ndimage import gaussian_filter
class XRayTransform:
"""XRayTransform base class."""
def __repr__(self):
return "XRayTransform: {}".format(self.__class__.__name__)
class Compose(XRayTransform):
"""
Compose a list of transforms into one.
Args:
transforms: The list of transforms.
"""
def __init__(self, transforms: List[Callable]):
self.transforms = transforms
def __call__(self, sample: Dict) -> Dict:
for t in self.transforms:
if isinstance(t, XRayTransform):
sample = t(sample)
else:
# assume torchvision transform
sample["image"] = t(sample["image"])
return sample
class NanToInt(XRayTransform):
"""
Convert an np.nan label to an integer.
Args:
num: Integer to convert to.
"""
def __init__(self, num: int = -100):
self.num = num
def __call__(self, sample: Dict) -> Dict:
sample["labels"][np.isnan(sample["labels"])] = self.num
return sample
class RemapLabel(XRayTransform):
"""
Convert a label from one value to another.
Args:
input_val: Value to convert from.
output_val: Value to convert to.
"""
def __init__(self, input_val: Union[float, int], output_val: Union[float, int]):
self.input_val = input_val
self.output_val = output_val
def __call__(self, sample: Dict) -> Dict:
labels = np.copy(sample["labels"])
labels[labels == self.input_val] = self.output_val
sample["labels"] = labels
return sample
class HistogramNormalize(XRayTransform):
"""
Apply histogram normalization.
Args:
number_bins: Number of bins to use in histogram.
"""
def __init__(self, number_bins: int = 256):
self.number_bins = number_bins
def __call__(self, sample: Dict) -> Dict:
image = sample["image"].numpy()
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), self.number_bins, density=True
)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
sample["image"] = torch.tensor(image_equalized.reshape(image.shape)).to(
sample["image"]
)
return sample
class RandomGaussianBlur(XRayTransform):
"""
Random Gaussian blur transform.
Args:
p: Probability to apply transform.
sigma_range: Range of sigma values for Gaussian kernel.
"""
def __init__(self, p: float = 0.5, sigma_range: Tuple[float, float] = (0.1, 2.0)):
self.p = p
self.sigma_range = sigma_range
def __call__(self, sample: Dict) -> Dict:
if np.random.uniform() <= self.p:
sigma = np.random.uniform(self.sigma_range[0], self.sigma_range[1])
sample["image"] = torch.tensor(
gaussian_filter(sample["image"].numpy(), sigma),
dtype=sample["image"].dtype,
)
return sample
class AddGaussianNoise(XRayTransform):
"""
Gaussian noise transform.
Args:
p: Probability of adding Gaussian noise.
snr_range: SNR range for Gaussian noise addition.
"""
def __init__(self, p: float = 0.5, snr_range: Tuple[float, float] = (2.0, 8.0)):
self.p = p
self.snr_range = snr_range
def __call__(self, sample: Dict) -> Dict:
if np.random.uniform() <= self.p:
snr_level = np.random.uniform(low=self.snr_range[0], high=self.snr_range[1])
signal_level = np.mean(sample["image"].numpy())
# use numpy to keep things consistent on numpy random seed
sample["image"] = sample["image"] + (
signal_level / snr_level
) * torch.tensor(
np.random.normal(size=tuple(sample["image"].shape)),
dtype=sample["image"].dtype,
)
return sample
class TensorToRGB(XRayTransform):
"""
Convert Tensor to RGB by replicating channels.
Args:
num_output_channels: Number of output channels (3 for RGB).
"""
def __init__(self, num_output_channels: int = 3):
self.num_output_channels = num_output_channels
def __call__(self, sample: Dict) -> Dict:
expands = list()
for i in range(sample["image"].ndim):
if i == 0:
expands.append(self.num_output_channels)
else:
expands.append(-1)
sample["image"] = sample["image"].expand(*expands)
return sample
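# Hedged usage sketch (added for illustration; not part of the original
# file). Compose applies XRayTransform subclasses to the whole sample dict
# and everything else to sample["image"] only, so torchvision transforms and
# the dict-level transforms above can be freely interleaved:
#
# import torchvision.transforms as tvt
# transform = Compose([
#     tvt.ToTensor(),
#     HistogramNormalize(number_bins=256),
#     RandomGaussianBlur(p=0.5, sigma_range=(0.1, 2.0)),
#     AddGaussianNoise(p=0.5, snr_range=(2.0, 8.0)),
#     TensorToRGB(),
# ])
# sample = transform({"image": pil_image, "labels": labels, "metadata": {}})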
|
CovidPrognosis-main
|
covidprognosis/data/transforms.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset
class BaseDataset(Dataset):
"""
Root class for X-ray data sets.
The base data set logs parameters as attributes, reducing code duplication
across the various public X-ray data loaders.
Args:
dataset_name: Name of the dataset.
directory: Location of the data.
split: One of ('train', 'val', 'test', 'all').
label_list: A list of labels for the data loader to extract.
subselect: Argument to pass to `pandas` subselect.
transform: A set of data transforms.
"""
def __init__(
self,
dataset_name: str,
directory: Union[str, os.PathLike],
split: str,
label_list: Union[str, List[str]],
subselect: Optional[str],
transform: Optional[Callable],
):
self.dataset_name = dataset_name
split_list = ["train", "val", "test", "all"]
if split not in split_list:
raise ValueError("split {} not a valid split".format(split))
self.directory = Path(directory)
self.csv = None
self.split = split
self.label_list = label_list
self.subselect = subselect
self.transform = transform
self.metadata_keys: List[str] = []
def preproc_csv(self, csv: pd.DataFrame, subselect: str) -> pd.DataFrame:
if subselect is not None:
csv = csv.query(subselect)
return csv
def open_image(self, path: Union[str, os.PathLike]) -> Image:
with open(path, "rb") as f:
with Image.open(f) as img:
return img.convert("F")
def __len__(self) -> int:
return 0
@property
def calc_pos_weights(self) -> float:
if self.csv is None:
return 0.0
pos = (self.csv[self.label_list] == 1).sum()
neg = (self.csv[self.label_list] == 0).sum()
neg_pos_ratio = (neg / np.maximum(pos, 1)).values.astype(float)
return neg_pos_ratio
def retrieve_metadata(
self, idx: int, filename: Union[str, os.PathLike], exam: pd.Series
) -> Dict:
metadata = {}
metadata["dataset_name"] = self.dataset_name
metadata["dataloader class"] = self.__class__.__name__
metadata["idx"] = idx # type: ignore
for key in self.metadata_keys:
# cast to string due to typing issues with dataloader
metadata[key] = str(exam[key])
metadata["filename"] = str(filename)
metadata["label_list"] = self.label_list # type: ignore
return metadata
def __repr__(self):
return self.__class__.__name__ + " num_samples={}".format(len(self))
@property
def labels(self) -> Union[str, List[str]]:
return self.label_list
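# Hedged note (added for illustration; not part of the original file):
# calc_pos_weights returns the per-label negative/positive count ratio, the
# usual `pos_weight` argument for a multi-label BCE loss, e.g.
#
# weights = torch.tensor(dataset.calc_pos_weights, dtype=torch.float)
# criterion = torch.nn.BCEWithLogitsLoss(pos_weight=weights)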
|
CovidPrognosis-main
|
covidprognosis/data/base_dataset.py
|
from .base_dataset import BaseDataset
from .chexpert import CheXpertDataset
from .combined_datasets import CombinedXrayDataset
from .mimic_cxr import MimicCxrJpgDataset
from .nih_chest_xrays import NIHChestDataset
|
CovidPrognosis-main
|
covidprognosis/data/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class CheXpertDataset(BaseDataset):
"""
Data loader for CheXpert data set.
Args:
directory: Base directory for data set with subdirectory
'CheXpert-v1.0'.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
transform: A composible transform list to be applied to the data.
Irvin, Jeremy, et al. "Chexpert: A large chest radiograph dataset with
uncertainty labels and expert comparison." Proceedings of the AAAI
Conference on Artificial Intelligence. Vol. 33. 2019.
Dataset website here:
https://stanfordmlgroup.github.io/competitions/chexpert/
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
):
super().__init__(
"chexpert_v1", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"Patient ID",
"Path",
"Sex",
"Age",
"Frontal/Lateral",
"AP/PA",
]
if self.split == "train":
self.csv_path = self.directory / "CheXpert-v1.0" / "train.csv"
self.csv = pd.read_csv(self.csv_path)
elif self.split == "val":
self.csv_path = self.directory / "CheXpert-v1.0" / "valid.csv"
self.csv = pd.read_csv(self.csv_path)
elif self.split == "all":
self.csv_path = self.directory / "train.csv"
self.csv = pd.concat(
[
pd.read_csv(self.directory / "CheXpert-v1.0" / "train.csv"),
pd.read_csv(self.directory / "CheXpert-v1.0" / "valid.csv"),
]
)
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Opacity",
"Lung Lesion",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
csv["Patient ID"] = csv["Path"].str.extract(pat="(patient\\d+)")
csv["view"] = csv["Frontal/Lateral"].str.lower()
if subselect is not None:
csv = csv.query(subselect)
return csv
def __len__(self) -> int:
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
filename = self.directory / exam["Path"]
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# retrieve labels while handling missing ones for combined data loader
labels = np.array(exam.reindex(self.label_list)[self.label_list]).astype(
float
)
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
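# Hedged note (added for illustration; not part of the original file): in the
# CheXpert CSVs each label is 1 (positive), 0 (negative), -1 (uncertain) or
# blank/NaN (unmentioned), which is why transforms such as NanToInt and
# RemapLabel exist in covidprognosis/data/transforms.py to remap labels
# before computing a loss.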
|
CovidPrognosis-main
|
covidprognosis/data/chexpert.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from typing import Callable, List, Optional, Union
from .base_dataset import BaseDataset
from .chexpert import CheXpertDataset
from .mimic_cxr import MimicCxrJpgDataset
from .nih_chest_xrays import NIHChestDataset
class CombinedXrayDataset(BaseDataset):
"""
Combine several x-ray datasets into one.
Args:
directory_list: List of paths for directories for each dataset.
dataset_list: List of datasets to load. Current options include:
'all': Include all datasets.
'chexpert_v1': Include CheXpert dataset (223,414 images in 'train').
'nih-chest-xrays': Include NIH Chest x-rays (112,120 images in
'train').
'mimic-cxr': Include MIMIC-CXR-JPG (368,960 images in 'train').
split_list: List of strings specifying split. If a string is passed
(e.g., 'train'), that split will be broadcast to all
sub-dataloaders.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels from all datasets.
transform_list: A list of composed transforms. If a single composed
transform is passed, it will be broadcast to all sub-dataloaders.
"""
def __init__(
self,
directory_list: List[Union[str, os.PathLike]],
dataset_list: Union[str, List[str]] = "all",
split_list: Union[str, List[str]] = "train",
label_list: Union[str, List[str]] = "all",
subselect_list: Optional[List[str]] = None,
transform_list: Optional[List[Optional[Callable]]] = None,
):
self.dataset_name = "combined-xray-dataset"
if dataset_list == "all":
dataset_list = ["chexpert_v1", "nih-chest-xrays", "mimic-cxr"]
self.dataset_list = dataset_list
elif isinstance(dataset_list, str):
raise RuntimeError("Unrecognized dataset list string.")
else:
self.dataset_list = dataset_list # type:ignore
self.directory_list = directory_list = self.to_list(directory_list)
self.split_list = split_list = self.to_list(split_list)
self.subselect_list = self.to_list(subselect_list)
self.transform_list = transform_list = self.to_list(transform_list)
# find all possible labels if using 'all'
if label_list == "all":
self.label_list = self.fetch_label_list(self.dataset_list)
else:
if isinstance(label_list, str):
raise ValueError(
"If inputting label_list, label_list must not be a string"
)
self.label_list = label_list
self.datasets = []
for (dataset_name, directory, split, subselect, transform) in zip(
self.dataset_list,
self.directory_list,
self.split_list,
self.subselect_list,
self.transform_list,
):
self.datasets.append(
self.fetch_dataset(
dataset_name,
directory,
split,
self.label_list,
subselect,
transform,
)
)
def to_list(self, item):
if not isinstance(item, list):
item = [item] * len(self.dataset_list)
assert len(item) == len(self.dataset_list)
return item
def fetch_label_list(self, dataset_name_list: List[str]) -> List[str]:
label_list: List[str] = []
for dataset_name in dataset_name_list:
if dataset_name == "chexpert_v1":
label_list = label_list + CheXpertDataset.default_labels()
elif dataset_name == "nih-chest-xrays":
label_list = label_list + NIHChestDataset.default_labels()
elif dataset_name == "mimic-cxr":
label_list = label_list + MimicCxrJpgDataset.default_labels()
# remove duplicates
label_list = list(set(label_list))
return label_list
def fetch_dataset(
self,
dataset_name: str,
directory: Union[str, os.PathLike],
split: str,
label_list: Union[str, List[str]],
subselect: str,
transform: Callable,
) -> BaseDataset:
dataset: BaseDataset
if dataset_name == "chexpert_v1":
dataset = CheXpertDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
elif dataset_name == "nih-chest-xrays":
dataset = NIHChestDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
elif dataset_name == "mimic-cxr":
dataset = MimicCxrJpgDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
else:
raise RuntimeError(f"Data set {dataset_name} not found.")
return dataset
def __len__(self) -> int:
count = 0
for dataset in self.datasets:
count = count + len(dataset)
return count
def __getitem__(self, idx: int):
if idx < 0:
idx = len(self) + idx
for dataset in self.datasets:
if idx < len(dataset):
return dataset[idx]
else:
idx = idx - len(dataset)
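# Hedged note (added for illustration; not part of the original file):
# __getitem__ resolves a global index by walking the concatenated datasets in
# order, e.g. with sub-dataset lengths [100, 50], idx=120 maps to
# datasets[1][20]; a negative idx wraps once via len(self) before the walk.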
|
CovidPrognosis-main
|
covidprognosis/data/combined_datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class NIHChestDataset(BaseDataset):
"""
Data loader for NIH data set.
Args:
directory: Base directory for data set.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
transform: A composible transform list to be applied to the data.
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
resplit: bool = False,
resplit_seed: int = 2019,
resplit_ratios: List[float] = [0.7, 0.2, 0.1],
):
super().__init__(
"nih-chest-xrays", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"Image Index",
"Follow-up #",
"Patient ID",
"Patient Age",
"Patient Gender",
"View Position",
]
if resplit:
rg = np.random.default_rng(resplit_seed)
self.csv_path = self.directory / "Data_Entry_2017.csv"
csv = pd.read_csv(self.csv_path)
patient_list = csv["Patient ID"].unique()
rand_inds = rg.permutation(len(patient_list))
train_count = int(np.round(resplit_ratios[0] * len(patient_list)))
val_count = int(np.round(resplit_ratios[1] * len(patient_list)))
grouped = csv.groupby("Patient ID")
if self.split == "train":
patient_list = patient_list[rand_inds[:train_count]]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
elif self.split == "val":
patient_list = patient_list[
rand_inds[train_count : train_count + val_count]
]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
elif self.split == "test":
patient_list = patient_list[rand_inds[train_count + val_count :]]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
else:
if self.split == "train":
self.csv_path = self.directory / "Data_Entry_2017.csv"
self.csv = pd.read_csv(self.csv_path)
elif self.split == "all":
self.csv_path = self.directory / "Data_Entry_2017.csv"
self.csv = pd.read_csv(self.csv_path)
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"Atelectasis",
"Consolidation",
"Infiltration",
"Pneumothorax",
"Edema",
"Emphysema",
"Fibrosis",
"Effusion",
"Pneumonia",
"Pleural_Thickening",
"Cardiomegaly",
"Nodule",
"Mass",
"Hernia",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
def format_view(s):
return "frontal" if s in ("AP", "PA") else None
csv["view"] = csv["View Position"].apply(format_view)
if subselect is not None:
csv = csv.query(subselect)
return csv
def __len__(self) -> int:
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
filename = self.directory / "images" / exam["Image Index"]
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# example: exam['Finding Labels'] = 'Pneumonia|Cardiomegaly'
# goal here is to see if label is a substring of
# 'Pneumonia|Cardiomegaly' for each label in self.label_list
labels = [
1 if label in exam["Finding Labels"] else 0 for label in self.label_list
]
labels = np.array(labels).astype(float)
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
|
CovidPrognosis-main
|
covidprognosis/data/nih_chest_xrays.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class MimicCxrJpgDataset(BaseDataset):
"""
Data loader for MIMIC CXR data set.
Args:
directory: Base directory for data set.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
'test': Include testing split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
transform: A composible transform list to be applied to the data.
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
):
super().__init__(
"mimic-cxr-jpg", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"dicom_id",
"subject_id",
"study_id",
"PerformedProcedureStepDescription",
"ViewPosition",
"Rows",
"Columns",
"StudyDate",
"StudyTime",
"ProcedureCodeSequence_CodeMeaning",
"ViewCodeSequence_CodeMeaning",
"PatientOrientationCodeSequence_CodeMeaning",
]
self.label_csv_path = (
self.directory / "2.0.0" / "mimic-cxr-2.0.0-chexpert.csv.gz"
)
self.meta_csv_path = (
self.directory / "2.0.0" / "mimic-cxr-2.0.0-metadata.csv.gz"
)
self.split_csv_path = self.directory / "2.0.0" / "mimic-cxr-2.0.0-split.csv.gz"
if self.split in ("train", "val", "test"):
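            # note: the split CSV and the metadata CSV are assumed to be
            # row-aligned, so the boolean mask computed from one can index
            # the other directly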
split_csv = pd.read_csv(self.split_csv_path)["split"].str.contains(
self.split
)
meta_csv = pd.read_csv(self.meta_csv_path)[split_csv].set_index(
["subject_id", "study_id"]
)
label_csv = pd.read_csv(self.label_csv_path).set_index(
["subject_id", "study_id"]
)
self.csv = meta_csv.join(label_csv).reset_index()
elif self.split == "all":
meta_csv = pd.read_csv(self.meta_csv_path).set_index(
["subject_id", "study_id"]
)
label_csv = pd.read_csv(self.label_csv_path).set_index(
["subject_id", "study_id"]
)
self.csv = meta_csv.join(label_csv).reset_index()
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Opacity",
"Lung Lesion",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
def format_view(s):
if s in ("AP", "PA", "AP|PA"):
return "frontal"
elif s in ("LATERAL", "LL"):
return "lateral"
else:
return None
csv["view"] = csv.ViewPosition.apply(format_view)
if subselect is not None:
csv = csv.query(subselect)
return csv
def __len__(self):
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
subject_id = str(exam["subject_id"])
study_id = str(exam["study_id"])
dicom_id = str(exam["dicom_id"])
filename = self.directory / "2.0.0" / "files"
filename = (
filename
/ "p{}".format(subject_id[:2])
/ "p{}".format(subject_id)
/ "s{}".format(study_id)
/ "{}.jpg".format(dicom_id)
)
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# retrieve labels while handling missing ones for combined data loader
        labels = np.array(exam.reindex(self.label_list)[self.label_list]).astype(
            float  # np.float alias was removed from NumPy
        )
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
|
CovidPrognosis-main
|
covidprognosis/data/mimic_cxr.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from torch.utils.data._utils.collate import default_collate
def collate_fn(batch):
"""Collate function to handle X-ray metadata."""
metadata = []
for el in batch:
metadata.append(el["metadata"])
del el["metadata"]
batch = default_collate(batch)
batch["metadata"] = metadata
return batch
|
CovidPrognosis-main
|
covidprognosis/data/collate_fn.py
|
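A small usage sketch for collate_fn above. The toy samples mimic the {"image", "labels", "metadata"} dictionaries the datasets return; the import path follows the module location shown in this row.

import torch
from torch.utils.data import DataLoader
from covidprognosis.data.collate_fn import collate_fn

samples = [
    {"image": torch.zeros(3, 4, 4), "labels": torch.zeros(14), "metadata": {"id": i}}
    for i in range(4)
]
loader = DataLoader(samples, batch_size=2, collate_fn=collate_fn)
batch = next(iter(loader))
# tensors are stacked as usual; metadata stays a list of per-sample dicts
print(batch["image"].shape, len(batch["metadata"]))  # torch.Size([2, 3, 4, 4]) 2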
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
import covidprognosis as cp
import pytorch_lightning as pl
import torch
import torchvision.models as models
class MoCoModule(pl.LightningModule):
def __init__(
self,
arch,
feature_dim,
queue_size,
use_mlp=False,
learning_rate=1.0,
momentum=0.9,
weight_decay=1e-4,
epochs=1,
):
super().__init__()
self.learning_rate = learning_rate
self.momentum = momentum
self.weight_decay = weight_decay
self.epochs = epochs
# build model
self.model = cp.models.MoCo(
encoder_q=models.__dict__[arch](num_classes=feature_dim),
encoder_k=models.__dict__[arch](num_classes=feature_dim),
dim=feature_dim,
K=queue_size,
mlp=use_mlp,
)
self.loss_fn = torch.nn.CrossEntropyLoss()
self.train_acc = pl.metrics.Accuracy()
self.val_acc = pl.metrics.Accuracy()
def forward(self, image0, image1):
return self.model(image0, image1)
def training_step(self, batch, batch_idx):
image0, image1 = batch["image0"], batch["image1"]
output, target = self(image0, image1)
# metrics
loss_val = self.loss_fn(output, target)
self.train_acc(output, target)
self.log("train_metrics/loss", loss_val)
self.log("train_metrics/accuracy", self.train_acc, on_step=True, on_epoch=False)
return loss_val
def configure_optimizers(self):
optimizer = torch.optim.SGD(
self.model.parameters(),
self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--feature_dim", default=256, type=int)
parser.add_argument("--queue_size", default=65536, type=int)
parser.add_argument("--use_mlp", default=False, type=bool)
parser.add_argument("--learning_rate", default=1.0, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=1e-4, type=float)
return parser
|
CovidPrognosis-main
|
cp_examples/moco_pretrain/moco_module.py
|
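The training step above follows the standard MoCo recipe: cp.models.MoCo (not included in this dump) is assumed to return logits with the positive pair in column 0 followed by K queue negatives, so the InfoNCE target is always class 0. A self-contained sketch with illustrative sizes:

import torch

batch_size, queue_size = 8, 16
output = torch.randn(batch_size, 1 + queue_size)    # [positive | K negatives]
target = torch.zeros(batch_size, dtype=torch.long)  # the positive is class 0
loss = torch.nn.CrossEntropyLoss()(output, target)
print(loss.item())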
CovidPrognosis-main
|
cp_examples/moco_pretrain/__init__.py
|
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from argparse import ArgumentParser
from pathlib import Path
import pytorch_lightning as pl
import yaml
from covidprognosis.data.transforms import (
AddGaussianNoise,
Compose,
HistogramNormalize,
RandomGaussianBlur,
TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from moco_module import MoCoModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
data_config = Path.cwd() / "../../configs/data.yaml"
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"max_epochs": 200,
"gpus": 2,
"num_workers": 10,
"batch_size": 128,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = MoCoModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.dataset_dir is None:
with open(data_config, "r") as f:
paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]
if args.dataset_name == "nih":
args.dataset_dir = paths["nih"]
if args.dataset_name == "mimic":
args.dataset_dir = paths["mimic"]
elif args.dataset_name == "chexpert":
args.dataset_dir = paths["chexpert"]
elif args.dataset_name == "mimic-chexpert":
args.dataset_dir = [paths["chexpert"], paths["mimic"]]
else:
raise ValueError("Unrecognized path config.")
# ------------
# checkpoints
# ------------
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
if not checkpoint_dir.exists():
checkpoint_dir.mkdir(parents=True)
elif args.resume_from_checkpoint is None:
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def cli_main(args):
# ------------
# data
# ------------
transform_list = [
transforms.RandomResizedCrop(args.im_size, scale=(0.2, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
RandomGaussianBlur(),
AddGaussianNoise(snr_range=(4, 8)),
HistogramNormalize(),
TensorToRGB(),
]
data_module = XrayDataModule(
dataset_name=args.dataset_name,
dataset_dir=args.dataset_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
use_two_images=True,
train_transform=Compose(transform_list),
val_transform=Compose(transform_list),
test_transform=Compose(transform_list),
)
# ------------
# model
# ------------
model = MoCoModule(
arch=args.arch,
feature_dim=args.feature_dim,
queue_size=args.queue_size,
use_mlp=args.use_mlp,
learning_rate=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
epochs=args.max_epochs,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/moco_pretrain/train_moco.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from pathlib import Path
import math
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tvmodels
class DenseNet(tvmodels.DenseNet):
def forward(self, x):
features = self.features(x)
return F.relu(features, inplace=True)
def filter_nans(logits, labels):
logits = logits[~torch.isnan(labels)]
labels = labels[~torch.isnan(labels)]
return logits, labels
def load_pretrained_model(arch, pretrained_file):
pretrained_dict = torch.load(pretrained_file)["state_dict"]
state_dict = {}
for k, v in pretrained_dict.items():
if k.startswith("model.encoder_q."):
k = k.replace("model.encoder_q.", "")
state_dict[k] = v
if arch.startswith("densenet"):
num_classes = pretrained_dict["model.encoder_q.classifier.weight"].shape[0]
model = DenseNet(num_classes=num_classes)
model.load_state_dict(state_dict)
feature_dim = pretrained_dict["model.encoder_q.classifier.weight"].shape[1]
del model.classifier
else:
raise ValueError(f"Model architecture {arch} is not supported.")
return model, feature_dim
class ContinuousPosEncoding(nn.Module):
def __init__(self, dim, drop=0.1, maxtime=360):
super().__init__()
self.dropout = nn.Dropout(drop)
position = torch.arange(0, maxtime, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim)
)
pe = torch.zeros(maxtime, dim)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, xs, times):
ys = xs
times = times.long()
for b in range(xs.shape[1]):
ys[:, b] += self.pe[times[b]]
return self.dropout(ys)
class MIPModel(nn.Module):
def __init__(
self,
image_model,
feature_dim,
projection_dim,
num_classes,
num_heads,
feedforward_dim,
drop_transformer,
drop_cpe,
pooling,
image_shape=(7, 7),
):
super().__init__()
self.image_shape = image_shape
self.pooling = pooling
self.image_model = image_model
self.group_norm = nn.GroupNorm(32, feature_dim)
self.projection = nn.Conv2d(feature_dim, projection_dim, (1, 1))
transformer_dim = projection_dim * image_shape[0] * image_shape[1]
self.pos_encoding = ContinuousPosEncoding(transformer_dim, drop=drop_cpe)
self.transformer = nn.TransformerEncoderLayer(
d_model=transformer_dim,
dim_feedforward=feedforward_dim,
nhead=num_heads,
dropout=drop_transformer,
)
self.classifier = nn.Linear(feature_dim + projection_dim, num_classes)
def _apply_transformer(self, image_feats: torch.Tensor, times, lens):
B, N, C, H, W = image_feats.shape
image_feats = image_feats.flatten(start_dim=2).permute(
[1, 0, 2]
) # [N, B, C * H * W]
image_feats = self.pos_encoding(image_feats, times)
image_feats = self.transformer(image_feats)
return image_feats.permute([1, 0, 2]).reshape([B, N, C, H, W])
def _pool(self, image_feats, lens):
if self.pooling == "last_timestep":
pooled_feats = []
for b, l in enumerate(lens.tolist()):
pooled_feats.append(image_feats[b, int(l) - 1])
elif self.pooling == "sum":
pooled_feats = []
for b, l in enumerate(lens.tolist()):
pooled_feats.append(image_feats[b, : int(l)].sum(0))
else:
raise ValueError(f"Unkown pooling method: {self.pooling}")
pooled_feats = torch.stack(pooled_feats)
pooled_feats = F.adaptive_avg_pool2d(pooled_feats, (1, 1))
return pooled_feats.squeeze(3).squeeze(2)
def forward(self, images, times, lens):
B, N, C, H, W = images.shape
images = images.reshape([B * N, C, H, W])
# Apply Image Model
image_feats = self.image_model(images)
image_feats = F.relu(self.group_norm(image_feats))
# Apply transformer
image_feats_proj = self.projection(image_feats).reshape(
[B, N, -1, *self.image_shape]
)
image_feats_trans = self._apply_transformer(image_feats_proj, times, lens)
# Concat and apply classifier
image_feats = image_feats.reshape([B, N, -1, *self.image_shape])
image_feats_combined = torch.cat([image_feats, image_feats_trans], dim=2)
image_feats_pooled = self._pool(image_feats_combined, lens)
return self.classifier(image_feats_pooled)
class MIPModule(pl.LightningModule):
def __init__(
self, args, label_list, pos_weights=None,
):
super().__init__()
self.args = args
self.label_list = label_list
self.val_pathology_list = args.val_pathology_list
self.learning_rate = args.learning_rate
self.epochs = args.epochs
# loss function
        # avoid `or` here: truthiness of a multi-element tensor raises at runtime
        if pos_weights is None:
            pos_weights = torch.ones(args.num_classes)
self.register_buffer("pos_weights", pos_weights)
# metrics
self.train_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in args.val_pathology_list]
)
self.val_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in args.val_pathology_list]
)
image_model, feature_dim = load_pretrained_model(
args.arch, args.pretrained_file
)
self.model = MIPModel(
image_model,
feature_dim,
args.projection_dim,
args.num_classes,
args.num_heads,
args.feedforward_dim,
args.drop_transformer,
args.drop_cpe,
args.pooling,
args.image_shape,
)
def forward(self, images, times, lens):
return self.model(images, times, lens)
def loss(self, output, target):
counts = 0
loss = 0
for i in range(len(output)):
pos_weights, _ = filter_nans(self.pos_weights, target[i])
loss_fn = torch.nn.BCEWithLogitsLoss(
pos_weight=pos_weights, reduction="sum"
)
bind_logits, bind_labels = filter_nans(output[i], target[i])
loss = loss + loss_fn(bind_logits, bind_labels)
counts = counts + bind_labels.numel()
counts = 1 if counts == 0 else counts
loss = loss / counts
return loss
def training_step(self, batch, batch_idx):
# forward pass
output = self(batch["images"], batch["times"], batch["lens"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
self.log("train_metrics/loss", loss_val)
for i, path in enumerate(self.val_pathology_list):
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
self.train_acc[i](logits, labels)
self.log(
f"train_metrics/accuracy_{path}",
self.train_acc[i],
on_step=True,
on_epoch=False,
)
return loss_val
def validation_step(self, batch, batch_idx):
# forward pass
output = self(batch["images"], batch["times"], batch["lens"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
result_logits = {}
result_labels = {}
self.log("val_metrics/loss", loss_val)
for path in self.val_pathology_list:
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
result_logits[path] = logits
result_labels[path] = labels
return {"logits": result_logits, "targets": result_labels}
def validation_epoch_end(self, outputs):
auc_vals = []
for i, path in enumerate(self.val_pathology_list):
logits = []
targets = []
for output in outputs:
logits.append(output["logits"][path].flatten())
targets.append(output["targets"][path].flatten())
logits = torch.cat(logits)
targets = torch.cat(targets)
print(f"path: {path}, len: {len(logits)}")
self.val_acc[i](logits, targets)
try:
auc_val = pl.metrics.functional.auroc(torch.sigmoid(logits), targets)
auc_vals.append(auc_val)
except ValueError:
auc_val = 0
print(f"path: {path}, auc_val: {auc_val}")
self.log(
f"val_metrics/accuracy_{path}",
self.val_acc[i],
on_step=False,
on_epoch=True,
)
self.log(f"val_metrics/auc_{path}", auc_val)
self.log("val_metrics/auc_mean", sum(auc_vals) / len(auc_vals))
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.parameters(), self.learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--pretrained_file", type=Path, required=True)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--num_classes", default=14, type=int)
parser.add_argument("--val_pathology_list", nargs="+")
parser.add_argument("--pos_weights", default=None, type=float)
# Training params
parser.add_argument("--learning_rate", default=1e-3, type=float)
parser.add_argument("--epochs", default=50, type=int)
# Model params
parser.add_argument("--projection_dim", type=int, default=64)
parser.add_argument("--num_heads", type=int, default=2)
parser.add_argument("--feedforward_dim", type=int, default=128)
parser.add_argument("--drop_transformer", type=float, default=0.5)
parser.add_argument("--drop_cpe", type=float, default=0.5)
parser.add_argument(
"--pooling", choices=["last_timestep", "sum"], default="last_timestep"
)
parser.add_argument("--image_shape", default=(7, 7))
return parser
|
CovidPrognosis-main
|
cp_examples/mip_finetune/mip_model.py
|
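A worked sketch of the sinusoidal table built by ContinuousPosEncoding above, with illustrative sizes. The point of the "continuous" variant is that an exam is offset by the pe row matching its acquisition time, not its index in the sequence.

import math
import torch

dim, maxtime = 8, 10
position = torch.arange(0, maxtime, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))
pe = torch.zeros(maxtime, dim)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)

times = torch.tensor([0, 3, 7])    # e.g. days since admission for one series
xs = torch.zeros(len(times), dim)  # toy features, shape [N, dim]
ys = xs + pe[times]                # each exam shifted by its own time row
print(ys.shape)  # torch.Size([3, 8])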
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from warnings import warn
import numpy as np
import pytorch_lightning as pl
import torch
from covidprognosis.data.transforms import (
HistogramNormalize,
NanToInt,
RemapLabel,
TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from cp_examples.mip_finetune.mip_model import MIPModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"batch_size": 32,
"max_epochs": 50,
"gpus": 1,
"num_workers": 10,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser.add_argument("--uncertain_label", default=np.nan, type=float)
parser.add_argument("--nan_label", default=np.nan, type=float)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = MIPModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.pretrained_file is None:
warn("Pretrained file not specified, training from scratch.")
else:
logging.info(f"Loading pretrained file from {args.pretrained_file}")
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
checkpoint_dir.mkdir(exist_ok=True, parents=True)
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def fetch_pos_weights(csv, label_list, uncertain_label, nan_label):
pos = (csv[label_list] == 1).sum()
neg = (csv[label_list] == 0).sum()
if uncertain_label == 1:
pos = pos + (csv[label_list] == -1).sum()
elif uncertain_label == -1:
neg = neg + (csv[label_list] == -1).sum()
if nan_label == 1:
pos = pos + (csv[label_list].isna()).sum()
elif nan_label == -1:
neg = neg + (csv[label_list].isna()).sum()
    pos_weights = torch.tensor((neg / np.maximum(pos, 1)).values.astype(float))
return pos_weights
def create_data_module(train_transform_list, val_transform_list):
data_module = None # TODO: Create data loader
return data_module
def cli_main(args):
# ------------
# data
# ------------
train_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
NanToInt(args.nan_label),
]
val_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
]
data_module = create_data_module(train_transform_list, val_transform_list)
# ------------
# model
# ------------
pos_weights = fetch_pos_weights(
csv=data_module.train_dataset.csv,
label_list=data_module.label_list,
uncertain_label=args.uncertain_label,
nan_label=args.nan_label,
)
model = MIPModule(
args,
data_module.label_list,
pos_weights,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/mip_finetune/train_mip.py
|
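A worked example of the class-balancing arithmetic in fetch_pos_weights above, on a hypothetical single-label frame with uncertain_label == 1 (uncertain rows counted as positives):

import numpy as np
import pandas as pd
import torch

csv = pd.DataFrame({"Edema": [1, 0, 0, -1, np.nan]})
label_list = ["Edema"]
pos = (csv[label_list] == 1).sum()         # 1 positive
neg = (csv[label_list] == 0).sum()         # 2 negatives
pos = pos + (csv[label_list] == -1).sum()  # uncertain_label == 1 -> pos = 2
pos_weights = torch.tensor((neg / np.maximum(pos, 1)).values.astype(float))
print(pos_weights)  # tensor([1.], dtype=torch.float64)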
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from pathlib import Path
import pytorch_lightning as pl
import requests
import torch
import torchvision.models as models
from tqdm import tqdm
def filter_nans(logits, labels):
logits = logits[~torch.isnan(labels)]
labels = labels[~torch.isnan(labels)]
return logits, labels
def validate_pretrained_model(state_dict, pretrained_file):
# sanity check to make sure we're not altering weights
pretrained_dict = torch.load(pretrained_file, map_location="cpu")["state_dict"]
model_dict = dict()
for k, v in pretrained_dict.items():
if "model.encoder_q" in k:
model_dict[k[len("model.encoder_q.") :]] = v
for k in list(model_dict.keys()):
# only ignore fc layer
if "classifier.weight" in k or "classifier.bias" in k:
continue
if "fc.weight" in k or "fc.bias" in k:
continue
assert (
state_dict[k].cpu() == model_dict[k]
).all(), f"{k} changed in linear classifier training."
def download_model(url, fname):
response = requests.get(url, timeout=10, stream=True)
chunk_size = 8 * 1024 * 1024 # 8 MB chunks
total_size_in_bytes = int(response.headers.get("content-length", 0))
progress_bar = tqdm(
desc="Downloading state_dict",
total=total_size_in_bytes,
unit="iB",
unit_scale=True,
)
with open(fname, "wb") as fh:
for chunk in response.iter_content(chunk_size):
progress_bar.update(len(chunk))
fh.write(chunk)
class SipModule(pl.LightningModule):
def __init__(
self,
arch,
num_classes,
label_list,
val_pathology_list,
pretrained_file=None,
learning_rate=1e-3,
pos_weights=None,
epochs=5,
):
super().__init__()
        # guard the cast: str(None) would defeat the is-not-None checks below
        pretrained_file = str(pretrained_file) if pretrained_file is not None else None
self.label_list = label_list
self.val_pathology_list = val_pathology_list
self.learning_rate = learning_rate
self.epochs = epochs
self.pretrained_file = pretrained_file
# load the pretrained model
if pretrained_file is not None:
self.pretrained_file = str(self.pretrained_file)
# download the model if given a url
if "https://" in pretrained_file:
url = self.pretrained_file
self.pretrained_file = Path.cwd() / pretrained_file.split("/")[-1]
download_model(url, self.pretrained_file)
pretrained_dict = torch.load(self.pretrained_file)["state_dict"]
state_dict = {}
for k, v in pretrained_dict.items():
if k.startswith("model.encoder_q."):
k = k.replace("model.encoder_q.", "")
state_dict[k] = v
if "model.encoder_q.classifier.weight" in pretrained_dict.keys():
feature_dim = pretrained_dict[
"model.encoder_q.classifier.weight"
].shape[0]
in_features = pretrained_dict[
"model.encoder_q.classifier.weight"
].shape[1]
self.model = models.__dict__[arch](num_classes=feature_dim)
self.model.load_state_dict(state_dict)
del self.model.classifier
self.model.add_module(
"classifier", torch.nn.Linear(in_features, num_classes)
)
elif "model.encoder_q.fc.weight" in pretrained_dict.keys():
feature_dim = pretrained_dict["model.encoder_q.fc.weight"].shape[0]
in_features = pretrained_dict["model.encoder_q.fc.weight"].shape[1]
self.model = models.__dict__[arch](num_classes=feature_dim)
self.model.load_state_dict(state_dict)
del self.model.fc
self.model.add_module("fc", torch.nn.Linear(in_features, num_classes))
else:
raise RuntimeError("Unrecognized classifier.")
else:
self.model = models.__dict__[arch](num_classes=num_classes)
# loss function
if pos_weights is None:
pos_weights = torch.ones(num_classes)
self.register_buffer("pos_weights", pos_weights)
print(self.pos_weights)
# metrics
self.train_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in val_pathology_list]
)
self.val_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in val_pathology_list]
)
def on_epoch_start(self):
if self.pretrained_file is not None:
self.model.eval()
def forward(self, image):
return self.model(image)
def loss(self, output, target):
counts = 0
loss = 0
for i in range(len(output)):
pos_weights, _ = filter_nans(self.pos_weights, target[i])
loss_fn = torch.nn.BCEWithLogitsLoss(
pos_weight=pos_weights, reduction="sum"
)
bind_logits, bind_labels = filter_nans(output[i], target[i])
loss = loss + loss_fn(bind_logits, bind_labels)
counts = counts + bind_labels.numel()
counts = 1 if counts == 0 else counts
loss = loss / counts
return loss
def training_step(self, batch, batch_idx):
# forward pass
output = self(batch["image"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
self.log("train_metrics/loss", loss_val)
for i, path in enumerate(self.val_pathology_list):
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
self.train_acc[i](logits, labels)
self.log(
f"train_metrics/accuracy_{path}",
self.train_acc[i],
on_step=True,
on_epoch=False,
)
return loss_val
def validation_step(self, batch, batch_idx):
# forward pass
output = self(batch["image"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
result_logits = {}
result_labels = {}
self.log("val_metrics/loss", loss_val)
for path in self.val_pathology_list:
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
result_logits[path] = logits
result_labels[path] = labels
return {"logits": result_logits, "targets": result_labels}
def validation_epoch_end(self, outputs):
# make sure we didn't change the pretrained weights
if self.pretrained_file is not None:
validate_pretrained_model(self.model.state_dict(), self.pretrained_file)
auc_vals = []
for i, path in enumerate(self.val_pathology_list):
logits = []
targets = []
for output in outputs:
logits.append(output["logits"][path].flatten())
targets.append(output["targets"][path].flatten())
logits = torch.cat(logits)
targets = torch.cat(targets)
print(f"path: {path}, len: {len(logits)}")
self.val_acc[i](logits, targets)
try:
auc_val = pl.metrics.functional.auroc(torch.sigmoid(logits), targets)
auc_vals.append(auc_val)
except ValueError:
auc_val = 0
print(f"path: {path}, auc_val: {auc_val}")
self.log(
f"val_metrics/accuracy_{path}",
self.val_acc[i],
on_step=False,
on_epoch=True,
)
self.log(f"val_metrics/auc_{path}", auc_val)
self.log("val_metrics/auc_mean", sum(auc_vals) / len(auc_vals))
def configure_optimizers(self):
if self.pretrained_file is None:
model = self.model
else:
if hasattr(self.model, "classifier"):
model = self.model.classifier
elif hasattr(self.model, "fc"):
model = self.model.fc
else:
raise RuntimeError("Unrecognized classifier.")
optimizer = torch.optim.Adam(model.parameters(), self.learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--num_classes", default=14, type=int)
parser.add_argument("--pretrained_file", default=None, type=str)
parser.add_argument("--val_pathology_list", nargs="+")
parser.add_argument("--learning_rate", default=1e-2, type=float)
parser.add_argument("--pos_weights", default=None, type=float)
return parser
|
CovidPrognosis-main
|
cp_examples/sip_finetune/sip_finetune.py
|
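A minimal sketch of the checkpoint key remapping done in SipModule above: only query-encoder weights survive, with their "model.encoder_q." prefix stripped so they match a bare torchvision model. The dictionary contents are illustrative.

pretrained_dict = {
    "model.encoder_q.features.conv0.weight": "w0",
    "model.encoder_q.classifier.weight": "w1",
    "model.encoder_k.classifier.weight": "w2",  # momentum encoder: dropped
    "model.queue": "q",                         # negative queue: dropped
}
state_dict = {}
for k, v in pretrained_dict.items():
    if k.startswith("model.encoder_q."):
        state_dict[k.replace("model.encoder_q.", "")] = v
print(sorted(state_dict))  # ['classifier.weight', 'features.conv0.weight']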
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from warnings import warn
import numpy as np
import pytorch_lightning as pl
import torch
import yaml
from covidprognosis.data.transforms import (
Compose,
HistogramNormalize,
NanToInt,
RemapLabel,
TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from sip_finetune import SipModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
data_config = Path.cwd() / "../../configs/data.yaml"
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"batch_size": 32,
"max_epochs": 5,
"gpus": 1,
"num_workers": 10,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser.add_argument("--uncertain_label", default=np.nan, type=float)
parser.add_argument("--nan_label", default=np.nan, type=float)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = SipModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.pretrained_file is None:
warn("Pretrained file not specified, training from scratch.")
else:
logging.info(f"Loading pretrained file from {args.pretrained_file}")
if args.dataset_dir is None:
with open(data_config, "r") as f:
paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]
if args.dataset_name == "nih":
args.dataset_dir = paths["nih"]
if args.dataset_name == "mimic":
args.dataset_dir = paths["mimic"]
elif args.dataset_name == "chexpert":
args.dataset_dir = paths["chexpert"]
elif args.dataset_name == "mimic-chexpert":
args.dataset_dir = [paths["chexpert"], paths["mimic"]]
else:
raise ValueError("Unrecognized path config.")
if args.dataset_name in ("chexpert", "mimic", "mimic-chexpert"):
args.val_pathology_list = [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Pleural Effusion",
]
elif args.dataset_name == "nih":
args.val_pathology_list = [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Effusion",
]
else:
raise ValueError("Unrecognized dataset.")
# ------------
# checkpoints
# ------------
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
if not checkpoint_dir.exists():
checkpoint_dir.mkdir(parents=True)
elif args.resume_from_checkpoint is None:
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def fetch_pos_weights(dataset_name, csv, label_list, uncertain_label, nan_label):
if dataset_name == "nih":
pos = [(csv["Finding Labels"].str.contains(lab)).sum() for lab in label_list]
neg = [(~csv["Finding Labels"].str.contains(lab)).sum() for lab in label_list]
        pos_weights = torch.tensor((neg / np.maximum(pos, 1)).astype(float))
else:
pos = (csv[label_list] == 1).sum()
neg = (csv[label_list] == 0).sum()
if uncertain_label == 1:
pos = pos + (csv[label_list] == -1).sum()
elif uncertain_label == -1:
neg = neg + (csv[label_list] == -1).sum()
if nan_label == 1:
pos = pos + (csv[label_list].isna()).sum()
elif nan_label == -1:
neg = neg + (csv[label_list].isna()).sum()
        pos_weights = torch.tensor((neg / np.maximum(pos, 1)).values.astype(float))
return pos_weights
def cli_main(args):
# ------------
# data
# ------------
train_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
NanToInt(args.nan_label),
]
val_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
]
data_module = XrayDataModule(
dataset_name=args.dataset_name,
dataset_dir=args.dataset_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
train_transform=Compose(train_transform_list),
val_transform=Compose(val_transform_list),
test_transform=Compose(val_transform_list),
)
# ------------
# model
# ------------
pos_weights = fetch_pos_weights(
dataset_name=args.dataset_name,
csv=data_module.train_dataset.csv,
label_list=data_module.label_list,
uncertain_label=args.uncertain_label,
nan_label=args.nan_label,
)
model = SipModule(
arch=args.arch,
num_classes=len(data_module.label_list),
pretrained_file=args.pretrained_file,
label_list=data_module.label_list,
val_pathology_list=args.val_pathology_list,
learning_rate=args.learning_rate,
pos_weights=pos_weights,
epochs=args.max_epochs,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/sip_finetune/train_sip.py
|
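SipModule.loss above (and the identical MIPModule.loss) averages a BCE sum over labeled entries only; a self-contained sketch of that NaN-masked reduction, with pos_weight omitted for brevity:

import torch

def filter_nans(logits, labels):
    keep = ~torch.isnan(labels)
    return logits[keep], labels[keep]

output = torch.tensor([[0.5, -0.5], [2.0, 0.0]])
target = torch.tensor([[1.0, float("nan")], [0.0, 1.0]])

loss, counts = 0.0, 0
for i in range(len(output)):
    bind_logits, bind_labels = filter_nans(output[i], target[i])
    loss = loss + torch.nn.BCEWithLogitsLoss(reduction="sum")(bind_logits, bind_labels)
    counts += bind_labels.numel()
loss = loss / max(counts, 1)  # average over the 3 labeled entries only
print(loss)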
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import json
import os
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import timm
assert timm.__version__ == "0.3.2" # version check
import timm.optim.optim_factory as optim_factory
from engine_pretrain import train_one_epoch
import models.fcmae as fcmae
import utils
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import str2bool
def get_args_parser():
parser = argparse.ArgumentParser('FCMAE pre-training', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=800, type=int)
parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
help='epochs to warmup LR')
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation step')
# Model parameters
parser.add_argument('--model', default='convnextv2_base', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--mask_ratio', default=0.6, type=float,
help='Masking ratio (percentage of removed patches).')
parser.add_argument('--norm_pix_loss', action='store_true',
help='Use (per-patch) normalized pixels as targets for computing loss')
parser.set_defaults(norm_pix_loss=True)
parser.add_argument('--decoder_depth', type=int, default=1)
parser.add_argument('--decoder_embed_dim', type=int, default=512)
# Optimizer parameters
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=1.5e-4, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# simple augmentation
transform_train = transforms.Compose([
transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), # 3 is bicubic
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
print(dataset_train)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
# log_writer = SummaryWriter(log_dir=args.log_dir)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
# define the model
model = fcmae.__dict__[args.model](
mask_ratio=args.mask_ratio,
decoder_depth=args.decoder_depth,
decoder_embed_dim=args.decoder_embed_dim,
norm_pix_loss=args.norm_pix_loss
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
eff_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // eff_batch_size
if args.lr is None:
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.update_freq)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
print(optimizer)
loss_scaler = NativeScaler()
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, data_loader_train,
optimizer, device, epoch, loss_scaler,
log_writer=log_writer,
args=args
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
args = get_args_parser()
args = args.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
ConvNeXt-V2-main
|
main_pretrain.py
|
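A worked example of the linear lr scaling rule in main above; the cluster size is illustrative.

batch_size, update_freq, world_size = 64, 1, 16
blr = 1.5e-4
eff_batch_size = batch_size * update_freq * world_size  # 1024
lr = blr * eff_batch_size / 256                         # 6.0e-04
print(eff_batch_size, lr)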
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main_finetune as trainer
import submitit
def parse_args():
trainer_parser = trainer.get_args_parser()
parser = argparse.ArgumentParser("Submitit for finetune", parents=[trainer_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_finetune as trainer
self._setup_gpu_args()
trainer.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="finetune")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
ConvNeXt-V2-main
|
submitit_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import create_transform
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 100
elif args.data_set == 'IMNET':
print("reading from datapath", args.data_path)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
print("Number of the class = %d" % nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
# warping (no cropping) when evaluated at 384 or larger
if args.input_size >= 384:
t.append(
transforms.Resize((args.input_size, args.input_size),
interpolation=transforms.InterpolationMode.BICUBIC),
)
print(f"Warping {args.input_size} size input images...")
else:
if args.crop_pct is None:
args.crop_pct = 224 / 256
size = int(args.input_size / args.crop_pct)
t.append(
# to maintain same ratio w.r.t. 224 images
transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
|
ConvNeXt-V2-main
|
datasets.py
|
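Eval-time sizing in build_transform above keeps the canonical 224/256 ratio: resize the short side to input_size / crop_pct, then center-crop. A minimal sketch:

from torchvision import transforms

input_size = 224
crop_pct = 224 / 256
size = int(input_size / crop_pct)  # 256, same ratio w.r.t. 224 images
eval_tf = transforms.Compose([
    transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.CenterCrop(input_size),
    transforms.ToTensor(),
])
print(size)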
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
from utils import adjust_learning_rate
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
log_writer=None, args=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
update_freq = args.update_freq
use_amp = args.use_amp
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % update_freq == 0:
adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if use_amp:
with torch.cuda.amp.autocast():
output = model(samples)
loss = criterion(output, targets)
else: # full precision
output = model(samples)
loss = criterion(output, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
assert math.isfinite(loss_value)
if use_amp:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
else: # full precision
loss /= update_freq
loss.backward()
if (data_iter_step + 1) % update_freq == 0:
optimizer.step()
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
if use_amp:
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
if use_amp:
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, use_amp=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if use_amp:
with torch.cuda.amp.autocast():
output = model(images)
if isinstance(output, dict):
output = output['logits']
loss = criterion(output, target)
else:
output = model(images)
if isinstance(output, dict):
output = output['logits']
loss = criterion(output, target)
torch.cuda.synchronize()
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
ConvNeXt-V2-main
|
engine_finetune.py
|
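A minimal sketch of the update_freq gradient-accumulation pattern used by train_one_epoch above (full-precision branch); the model and data are toy stand-ins.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
update_freq = 2

optimizer.zero_grad()
for step in range(4):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    (loss / update_freq).backward()  # scale so accumulated grads average out
    if (step + 1) % update_freq == 0:
        optimizer.step()
        optimizer.zero_grad()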
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Iterable
import torch
import utils
def train_one_epoch(model: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler,
log_writer=None,
args=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
update_freq = args.update_freq
optimizer.zero_grad()
for data_iter_step, (samples, labels) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % update_freq == 0:
utils.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
if not isinstance(samples, list):
samples = samples.to(device, non_blocking=True)
labels = labels.to(device, non_blocking=True)
loss, _, _ = model(samples, labels, mask_ratio=args.mask_ratio)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= update_freq
loss_scaler(loss, optimizer, parameters=model.parameters(),
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
torch.cuda.empty_cache() # clear the GPU cache at a regular interval for training ME network
metric_logger.update(loss=loss_value)
lr = optimizer.param_groups[0]["lr"]
metric_logger.update(lr=lr)
loss_value_reduce = utils.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % update_freq == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.update(train_loss=loss_value_reduce, head="loss", step=epoch_1000x)
log_writer.update(lr=lr, head="opt", step=epoch_1000x)
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
ConvNeXt-V2-main
|
engine_pretrain.py
|
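How the epoch_1000x tensorboard x-axis above is computed: fractional epoch progress scaled by 1000, so curves line up when the batch size changes. Values illustrative.

data_iter_step, num_batches, epoch = 250, 500, 3
epoch_1000x = int((data_iter_step / num_batches + epoch) * 1000)
print(epoch_1000x)  # 3500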
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse  # needed by str2bool below
import os
import math
import time
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from tensorboardX import SummaryWriter
from collections import OrderedDict
def str2bool(v):
"""
Converts string to bool type; enables command line
arguments in the format of '--arg1 true --arg2 false'
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
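# --- Editor's note: small usage sketch, added for clarity; not part of the
# original file. `avg`/`median` look at the sliding window only, while
# `global_avg` uses the running totals, so the two diverge once the window
# overflows.
def _demo_smoothed_value():
    sv = SmoothedValue(window_size=2)
    for v in (1.0, 2.0, 3.0):
        sv.update(v)
    assert sv.global_avg == 2.0  # (1 + 2 + 3) / 3 over every update
    assert sv.avg == 2.5         # mean of the last two values only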
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
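# --- Editor's note: minimal usage sketch, added for clarity; not part of the
# original file. log_every wraps any sized iterable, yields its items
# unchanged, and prints meter/timing summaries every `print_freq` iterations.
def _demo_metric_logger():
    logger = MetricLogger(delimiter="  ")
    for step in logger.log_every(range(4), print_freq=2, header='Demo:'):
        logger.update(loss=float(step))
    return {k: m.global_avg for k, m in logger.meters.items()}  # {'loss': 1.5}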
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
class WandbLogger(object):
def __init__(self, args):
self.args = args
try:
import wandb
self._wandb = wandb
except ImportError:
raise ImportError(
"To use the Weights and Biases Logger please install wandb."
"Run `pip install wandb` to install it."
)
# Initialize a W&B run
if self._wandb.run is None:
self._wandb.init(
project=args.project,
config=args
)
def log_epoch_metrics(self, metrics, commit=True):
"""
Log train/test metrics onto W&B.
"""
# Log number of model parameters as W&B summary
self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None)
metrics.pop('n_parameters', None)
# Log current epoch
self._wandb.log({'epoch': metrics.get('epoch')}, commit=False)
metrics.pop('epoch')
for k, v in metrics.items():
if 'train' in k:
self._wandb.log({f'Global Train/{k}': v}, commit=False)
elif 'test' in k:
self._wandb.log({f'Global Test/{k}': v}, commit=False)
self._wandb.log({})
def log_checkpoints(self):
output_dir = self.args.output_dir
model_artifact = self._wandb.Artifact(
self._wandb.run.id + "_model", type="model"
)
model_artifact.add_dir(output_dir)
self._wandb.log_artifact(model_artifact, aliases=["latest", "best"])
def set_steps(self):
# Set global training step
self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step')
# Set epoch-wise step
self._wandb.define_metric('Global Train/*', step_metric='epoch')
self._wandb.define_metric('Global Test/*', step_metric='epoch')
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def all_reduce_mean(x):
world_size = get_world_size()
if world_size > 1:
x_reduce = torch.tensor(x).cuda()
dist.all_reduce(x_reduce)
x_reduce /= world_size
return x_reduce.item()
else:
return x
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
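# --- Editor's note: worked example, added for clarity; not part of the
# original file. The default norm_type=2.0 returns the global L2 norm over
# all parameter gradients.
def _demo_get_grad_norm():
    w = torch.nn.Parameter(torch.ones(3))
    (2.0 * w.sum()).backward()       # gradient of each entry is 2
    total = get_grad_norm_([w])      # sqrt(3 * 2**2) = sqrt(12)
    assert torch.isclose(total, torch.tensor(12.0).sqrt())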
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
if is_main_process() and isinstance(epoch, int):
to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq
old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del)
if os.path.exists(old_ckpt):
os.remove(old_ckpt)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema'
args.start_epoch = checkpoint['epoch'] + 1
else:
assert args.eval, 'Does not support resuming with checkpoint-best'
if hasattr(args, 'model_ema') and args.model_ema:
if 'model_ema' in checkpoint.keys():
model_ema.ema.load_state_dict(checkpoint['model_ema'])
else:
model_ema.ema.load_state_dict(checkpoint['model'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
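# --- Editor's note: worked example, added for clarity; not part of the
# original file. The schedule holds one value per iteration: a linear warmup
# from start_warmup_value to base_value, then a half-cosine decay towards
# final_value.
def _demo_cosine_scheduler():
    sched = cosine_scheduler(base_value=1.0, final_value=0.1,
                             epochs=3, niter_per_ep=10, warmup_epochs=1)
    assert len(sched) == 30
    assert sched[0] == 0.0                 # warmup starts at start_warmup_value
    assert abs(sched[10] - 1.0) < 1e-8     # first post-warmup step is base_value
    assert sched[-1] >= 0.1                # tail decays towards final_value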
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate with half-cycle cosine after warmup"""
if epoch < args.warmup_epochs:
lr = args.lr * epoch / args.warmup_epochs
else:
lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
(1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
def remap_checkpoint_keys(ckpt):
new_ckpt = OrderedDict()
for k, v in ckpt.items():
if k.startswith('encoder'):
k = '.'.join(k.split('.')[1:]) # remove encoder in the name
if k.endswith('kernel'):
k = '.'.join(k.split('.')[:-1]) # remove kernel in the name
new_k = k + '.weight'
if len(v.shape) == 3: # reshape standard convolution
kv, in_dim, out_dim = v.shape
ks = int(math.sqrt(kv))
new_ckpt[new_k] = v.permute(2, 1, 0).\
reshape(out_dim, in_dim, ks, ks).transpose(3, 2)
elif len(v.shape) == 2: # reshape depthwise convolution
kv, dim = v.shape
ks = int(math.sqrt(kv))
new_ckpt[new_k] = v.permute(1, 0).\
reshape(dim, 1, ks, ks).transpose(3, 2)
continue
elif 'ln' in k or 'linear' in k:
k = k.split('.')
k.pop(-2) # remove ln and linear in the name
new_k = '.'.join(k)
else:
new_k = k
new_ckpt[new_k] = v
# reshape grn affine parameters and biases
for k, v in new_ckpt.items():
if k.endswith('bias') and len(v.shape) != 1:
new_ckpt[k] = v.reshape(-1)
elif 'grn' in k:
new_ckpt[k] = v.unsqueeze(0).unsqueeze(1)
return new_ckpt
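# --- Editor's note: worked example, added for clarity; not part of the
# original file. The key below is hypothetical but follows the naming FCMAE
# pre-training produces: a depthwise kernel stored sparsely as (k*k, dim)
# becomes a dense nn.Conv2d weight of shape (dim, 1, k, k).
def _demo_remap_checkpoint_keys():
    ckpt = OrderedDict({'encoder.stages.0.0.dwconv.kernel': torch.randn(49, 8)})
    new_ckpt = remap_checkpoint_keys(ckpt)
    assert list(new_ckpt.keys()) == ['stages.0.0.dwconv.weight']
    assert new_ckpt['stages.0.0.dwconv.weight'].shape == (8, 1, 7, 7)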
|
ConvNeXt-V2-main
|
utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main_pretrain as trainer
import submitit
def parse_args():
trainer_parser = trainer.get_args_parser()
parser = argparse.ArgumentParser("Submitit for pretrain", parents=[trainer_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_pretrain as trainer
self._setup_gpu_args()
trainer.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="pretrain")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
ConvNeXt-V2-main
|
submitit_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import json
import os
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, LayerDecayValueAssigner
from datasets import build_dataset
from engine_finetune import train_one_epoch, evaluate
import utils
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import str2bool, remap_checkpoint_keys
import models.convnextv2 as convnextv2
def get_args_parser():
parser = argparse.ArgumentParser('FCMAE fine-tuning', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation steps')
# Model parameters
parser.add_argument('--model', default='convnextv2_base', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--drop_path', type=float, default=0., metavar='PCT',
help='Drop path rate (default: 0.)')
parser.add_argument('--layer_decay_type', type=str, choices=['single', 'group'], default='single',
help="""Layer decay strategies. The single strategy assigns a distinct decaying value for each layer,
whereas the group strategy assigns the same decaying value for three consecutive layers""")
# EMA related parameters
parser.add_argument('--model_ema', type=str2bool, default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False, help='')
parser.add_argument('--model_ema_eval', type=str2bool, default=False, help='Using ema to eval during training.')
# Optimization parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=5e-4, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=1.0)
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', type=str2bool, default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0.,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--head_init_scale', default=0.001, type=float,
help='classifier head initial scale, typically adjusted in fine-tuning')
parser.add_argument('--model_key', default='model|module', type=str,
help='which key to load from saved state dict, usually model or model_ema')
parser.add_argument('--model_prefix', default='', type=str)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
type=str, help='ImageNet dataset path')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', type=str2bool, default=False,
help='Perform evaluation only')
parser.add_argument('--dist_eval', type=str2bool, default=True,
help='Enabling distributed evaluation')
parser.add_argument('--disable_eval', type=str2bool, default=False,
help='Disabling evaluation during training')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--use_amp', type=str2bool, default=False,
help="Use apex AMP (Automatic Mixed Precision) or not")
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval:
args.dist_eval = False
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = convnextv2.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
head_init_scale=args.head_init_scale,
)
if args.finetune:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# remove decoder weights
checkpoint_model_keys = list(checkpoint_model.keys())
for k in checkpoint_model_keys:
if 'decoder' in k or 'mask_token' in k or \
'proj' in k or 'pred' in k:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
checkpoint_model = remap_checkpoint_keys(checkpoint_model)
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
torch.nn.init.constant_(model.head.bias, 0.)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
eff_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // eff_batch_size
if args.lr is None:
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.update_freq)
print("effective batch size: %d" % eff_batch_size)
if args.layer_decay != 1.0:
assert args.layer_decay_type in ['single', 'group']
if args.layer_decay_type == 'group': # applies for Base and Large models
num_layers = 12
else:
num_layers = sum(model_without_ddp.depths)
assigner = LayerDecayValueAssigner(
list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)),
depths=model_without_ddp.depths, layer_decay_type=args.layer_decay_type)
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=None,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
print(f"Eval only mode")
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
return
max_accuracy = 0.0
if args.model_ema and args.model_ema_eval:
max_accuracy_ema = 0.0
print("Start training for %d epochs" % args.epochs)
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer,
args=args
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
# repeat testing routines for EMA, if ema eval is turned on
if args.model_ema and args.model_ema_eval:
test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
if max_accuracy_ema < test_stats_ema["acc1"]:
max_accuracy_ema = test_stats_ema["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema)
print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('FCMAE fine-tuning', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
ConvNeXt-V2-main
|
main_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_convnext_single(var_name, depths):
"""
Each layer is assigned distinctive layer ids
"""
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
layer_id = sum(depths[:stage_id]) + 1
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
layer_id = sum(depths[:stage_id]) + block_id + 1
return layer_id
else:
return sum(depths) + 1
def get_num_layer_for_convnext(var_name):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
return num_max_layer + 1
class LayerDecayValueAssigner(object):
def __init__(self, values, depths=[3,3,27,3], layer_decay_type='single'):
self.values = values
self.depths = depths
self.layer_decay_type = layer_decay_type
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
if self.layer_decay_type == 'single':
return get_num_layer_for_convnext_single(var_name, self.depths)
else:
return get_num_layer_for_convnext(var_name)
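# --- Editor's note: illustrative sketch, added for clarity; not part of the
# original file. It mirrors how main_finetune.py builds the value list:
# [d**(N+1), d**N, ..., d**0], so earlier layers get smaller lr scales.
def _demo_layer_decay_assigner():
    num_layers, d = 12, 0.9
    values = [d ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    assigner = LayerDecayValueAssigner(values, layer_decay_type='group')
    assert assigner.get_layer_id('downsample_layers.0.0.weight') == 0
    assert assigner.get_scale(0) == d ** 13           # stem: strongest decay
    assert assigner.get_scale(num_layers + 1) == 1.0  # head: full lr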
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list or \
name.endswith(".gamma") or name.endswith(".beta"):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
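# --- Editor's note: tiny usage sketch, added for clarity; not part of the
# original file. 1-D parameters (biases, norm weights) land in a "no_decay"
# group with weight_decay 0; everything else keeps the requested decay.
def _demo_get_parameter_groups():
    model = torch.nn.Linear(4, 2)
    groups = get_parameter_groups(model, weight_decay=0.05)
    assert sorted(g['weight_decay'] for g in groups) == [0.0, 0.05]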
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
# if weight_decay and filter_bias_and_bn:
if filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False and "Invalid optimizer"
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
|
ConvNeXt-V2-main
|
optim_factory.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from .utils import LayerNorm, GRN
class Block(nn.Module):
""" ConvNeXtV2 Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
"""
def __init__(self, dim, drop_path=0.):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.grn = GRN(4 * dim)
self.pwconv2 = nn.Linear(4 * dim, dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
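# --- Editor's note: shape sketch, added for clarity; not part of the original
# file. A Block is channel- and resolution-preserving, so a stage can stack
# any number of them without changing the feature map size.
def _demo_block():
    block = Block(dim=32)
    x = torch.randn(2, 32, 14, 14)
    assert block(x).shape == x.shape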
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def convnextv2_atto(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
return model
def convnextv2_femto(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
return model
def convnextv2_pico(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
return model
def convnextv2_nano(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
return model
def convnextv2_tiny(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
return model
def convnextv2_base(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
return model
def convnextv2_large(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
return model
def convnextv2_huge(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], **kwargs)
return model
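# --- Editor's note: usage sketch, added for clarity; not part of the original
# file. All factories expose the same classifier interface; only depths/dims
# differ between model sizes.
def _demo_convnextv2():
    model = convnextv2_atto(num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    assert logits.shape == (1, 10)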
|
ConvNeXt-V2-main
|
models/convnextv2.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from timm.models.layers import trunc_normal_
from .utils import (
LayerNorm,
MinkowskiLayerNorm,
MinkowskiGRN,
MinkowskiDropPath
)
from MinkowskiEngine import (
MinkowskiConvolution,
MinkowskiDepthwiseConvolution,
MinkowskiLinear,
MinkowskiGELU
)
from MinkowskiOps import (
to_sparse,
)
class Block(nn.Module):
""" Sparse ConvNeXtV2 Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., D=3):
super().__init__()
self.dwconv = MinkowskiDepthwiseConvolution(dim, kernel_size=7, bias=True, dimension=D)
self.norm = MinkowskiLayerNorm(dim, 1e-6)
self.pwconv1 = MinkowskiLinear(dim, 4 * dim)
self.act = MinkowskiGELU()
self.pwconv2 = MinkowskiLinear(4 * dim, dim)
self.grn = MinkowskiGRN(4 * dim)
self.drop_path = MinkowskiDropPath(drop_path)
def forward(self, x):
input = x
x = self.dwconv(x)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = input + self.drop_path(x)
return x
class SparseConvNeXtV2(nn.Module):
""" Sparse ConvNeXtV2.
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.,
D=3):
super().__init__()
self.depths = depths
self.num_classes = num_classes
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
MinkowskiLayerNorm(dims[i], eps=1e-6),
MinkowskiConvolution(dims[i], dims[i+1], kernel_size=2, stride=2, bias=True, dimension=D)
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j], D=D) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, MinkowskiConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiDepthwiseConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiLinear):
trunc_normal_(m.linear.weight, std=.02)
nn.init.constant_(m.linear.bias, 0)
def upsample_mask(self, mask, scale):
assert len(mask.shape) == 2
p = int(mask.shape[1] ** .5)
return mask.reshape(-1, p, p).\
repeat_interleave(scale, dim=1).\
repeat_interleave(scale, dim=2)
def forward(self, x, mask):
num_stages = len(self.stages)
mask = self.upsample_mask(mask, 2**(num_stages-1))
mask = mask.unsqueeze(1).type_as(x)
# patch embedding
x = self.downsample_layers[0](x)
x *= (1.-mask)
# sparse encoding
x = to_sparse(x)
for i in range(4):
x = self.downsample_layers[i](x) if i > 0 else x
x = self.stages[i](x)
# densify
x = x.dense()[0]
return x
|
ConvNeXt-V2-main
|
models/convnextv2_sparse.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from MinkowskiEngine import (
MinkowskiConvolution,
MinkowskiDepthwiseConvolution,
MinkowskiLinear,
)
from timm.models.layers import trunc_normal_
from .convnextv2_sparse import SparseConvNeXtV2
from .convnextv2 import Block
class FCMAE(nn.Module):
""" Fully Convolutional Masked Autoencoder with ConvNeXtV2 backbone
"""
def __init__(
self,
img_size=224,
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
decoder_depth=1,
decoder_embed_dim=512,
patch_size=32,
mask_ratio=0.6,
norm_pix_loss=False):
super().__init__()
# configs
self.img_size = img_size
self.depths = depths
self.dims = dims
self.patch_size = patch_size
self.mask_ratio = mask_ratio
self.num_patches = (img_size // patch_size) ** 2
self.decoder_embed_dim = decoder_embed_dim
self.decoder_depth = decoder_depth
self.norm_pix_loss = norm_pix_loss
# encoder
self.encoder = SparseConvNeXtV2(
in_chans=in_chans, depths=depths, dims=dims, D=2)
# decoder
self.proj = nn.Conv2d(
in_channels=dims[-1],
out_channels=decoder_embed_dim,
kernel_size=1)
# mask tokens
self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim, 1, 1))
decoder = [Block(
dim=decoder_embed_dim,
drop_path=0.) for i in range(decoder_depth)]
self.decoder = nn.Sequential(*decoder)
# pred
self.pred = nn.Conv2d(
in_channels=decoder_embed_dim,
out_channels=patch_size ** 2 * in_chans,
kernel_size=1)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, MinkowskiConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiDepthwiseConvolution):
trunc_normal_(m.kernel)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiLinear):
trunc_normal_(m.linear.weight)
nn.init.constant_(m.linear.bias, 0)
if isinstance(m, nn.Conv2d):
w = m.weight.data
trunc_normal_(w.view([w.shape[0], -1]))
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if hasattr(self, 'mask_token'):
torch.nn.init.normal_(self.mask_token, std=.02)
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
"""
p = self.patch_size
assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
h = w = imgs.shape[2] // p
x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
x = torch.einsum('nchpwq->nhwpqc', x)
x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
return x
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
imgs: (N, 3, H, W)
"""
p = self.patch_size
h = w = int(x.shape[1]**.5)
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
x = torch.einsum('nhwpqc->nchpwq', x)
imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
return imgs
def gen_random_mask(self, x, mask_ratio):
N = x.shape[0]
L = (x.shape[2] // self.patch_size) ** 2
len_keep = int(L * (1 - mask_ratio))
noise = torch.randn(N, L, device=x.device)
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1)
ids_restore = torch.argsort(ids_shuffle, dim=1)
# generate the binary mask: 0 is keep 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return mask
def upsample_mask(self, mask, scale):
assert len(mask.shape) == 2
p = int(mask.shape[1] ** .5)
return mask.reshape(-1, p, p).\
repeat_interleave(scale, dim=1).\
repeat_interleave(scale, dim=2)
def forward_encoder(self, imgs, mask_ratio):
# generate random masks
mask = self.gen_random_mask(imgs, mask_ratio)
# encoding
x = self.encoder(imgs, mask)
return x, mask
def forward_decoder(self, x, mask):
x = self.proj(x)
# append mask token
n, c, h, w = x.shape
mask = mask.reshape(-1, h, w).unsqueeze(1).type_as(x)
mask_token = self.mask_token.repeat(x.shape[0], 1, x.shape[2], x.shape[3])
x = x * (1. - mask) + mask_token * mask
# decoding
x = self.decoder(x)
# pred
pred = self.pred(x)
return pred
def forward_loss(self, imgs, pred, mask):
"""
imgs: [N, 3, H, W]
pred: [N, L, p*p*3]
mask: [N, L], 0 is keep, 1 is remove
"""
if len(pred.shape) == 4:
n, c, _, _ = pred.shape
pred = pred.reshape(n, c, -1)
pred = torch.einsum('ncl->nlc', pred)
target = self.patchify(imgs)
if self.norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.e-6)**.5
loss = (pred - target) ** 2
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
return loss
def forward(self, imgs, labels=None, mask_ratio=0.6):
x, mask = self.forward_encoder(imgs, mask_ratio)
pred = self.forward_decoder(x, mask)
loss = self.forward_loss(imgs, pred, mask)
return loss, pred, mask
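# --- Editor's note: sanity-check sketch, added for clarity; not part of the
# original file (and, like the module itself, it assumes MinkowskiEngine is
# installed). patchify/unpatchify are exact inverses, and gen_random_mask
# removes exactly L - int(L * (1 - mask_ratio)) patches per sample.
def _demo_fcmae_patchify_and_mask():
    mae = FCMAE(img_size=224, patch_size=32)
    imgs = torch.randn(2, 3, 224, 224)
    patches = mae.patchify(imgs)                     # (2, 49, 32*32*3)
    assert patches.shape == (2, 49, 32 * 32 * 3)
    assert torch.allclose(mae.unpatchify(patches), imgs)
    mask = mae.gen_random_mask(imgs, mask_ratio=0.6)
    len_keep = int(49 * (1 - 0.6))                   # visible patches per sample
    assert (mask.sum(dim=1) == 49 - len_keep).all()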
def convnextv2_atto(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
return model
def convnextv2_femto(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
return model
def convnextv2_pico(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
return model
def convnextv2_nano(**kwargs):
model = FCMAE(
depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
return model
def convnextv2_tiny(**kwargs):
model = FCMAE(
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
return model
def convnextv2_base(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
return model
def convnextv2_large(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
return model
def convnextv2_huge(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], **kwargs)
return model
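# Minimal pretraining-step sketch (added; not part of the original file).
# It assumes the FCMAE constructor defaults defined earlier in this file
# (224x224 RGB inputs) and a working MinkowskiEngine install for the sparse
# encoder; treat it as illustrative rather than a training recipe.
if __name__ == "__main__":
    model = convnextv2_atto()
    imgs = torch.randn(2, 3, 224, 224)
    loss, pred, mask = model(imgs, mask_ratio=0.6)
    loss.backward()
    print(loss.item(), pred.shape, mask.shape)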
|
ConvNeXt-V2-main
|
models/fcmae.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy.random as random
import torch
import torch.nn as nn
import torch.nn.functional as F
from MinkowskiEngine import SparseTensor
class MinkowskiGRN(nn.Module):
""" GRN layer for sparse tensors.
"""
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, dim))
self.beta = nn.Parameter(torch.zeros(1, dim))
def forward(self, x):
cm = x.coordinate_manager
in_key = x.coordinate_map_key
Gx = torch.norm(x.F, p=2, dim=0, keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return SparseTensor(
self.gamma * (x.F * Nx) + self.beta + x.F,
coordinate_map_key=in_key,
coordinate_manager=cm)
class MinkowskiDropPath(nn.Module):
""" Drop Path for sparse tensors.
"""
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(MinkowskiDropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
if self.drop_prob == 0. or not self.training:
return x
cm = x.coordinate_manager
in_key = x.coordinate_map_key
keep_prob = 1 - self.drop_prob
mask = torch.cat([
torch.ones(len(_)) if random.uniform(0, 1) > self.drop_prob
else torch.zeros(len(_)) for _ in x.decomposed_coordinates
]).view(-1, 1).to(x.device)
if keep_prob > 0.0 and self.scale_by_keep:
mask.div_(keep_prob)
return SparseTensor(
x.F * mask,
coordinate_map_key=in_key,
coordinate_manager=cm)
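# Added note: unlike the dense DropPath, the Bernoulli draw here is made once
# per sample in the sparse batch; every coordinate of a dropped sample is
# zeroed, and survivors are rescaled by 1/keep_prob when scale_by_keep=True.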
class MinkowskiLayerNorm(nn.Module):
""" Channel-wise layer normalization for sparse tensors.
"""
def __init__(
self,
normalized_shape,
eps=1e-6,
):
super(MinkowskiLayerNorm, self).__init__()
self.ln = nn.LayerNorm(normalized_shape, eps=eps)
def forward(self, input):
output = self.ln(input.F)
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager)
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class GRN(nn.Module):
""" GRN (Global Response Normalization) layer
"""
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, x):
Gx = torch.norm(x, p=2, dim=(1,2), keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return self.gamma * (x * Nx) + self.beta + x
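# Shape smoke test (added; not part of the original file). LayerNorm in
# channels_first mode expects (N, C, H, W); GRN expects channels-last
# (N, H, W, C), as used inside ConvNeXt-V2 blocks. Both preserve shape.
if __name__ == "__main__":
    ln = LayerNorm(64, data_format="channels_first")
    x = torch.randn(2, 64, 14, 14)
    assert ln(x).shape == x.shape
    grn = GRN(64)
    y = torch.randn(2, 14, 14, 64)
    assert grn(y).shape == y.shape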
|
ConvNeXt-V2-main
|
models/utils.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import pickle
import json
from pricefunction import PriceFunction
import pandas
class Dam(object):
def __init__(self,
instance=0,
):
self._instance = instance
self._marketpath="../marketinfo/"
if(instance not in [0,1,2,3,4]):
raise ValueError("the instance id is incorrect. it must be 0, 1, 2, 3, or 4.")
return
def getbudget(self,):
budget = numpy.loadtxt(self._marketpath+str(self._instance)+"/price/"+"/budget.txt")
return float(budget)
def getbuyerdata(self,):
path = self._marketpath+str(self._instance)+"/data_buyer/"+"/20.csv"
buydata = pandas.read_csv(path,header=None,engine="pyarrow").to_numpy()
return buydata
def getmlmodel(self,):
path = self._marketpath+str(self._instance)+"/data_buyer/"+"/mlmodel.pickle"
with open(path, 'rb') as handle:
model = pickle.load(handle)
return model
def getsellerid(self,):
path = self._marketpath+str(self._instance)+"/sellerid.txt"
ids = numpy.loadtxt(path)
return ids
def getsellerinfo(self,seller_id):
path = self._marketpath+str(self._instance)+"/summary/"+str(seller_id)+".csv.json"
f = open(path)
ids = json.load(f)
price = numpy.loadtxt(self._marketpath+str(self._instance)+"/price/"+"/price.txt",
delimiter=',',dtype=str)
price_i = price[seller_id]
MyPricing1 = PriceFunction()
#print("row number",ids['row_number'])
MyPricing1.setup(max_p = float(price_i[1]), method=price_i[0], data_size=ids['row_number'])
samples = numpy.loadtxt(self._marketpath+str(self._instance)+"/summary/"+str(seller_id)+".csvsamples.csv",
delimiter=' ',dtype=float)
return MyPricing1, ids, samples
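# Added note: getsellerinfo returns (pricing function, summary dict parsed
# from the seller's json, sample rows) for one seller; main() below walks
# through a typical call sequence.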
def main():
MyDam = Dam()
budget = MyDam.getbudget() # get budget
buyer_data = MyDam.getbuyerdata() # get buyer data
mlmodel = MyDam.getmlmodel() # get ml model
sellers_id = MyDam.getsellerid()
i=0
seller_i_price, seller_i_summary, seller_i_samples = MyDam.getsellerinfo(seller_id=i)
return
if __name__ == "__main__":
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/dam.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
class Buyer(object):
def __init__(self):
return
def loaddata(self,
data=None,
datapath=None,):
if(not (data is None)):
self.data = data
return
if(datapath != None):
self.data = numpy.loadtxt(open(datapath, "rb"),
delimiter=",",
skiprows=1)
return
raise ValueError("Buyer.loaddata needs either data or datapath")
return
def load_stretagy(self,
stretagy=None):
# store the strategy so get_stretagy() below can return it
self.stretagy = stretagy
return
def get_stretagy(self):
return self.stretagy
def load_mlmodel(self,
mlmodel):
self.mlmodel = mlmodel
return 0
def train_mlmodel(self,
train_data):
X = train_data[:,0:-1]
y = numpy.ravel(train_data[:,-1])
self.mlmodel.fit(X,y)
X_1 = self.data[:,0:-1]
y_1 = numpy.ravel(self.data[:,-1])
eval_acc = self.mlmodel.score(X_1, y_1)
return eval_acc
def main():
print("test of the buyer")
MyBuyer = Buyer()
MyBuyer.loaddata(data=numpy.asmatrix([[0,1,1,1],[1,0,1,0]]))
mlmodel1 = LogisticRegression(random_state=0)
MyBuyer.load_mlmodel(mlmodel1)
train_data = numpy.asmatrix([[0,1,1,1],[1,0,1,0],[1,1,1,1]])
eval1 = MyBuyer.train_mlmodel(train_data)
print("eval acc",eval1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/buyer.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
class MarketEngine(object):
def __init__(self):
return
def setup_market(self,
seller_data=None,
seller_prices=None,
buyer_data=None,
buyer_budget=None,
mlmodel=None):
sellers = list()
for i in range(len(seller_data)):
MySeller = Seller()
MySeller.loaddata(data=seller_data[i])
MySeller.setprice(seller_prices[i])
sellers.append(MySeller)
self.sellers = sellers
MyBuyer = Buyer()
MyBuyer.loaddata(data=buyer_data)
mlmodel1 = mlmodel
MyBuyer.load_mlmodel(mlmodel1)
self.buyer = MyBuyer
self.buyer_budget = buyer_budget
#print("set up the market")
return
def load_stretagy(self,
stretagy=None,):
self.stretagy = stretagy
return
def train_buyer_model(self):
print(" train buyer model ")
# check if the budget constraint is satisfied.
cost = sum(self.stretagy[1])
if(cost>self.buyer_budget):
raise ValueError("The budget constraint is not satisfied!")
return
traindata = None
for i in range(len(self.sellers)):
d1 = self.sellers[i].getdata(self.stretagy[0][i],self.stretagy[1][i])
if(i==0):
traindata = d1
else:
traindata = numpy.concatenate((traindata,d1))
print(i,d1)
print("budget checked! data loaded!")
#print("train data", traindata)
acc = self.buyer.train_mlmodel(traindata)
return acc
def main():
print("test of the market engine")
MyMarketEngine = MarketEngine()
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
data_b = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
buyer_budget = 100
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
mlmodel1 = LogisticRegression(random_state=0)
MyMarketEngine.setup_market(seller_data=[data_1,data_2],
seller_prices = [MyPricing1,MyPricing2],
buyer_data=data_b,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
stretagy = [[1,2],[50,50]]
MyMarketEngine.load_stretagy(stretagy)
acc1 = MyMarketEngine.train_buyer_model()
print("acc is ",acc1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/marketengine.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import matplotlib
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
import pandas
from sklearn.neighbors import KNeighborsClassifier
import seaborn as sns
def visualize_acc_cost(data_path="../logs/0/acc_cost_tradeoffs_uniform_logreg.csv",
savepath="../figures/",
):
plt.clf()
data = pandas.read_csv(data_path)
print("data",data)
mean1 = data.groupby("budget").mean()
var1 = data.groupby("budget").var()
max1 = data.groupby("budget").max()
min1 = data.groupby("budget").min()
print("mean1 of acc",mean1['acc'])
print("var",var1['acc'])
print("diff, max, and min",max1['acc']-min1['acc'],max1['acc'],min1['acc'])
sns.color_palette("tab10")
swarm_plot = sns.histplot(data=data, x="acc", hue="budget",palette=["C0", "C1", "C2","C3","C4"])
#swarm_plot = sns.scatterplot(data=data, x= "cost",y="acc")
plt.figure()
fig = swarm_plot.get_figure()
data_parse = data_path.split("/")
method = data_parse[-1].split("_")[-2]
instanceid = data_parse[-2]
ml = data_parse[-1].split("_")[-1]
fig.savefig(savepath+str(instanceid)+"/"+method+ml+".pdf")
plt.figure()
swarm_plot = sns.lineplot(data=data, y="acc", x="budget", err_style="band")
fig2 = swarm_plot.get_figure()
fig2.savefig(savepath+str(instanceid)+"/"+method+ml+"_line.pdf")
return
def evaluate(
MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale=0.1,
method="single",
):
trial_list = list(range(trial))
acc_list = list()
cost_list = list()
for i in range(trial):
print("trial:",i)
# generate a submission
submission = gen_submission(seller_data_size_list,cost_scale=cost_scale,
method=method)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
cost_list.append(cost)
acc_list.append(acc1)
result = pandas.DataFrame()
result['trial'] = trial_list
result['acc'] = acc_list
result['cost'] = cost_list
return result
# evaluate() above returns a pandas DataFrame with columns: trial, acc, cost
def gen_submission(seller_data_size_list=[100,200,300],
cost_scale=1,
method="uniform"):
if(method=="uniform"):
submission = [numpy.random.randint(0,int(a*cost_scale)) for a in seller_data_size_list]
if(method=="single"):
submission = [0]*len(seller_data_size_list)
index = numpy.random.randint(0,len(submission))
submission[index] = int(seller_data_size_list[index]*cost_scale)
return submission
def evaluate_budget(MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale_list=[0.1],
method="single",
):
results = [evaluate(
MarketHelper=MarketHelper,
MarketEngineObj=MarketEngineObj,
model=model,
buyer_data=buyer_data,
trial=trial, # number of trials per budget
seller_data_size_list = seller_data_size_list,
cost_scale=c1,
method=method,
) for c1 in cost_scale_list]
full_result = pandas.concat(results, ignore_index=True,axis=0)
return full_result
def main():
matplotlib.pyplot.close('all')
instance_ids = [0,1,2,3,4]
methods = ['single','uniform']
methods=['uniform']
for instance_id in instance_ids:
for method in methods:
#visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_knn.csv")
visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_rf.csv")
#visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_logreg.csv")
'''
print("evaluate acc and cost tradeoffs")
instance_id=0
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
MyMarketEngine = MarketEngine()
mlmodel1 = LogisticRegression(random_state=0)
mlmodel1 = KNeighborsClassifier(n_neighbors=9)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
result = evaluate(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=10, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv"),
cost_scale=0.1,
)
result2 = evaluate_budget(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/" + str(instance_id) +"/seller_datasize.csv"),
# cost_scale_list=[0.005,0.0075,0.01,0.025],
# method="uniform",
cost_scale_list=[0.05,0.1,0.5,1],
method="single",
)
folder1 = "../logs/"+str(instance_id)+"/"
result2.to_csv(folder1+"acc_cost_tradeoffs.csv")
print("result is:",result)
'''
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/visualize_acc_cost.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
import pandas
from sklearn.neighbors import KNeighborsClassifier
def evaluate(
MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale=0.1,
method="single",
full_price=100,
):
trial_list = list(range(trial))
acc_list = list()
cost_list = list()
budget_list = list()
for i in range(trial):
print("trial:",i)
# generate a submission
submission = gen_submission(seller_data_size_list,cost_scale=cost_scale,
method=method)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
cost_list.append(cost)
acc_list.append(acc1)
budget_list.append(cost_scale*full_price)
result = pandas.DataFrame()
result['trial'] = trial_list
result['acc'] = acc_list
result['cost'] = cost_list
result['budget'] = budget_list
return result
# evaluate() above returns a pandas DataFrame with columns: trial, acc, cost, budget
def gen_submission(seller_data_size_list=[100,200,300],
cost_scale=1,
method="uniform"):
if(method=="uniform"):
d = len(seller_data_size_list)
submission = [numpy.random.randint(0,int(a*cost_scale/d*2)) for a in seller_data_size_list]
if(method=="single"):
submission = [0]*len(seller_data_size_list)
index = numpy.random.randint(0,len(submission))
submission[index] = int(seller_data_size_list[index]*cost_scale)
return submission
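# Added worked example: with seller_data_size_list=[100,200,300] and
# cost_scale=0.1, "single" buys 10% of one random seller's rows (e.g.
# [0,20,0]), while "uniform" draws each seller's count uniformly from
# [0, 2*size*cost_scale/d), so the expected total roughly matches the
# single-seller budget.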
def evaluate_budget(MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale_list=[0.1],
method="single",
):
results = [evaluate(
MarketHelper=MarketHelper,
MarketEngineObj=MarketEngineObj,
model=model,
buyer_data=buyer_data,
trial=trial, # number of trials per budget
seller_data_size_list = seller_data_size_list,
cost_scale=c1,
method=method,
) for c1 in cost_scale_list]
full_result = pandas.concat(results, ignore_index=True,axis=0)
return full_result
def evaluate_full(instance_id=0,
method="single",
model_name="knn",):
print("evaluate acc and cost tradeoffs")
# instance_id=0
# method="single"
# model_name="knn"
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
numpy.savetxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv",data_size,fmt="%d")
MyMarketEngine = MarketEngine()
mlmodel1 = LogisticRegression(random_state=0)
if(model_name=="knn"):
mlmodel1 = KNeighborsClassifier(n_neighbors=9)
if(model_name=='rf'):
mlmodel1 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=0)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
result = evaluate(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=10, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv"),
cost_scale=0.1,
)
result2 = evaluate_budget(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/" + str(instance_id) +"/seller_datasize.csv"),
# cost_scale_list=[0.005,0.0075,0.01,0.025,0.05,0.075,0.1],
cost_scale_list=[0.01,0.025,0.05,0.1,0.2],
method=method,
# cost_scale_list=[0.05,0.1,0.5,1],
# method="single",
)
folder1 = "../logs/"+str(instance_id)+"/"
result2.to_csv(folder1+"acc_cost_tradeoffs_"+method+"_"+model_name+".csv")
print("result is:",result)
return
def main():
instance_ids = [3,4]
methods = ['single','uniform']
for instance_id in instance_ids:
for method in methods:
evaluate_full(instance_id=instance_id,method=method,model_name="knn")
evaluate_full(instance_id=instance_id,method=method,model_name="logreg")
evaluate_full(instance_id=instance_id,method=method,model_name="rf")
return
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator_acc_cost.py
|
import matplotlib # noqa
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
plt.rcParams['axes.facecolor'] = 'white'
import numpy as np
import matplotlib.ticker as ticker
import json
import seaborn as sn
import pandas as pd
from matplotlib.colors import LogNorm
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
import umap
#import matplotlib.pyplot as plt
class VisualizeTools(object):
def __init__(self,figuresize = (10,8),figureformat='jpg',
colorset=['r','orange','k','yellow','g','b','k'],
markersize=30,
fontsize=30,
usecommand=True):
self.figuresize=figuresize
self.figureformat = figureformat
self.fontsize = fontsize
self.linewidth = 5
self.markersize = markersize
self.folder = "../figures/" # use "../figures/" if needed
self.colorset=colorset
self.markerset = ['o','X','^','v','s','o','*','d','p']
self.marker = 'o' # from ['X','^','v','s','o','*','d','p'],
self.linestyle = '-' # from ['-.','--','--','-.','-',':','--','-.'],
self.linestyleset = ['-','-.','--','--','-.','-',':','--','-.']
self.usecommand = usecommand
def plotline(self,
xvalue,
yvalue,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
color=None,
ax=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
# plot it
if(color==None):
color = self.colorset[0]
ax.plot(xvalue,
yvalue,
marker=self.marker,
label=legend,
color=color,
linestyle = self.linestyle,
zorder=0,
)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
ax.locator_params(axis='x', nbins=6)
ax.locator_params(axis='y', nbins=6)
formatter = ticker.FormatStrFormatter('%0.2e')
formatterx = ticker.FormatStrFormatter('%0.2f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
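# Added usage example: VisualizeTools().plotline([1,2,3], [0.1,0.4,0.9],
# xlabel="budget", ylabel="acc", filename="demo_line") writes demo_line.jpg
# with the default jpg figure format.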
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def plotlines(self,
xvalue,
yvalues,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=60,
basey=10,
ylim=None):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=1,
)
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log',base=basey)
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=1, numpoints= 2,loc="best")
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def Histogram(self,
xvalue,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=90,
ylim=None,
n_bins=20):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
plt.hist(xvalue,bins=n_bins)
'''
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=10,
)
'''
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log')
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=2, numpoints= 2,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def Histograms(self,
xvalues,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=90,
color=['red','orange'],
ylim=None,
n_bins=20):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
plt.hist(xvalues,bins=n_bins, density=True,color=color)
'''
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=10,
)
'''
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log')
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=2, numpoints= 2,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def plotscatter(self,
xvalue=0.3,
yvalue=0.5,
filename='lineplot',
markersize=10,
legend='Learned Thres',
color='blue',
showlegend=False,
fig=None,
ax=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.plot(xvalue,yvalue,'*',markersize=markersize,color=color,
label=legend)
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 35},markerscale=3, numpoints= 1,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plotscatters_annotation(self,
xvalue=[0.3],
yvalue=[0.5],
filename='lineplot',
markersize=10,
legend='Learned Thres',
color='blue',
showlegend=False,
fig=None,
ax=None,
annotation=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.scatter(xvalue,yvalue,)
# '*',markersize=markersize,color=color,
# )
for i in range(len(xvalue)):
ax.annotate(annotation[i], xy=[xvalue[i],yvalue[i]])
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 35},markerscale=3, numpoints= 1,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plot_bar(self,barname,barvalue,
filename='barplot',
markersize=2,
yname='Frequency',
xname="",
color='blue',
ylim=None,
fig=None,
showlegend=False,
ax=None,
labelpad=None,
fontsize=30,
threshold=10,
add_thresline=False,):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
ax.set_facecolor("white")
plt.rcParams.update({'font.size': 1})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = markersize
plt.rcParams["font.sans-serif"] = 'Arial'
plt.rc('font', size=1) # controls default text sizes
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
plt.grid(True,color="grey")
x = np.arange(len(barname))
ax.bar(x,barvalue,color=color,
label=barname)
ax.set_ylabel(yname,fontsize=fontsize)
if(xname!=""):
ax.set_xlabel(xname,fontsize=fontsize)
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
ax.set_xticklabels(barname,rotation='horizontal',fontsize=fontsize)
#ax.set_xticklabels(barname,rotation='vertical')
plt.xlim(x[0]-0.5,x[-1]+0.5)
if(add_thresline==True):
ax.plot([min(x)-0.5, max(x)+0.5], [threshold, threshold], "k--")
matplotlib.rc('xtick', labelsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(not(labelpad==None)):
ax.tick_params(axis='x', which='major', pad=labelpad)
#matplotlib.rc('ytick', labelsize=fontsize)
#ax.text(0.5,0.5,"hello")
#ax.legend()
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 10},markerscale=3, numpoints= 1,loc=0)
#ticks = [tick for tick in plt.gca().get_xticklabels()]
#print("ticks 0 is",ticks[0].get_window_extent())
'''
plt.text(-0.07, -0.145, 'label:', horizontalalignment='center',fontsize=fontsize,
verticalalignment='center', transform=ax.transAxes)
plt.text(-0.07, -0.25, 'qs:', horizontalalignment='center',fontsize=fontsize,
verticalalignment='center', transform=ax.transAxes)
'''
filename =filename+'.'+self.figureformat
if(not(ylim==None)):
plt.ylim(ylim)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plot_bar2value(self,barname,barvalue, barvalue2,
filename='barplot',
markersize=2,
yname='Frequency',
color='blue',
fig=None,
showlegend=False,
legend=['precision','recall'],
yrange = None,
ax=None,
fontsize=25,
showvalues = False,
legend_loc="upper left",
hatch=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = markersize
plt.rcParams["font.sans-serif"] = 'Arial'
width=0.3
x = np.arange(len(barname))
ax.bar(x-width/2,barvalue,width,color=color[0],
label=legend[0])
ax.bar(x+width/2,barvalue2,width, color=color[1],
hatch=hatch,
label=legend[1])
ax.set_ylabel(yname,fontsize=fontsize)
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
#ax.set_xticklabels(barname,rotation='vertical')
#ax.set_xticklabels(barname,rotation=45)
ax.set_xticklabels(barname,rotation='horizontal')
plt.xlim(x[0]-0.5,x[-1]+0.5)
if(not(yrange==None)):
plt.ylim(yrange[0],yrange[1])
matplotlib.rc('xtick', labelsize=fontsize)
matplotlib.rc('ytick', labelsize=fontsize)
#ax.legend()
if(showvalues==True):
for i, v in enumerate(barvalue):
ax.text(i - 0.33,v + 0.1, "{:.1f}".format(v), color=color[0], fontweight='bold',)
for i, v in enumerate(barvalue2):
ax.text(i + .10,v + 0.2, "{:.1f}".format(v), color=color[1], fontweight='bold',)
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': fontsize},markerscale=3, numpoints= 1,
loc=legend_loc,ncol=1, )#bbox_to_anchor=(0, 1.05))
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plotconfusionmaitrix(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap="coolwarm", # "Blues"
vmin=0,
vmax=10,
fonttype='Arial',
title1="",
fmt=".1f",
xlabel1 = "Predicted label",
ylabel1="True label",):
if(self.usecommand==True):
return self.plotconfusionmaitrix_common1(confmatrix=confmatrix,
xlabel=xlabel,
ylabel=ylabel,
filename=filename,
keywordsize = keywordsize,
font_scale=font_scale,
figuresize=figuresize,
cmap=cmap,
vmin=vmin,
vmax=vmax,
fonttype=fonttype,
title1=title1,
xlabel1=xlabel1,
ylabel1=ylabel1,
fmt=fmt)
sn.set(font=fonttype)
#boundaries = [0.0, 0.045, 0.05, 0.055, 0.06,0.065,0.07,0.08,0.1,0.15, 1.0] # custom boundaries
boundaries = [0.0, 0.06,0.2, 0.25,0.3, 0.4,0.5,0.6,0.7, 0.8, 1.0] # custom boundaries
# here I generated twice as many colors,
# so that I could prune the boundaries more clearly
#hex_colors = sns.light_palette('blue', n_colors=len(boundaries) * 2 + 2, as_cmap=False).as_hex()
#hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
#print("hex",hex_colors)
# My color
hex_colors = ['#ffffff','#ebf1f7',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
['#e5eff9',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
boundaries = [0.0, 0.03, 0.06,0.1,0.2,0.29,0.3,0.8,1.0]
hex_colors = ['#F2F6FA','#ebf1f7','#FFB9C7','#FF1242', '#FF1242','#FF1242','#2676b8','#135fa7','#08488e']
colors=list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name='custom_navy',
colors=colors,
)
tol=1e-4
labels = confmatrix
confmatrix=confmatrix*(confmatrix>0.35)
print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(df_cm,
linewidths=0.3,
linecolor="grey",
cmap=custom_color_map,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt=".1f",
#mask=df_cm < 0.02,
vmin=vmin+tol,
vmax=vmax,
cbar=False,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel(ylabel1)
plt.xlabel(xlabel1)
plt.title("Overall accuracy:"+"{:.1f}".format(np.trace(confmatrix)),
fontweight="bold",
pad=32)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def plotconfusionmaitrix_common1(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap="vlag",
vmin=0,
vmax=10,
fonttype='Arial',
title1="",
fmt=".1f",
xlabel1 = "Predicted label",
ylabel1="True label",
):
print("Use common confusion matrix plot!")
sn.set(font=fonttype)
#boundaries = [0.0, 0.045, 0.05, 0.055, 0.06,0.065,0.07,0.08,0.1,0.15, 1.0] # custom boundaries
boundaries = [0.0, 0.06,0.2, 0.25,0.3, 0.4,0.5,0.6,0.7, 0.8, 1.0] # custom boundaries
# here I generated twice as many colors,
# so that I could prune the boundaries more clearly
#hex_colors = sns.light_palette('blue', n_colors=len(boundaries) * 2 + 2, as_cmap=False).as_hex()
#hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
#print("hex",hex_colors)
# My color
hex_colors = ['#ffffff','#ebf1f7',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
['#e5eff9',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
boundaries = [0.0, 0.03, 0.06,0.1,0.2,0.29,0.3,0.8,1.0]
hex_colors = ['#F2F6FA','#ebf1f7','#FFB9C7','#FF1242', '#FF1242','#FF1242','#2676b8','#135fa7','#08488e']
colors=list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name='custom_navy',
colors=colors,
)
tol=1e-4
labels = confmatrix
#confmatrix=confmatrix*(confmatrix>0.35)
#print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(-df_cm,
linewidths=0.3,
linecolor="grey",
cmap=cmap,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt=fmt,
#mask=df_cm < 0.02,
#vmin=vmin+tol,
#vmax=vmax,
cbar=False,
center=0,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel(ylabel1)
plt.xlabel(xlabel1)
print("trece",np.trace(confmatrix),confmatrix)
plt.title(title1,
fontweight="bold",
fontsize=keywordsize*1.1,
pad=40)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def plotconfusionmaitrix_common(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap='vlag',#sn.diverging_palette(240, 10, n=9),
vmin=-5,
vmax=10,
center=0,
fonttype='Arial'):
cmap = LinearSegmentedColormap.from_list('RedWhiteGreen', ['red', 'white', 'green'])
sn.set(font=fonttype)
tol=1e-4
labels = (confmatrix+0.05)*(np.abs(confmatrix)>0.1)
labels = list()
for i in range(confmatrix.shape[0]):
temp = list()
for j in range(confmatrix.shape[1]):
a = confmatrix[i,j]
if(a>0.1):
temp.append("+"+"{0:.1f}".format(a))
if(a<-0.1):
temp.append("{0:.1f}".format(a))
if(a<=0.1 and a>=-0.1):
temp.append(str(0.0))
labels.append(temp)
#labels = (confmatrix+0.05)*(np.abs(confmatrix)>0.1)
print("labels",labels)
confmatrix = confmatrix*(np.abs(confmatrix)>0.7)
print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(df_cm,
linewidths=12.0,
linecolor="grey",
cmap=cmap,
center=center,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt="s",#fmt="{0:+.1f}",
#mask=df_cm < 0.02,
vmin=vmin,
vmax=vmax,
cbar=False,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel("ML API")
plt.xlabel("Dataset",)
#plt.title("Overall accuracy:"+"{:.1f}".format(np.trace(confmatrix)),
# fontweight="bold",
# pad=32)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=40)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def reward_vs_confidence(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance'):
"""
Run a small experiment on solving a Bernoulli bandit with K slot machines,
each with a randomly initialized reward probability.
Args:
K (int): number of slot machines.
N (int): number of time steps to try.
"""
datapath = self.datapath
print('reward datapath',datapath)
b0 = BernoulliBanditwithData(ModelID=ModelID,datapath=datapath)
K = len(ModelID)
print ("Data generated Bernoulli bandit has reward probabilities:\n", b0.probas)
print ("The best machine has index: {} and proba: {}".format(
max(range(K), key=lambda i: b0.probas[i]), max(b0.probas)))
Params0 = context_params(ModelID=ModelID,datapath=datapath)
#confidencerange = (0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.9999,1)
#confidencerange = (0.99,0.991,0.992,0.993,0.994,0.995,0.996,0.997,0.9999,1)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_List(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID)
print(BaseAccuracy, Others)
CDF = Params0.BaseModel.Compute_Prob_vs_Score(ScoreRange=confidencerange)
print(CDF)
plot_reward_vs_confidence(confidencerange, BaseAccuracy,Others, ModelID,"model reward compare_ModelID_{}.png".format(ModelID),CDF)
def reward_vs_prob(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val',
context=None):
"""
compute and plot reward as a function of the probability of not using
the basemodel.
Args:
See the name.
"""
datapath = self.datapath
print('reward datapath',datapath)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context = context)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID,context=context)
print('Base Accuracy', BaseAccuracy, 'Other',Others)
CDF = self.mlmodels.compute_prob_vs_score(ScoreRange=confidencerange,context = context)
print('CDF',CDF)
self._plot_reward_vs_prob(CDF, BaseAccuracy,Others, ModelID,self.folder+"Reward_vs_Prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
def reward_vs_prob_pdf(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val',
context=None):
"""
compute and plot reward as a function of the probability of not using
the basemodel.
Args:
See the name.
"""
datapath = self.datapath
print('reward datapath',datapath)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context = context)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID,context=context)
print('Base Accuracy', BaseAccuracy, 'Other',Others)
CDF = self.mlmodels.compute_prob_vs_score(ScoreRange=confidencerange,context = context)
print('CDF',CDF)
self._plot_reward_vs_prob(CDF, BaseAccuracy,Others, ModelID,self.folder+"Reward_vs_Prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
if(not(prob_range==None)):
base_pdf,other_pdf = self.mlmodels.accuracy_condition_score_list_cdf2pdf(prob_range,BaseAccuracy,Others,diff = False)
print('base pdf',base_pdf)
print('other pdf',other_pdf)
self._plot_reward_vs_prob(CDF, base_pdf,other_pdf, ModelID,self.folder+"Reward_vs_Probpdf_diff_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
self._plot_reward_vs_prob(confidencerange, base_pdf,other_pdf, ModelID,self.folder+"Reward_vs_conf_pdf_diff_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
def qvalue_vs_prob(self,
confidence_range = None,
BaseID = 100,
prob_range = None,
dataname = 'imagenet_val',
context=None):
if(not(prob_range==None)):
confidence_range = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context=context)
filename = self.folder+"Conf_vs_prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat)
prob = self.mlmodels.compute_prob_wrt_confidence(confidence_range=confidence_range,BaseID = BaseID,context=context)
self._plot_q_value_vs_prob(confidence_range,prob,filename)
return 0
def _plot_reward_vs_prob(self, confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot each candidate model's conditional accuracy against the confidence range.
Args:
confidence_range (list<float>): thresholds on the x-axis.
base_acc, model_acc: accuracies of the base model and the candidate models.
model_names (list<str>), figname (str), CDF (list<float>)
"""
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
k=0
for i in model_acc:
plt.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
plt.xlabel('Fraction of Low Confidence Data')
plt.ylabel('Accuracy on Low Confidence Data')
plt.legend(loc=8, ncol=5)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def _plot_q_value_vs_prob(self,confidence_range,prob,figname):
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.plot(prob,confidence_range,marker='x')
plt.xlabel('Fraction of Low Confidence Data')
plt.ylabel('Confidence Threshold')
#plt.legend(loc=9, ncol=5)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def plot_accuracy(self,
namestick=['bm', 's0','s1','s2'],
model_id=[100,0,1,2],
base_id = 100,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val'):
datapath = self.datapath
print('reward datapath',datapath)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=[1],BaseID=base_id,ModelID=model_id)
print('Base Accuracy', BaseAccuracy, 'Other',len(Others))
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
flattened = [val for sublist in Others for val in sublist]
print('flat others',flattened)
acc = flattened
#plt.bar(range(len(acc)),acc,color=self.colorset,tick_label=namestick)
bars = plt.bar(range(len(acc)),acc,color=self.colorset,hatch="/")
#plt.bar(range(len(acc)),acc,color='r',edgecolor='k',hatch="/")
#ax = plt.gca()
#ax.bar(range(1, 5), range(1, 5), color='red', edgecolor='black', hatch="/")
#patterns = ('-', '+', 'x', '\\', '*', 'o', 'O', '.')
patterns = ('-', '\\', '/', 'o', 'O', '.','+', 'x','*')
for bar, pattern in zip(bars, patterns):
bar.set_hatch(pattern)
#ax.set_hatch('/')
plt.xlabel('ML Services')
plt.ylabel('Accuracy')
plt.ylim(min(acc)-0.01)
#set_xticklabels(namestick)
matplotlib.pyplot.xticks(range(len(acc)), namestick)
#plt.legend(loc=9, ncol=5)
figname = self.folder+"accuracy_dataset_{}.{}".format(dataname,self.figureformat)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def plot_umaps(self,
fit_data=[[1,2,3],[4,5,6]],
data=[[1,2,3],[4,5,6]],
filename="umap",
markersize=2,
markershape=["8","s"],
yname='Frequency',
color=['blue','red'],
fig=None,
showlegend=False,
legend=['male','female'],
yrange = None,
ax=None,
fontsize=30,
figureformat="jpg",):
# generate embeddings
reducer = umap.UMAP(random_state=42)
reducer.fit(fit_data[:,0:-1])
for i in range(len(data)):
datum1 = data[i]
embedding = reducer.transform(datum1[:,0:-1])
plt.scatter(embedding[:, 0], embedding[:, 1], c=datum1[:,-1], cmap='Spectral', s=markersize,marker=markershape[i],label=legend[i])
# plt.legend(loc=8, ncol=5)
lgnd = plt.legend(loc="lower left", scatterpoints=1, fontsize=10)
for handle in lgnd.legendHandles:
handle.set_sizes([2.0])
self.figureformat = figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename+".jpg", format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
plt.close("all")
return
def plot_results(solvers, solver_names, figname):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
solver_names (list<str>)
figname (str)
"""
assert len(solvers) == len(solver_names)
assert all(map(lambda s: isinstance(s, Solver), solvers))
assert all(map(lambda s: len(s.regrets) > 0, solvers))
b = solvers[0].bandit
fig = plt.figure(figsize=(14, 4))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Sub.fig. 1: Regrets in time.
for i, s in enumerate(solvers):
ax1.plot(range(len(s.regrets)), s.regrets, label=solver_names[i])
ax1.set_xlabel('Time step')
ax1.set_ylabel('Cumulative regret')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
# Sub.fig. 2: Probabilities estimated by solvers.
sorted_indices = sorted(range(b.n), key=lambda x: b.probas[x])
ax2.plot(range(b.n), [b.probas[x] for x in sorted_indices], 'k--', markersize=12)
for s in solvers:
ax2.plot(range(b.n), [s.estimated_probas[x] for x in sorted_indices], 'x', markeredgewidth=2)
ax2.set_xlabel('Actions sorted by ' + r'$\theta$')
ax2.set_ylabel('Estimated')
ax2.grid('k', ls='--', alpha=0.3)
# Sub.fig. 3: Action counts
for s in solvers:
ax3.plot(range(b.n), np.array(s.counts) / float(len(solvers[0].regrets)), drawstyle='steps', lw=2)  # ls='steps' was removed in matplotlib 3.x
ax3.set_xlabel('Actions')
ax3.set_ylabel('Frac. # trials')
ax3.grid('k', ls='--', alpha=0.3)
plt.savefig(figname)
def plot_reward_vs_confidence(confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot model accuracy and the data CDF as functions of the confidence threshold.
Args:
confidence_range, base_acc, model_acc: thresholds and per-model accuracies.
model_names (list<str>)
figname (str), CDF
"""
fig = plt.figure(figsize=(14, 6))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(121)
#ax2 = fig.add_subplot(212)
ax3 = fig.add_subplot(122)
#ax4 = fig.add_subplot(214)
# Sub.fig. 1: Regrets in time.
k=0
for i in model_acc:
ax1.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
ax1.set_xlabel('Probability threshold')
ax1.set_ylabel('Reward Value')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
# Sub.fig. 2: Regrets in time.
k=0
for i in model_acc:
ax3.plot(confidence_range, CDF, label=model_names[k],marker='x')
k=k+1
ax3.set_xlabel('Probability threshold')
ax3.set_ylabel('CDF')
ax3.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax3.grid('k', ls='--', alpha=0.3)
plt.savefig(figname, dpi=1000)
def plot_reward_vs_confidence_old(confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
solver_names (list<str)
figname (str)
"""
fig = plt.figure(figsize=(14, 4))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
    # Sub.fig. 1: Reward vs. probability threshold.
k=0
for i in model_acc:
ax1.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
ax1.set_xlabel('Probability threshold')
ax1.set_ylabel('Reward Value')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 2: Reward minus base accuracy.
k=0
for i in model_acc:
ax2.plot(confidence_range, np.array(i)-np.asarray(base_acc), label=model_names[k],marker='x')
k=k+1
ax2.set_xlabel('Probability threshold')
ax2.set_ylabel('Reward Value-Base')
ax2.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax2.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 3: CDF vs. probability threshold.
k=0
for i in model_acc:
ax3.plot(confidence_range, CDF, label=model_names[k],marker='x')
k=k+1
ax3.set_xlabel('Probability threshold')
ax3.set_ylabel('CDF')
ax3.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax3.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 4: Reward gain weighted by CDF.
k=0
for i in model_acc:
ax4.plot(confidence_range, (np.array(i)-np.asarray(base_acc))*np.asarray(CDF), label=model_names[k],marker='x')
k=k+1
ax4.set_xlabel('Probability threshold')
ax4.set_ylabel('Reward*Prob')
ax4.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax4.grid('k', ls='--', alpha=0.3)
plt.savefig(figname, dpi=1000)
def reward_vs_confidence(N=1000,
ModelID=[100,0,1,2,3,4],
ModelIndex = [0,1,2,3],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
datapath='path/to/imagenet/result/val_performance'):
"""
Run a small experiment on solving a Bernoulli bandit with K slot machines,
each with a randomly initialized reward probability.
Args:
K (int): number of slot machiens.
N (int): number of time steps to try.
"""
print('reward datapaht',datapath)
b0 = BernoulliBanditwithData(ModelID=ModelID,datapath=datapath)
K = len(ModelID)
print ("Data generated Bernoulli bandit has reward probabilities:\n", b0.probas)
print ("The best machine has index: {} and proba: {}".format(
max(range(K), key=lambda i: b0.probas[i]), max(b0.probas)))
Params0 = context_params(ModelID=ModelID,datapath=datapath)
#confidencerange = (0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.9999,1)
#confidencerange = (0.99,0.991,0.992,0.993,0.994,0.995,0.996,0.997,0.9999,1)
BaseAccuracy, Others = Params0.BaseModel.Compute_Conditional_Accuracy_AmongModel_List(ScoreRange=confidencerange,BaseID=0,ModelID=ModelIndex)
print(BaseAccuracy, Others)
CDF = Params0.BaseModel.Compute_Prob_vs_Score(ScoreRange=confidencerange)
print(CDF)
#CDF1 = Compute_CDF_wrt_Score(ScoreRange=confidencerange)
#print(CDF1)
#print(Params0.BaseModel.Compute_Conditional_Accuracy(Score))
#Params1 = context_params(ModelID=[2])
#print(Params1.BaseModel.Compute_Conditional_Accuracy(Score))
    # Test for different combiners
#ParamsTest = BaseModel(ModelID=[0,1,3,4,5,100])
#output = ParamsTest.Stacking_AllModels()
# End of Test
# print(ParamsTest.Compute_Conditional_Accuracy_AmongModel(ScoreBound=Score, ModelID = [0,1]))
plot_reward_vs_confidence(confidencerange, BaseAccuracy,Others, ModelID,"model reward compare_ModelID_{}.png".format(ModelID),CDF)
def test_plotline():
prange= [0. , 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007,
0.0008, 0.0009, 0.001 , 0.0011, 0.0012, 0.0013, 0.0014, 0.0015,
0.0016, 0.0017, 0.0018, 0.0019, 0.002 , 0.0021, 0.0022, 0.0023,
0.0024, 0.0025, 0.0026, 0.0027, 0.0028, 0.0029, 0.003 , 0.0031,
0.0032, 0.0033, 0.0034, 0.0035, 0.0036, 0.0037, 0.0038, 0.0039,
0.004 , 0.0041, 0.0042, 0.0043, 0.0044, 0.0045, 0.0046, 0.0047,
0.0048, 0.0049, 0.005 , 0.0051, 0.0052, 0.0053, 0.0054, 0.0055,
0.0056, 0.0057, 0.0058, 0.0059, 0.006 , 0.0061, 0.0062, 0.0063,
0.0064, 0.0065, 0.0066, 0.0067, 0.0068, 0.0069, 0.007 , 0.0071,
0.0072, 0.0073, 0.0074, 0.0075, 0.0076, 0.0077, 0.0078, 0.0079,
0.008 , 0.0081, 0.0082, 0.0083, 0.0084, 0.0085, 0.0086, 0.0087,
0.0088, 0.0089, 0.009 , 0.0091, 0.0092, 0.0093, 0.0094, 0.0095,
0.0096, 0.0097, 0.0098, 0.0099, 0.01 , 0.0101, 0.0102, 0.0103,
0.0104, 0.0105, 0.0106, 0.0107, 0.0108, 0.0109, 0.011 , 0.0111,
0.0112, 0.0113, 0.0114, 0.0115, 0.0116, 0.0117, 0.0118, 0.0119,
0.012 , 0.0121, 0.0122, 0.0123, 0.0124, 0.0125, 0.0126, 0.0127,
0.0128, 0.0129, 0.013 , 0.0131, 0.0132, 0.0133, 0.0134, 0.0135,
0.0136, 0.0137, 0.0138, 0.0139, 0.014 , 0.0141, 0.0142, 0.0143,
0.0144, 0.0145, 0.0146, 0.0147, 0.0148, 0.0149, 0.015 , 0.0151,
0.0152, 0.0153, 0.0154, 0.0155, 0.0156, 0.0157, 0.0158, 0.0159,
0.016 , 0.0161, 0.0162, 0.0163, 0.0164, 0.0165, 0.0166, 0.0167,
0.0168, 0.0169, 0.017 , 0.0171, 0.0172, 0.0173, 0.0174, 0.0175,
0.0176, 0.0177, 0.0178, 0.0179, 0.018 , 0.0181, 0.0182, 0.0183,
0.0184, 0.0185, 0.0186, 0.0187, 0.0188, 0.0189, 0.019 , 0.0191,
0.0192, 0.0193, 0.0194, 0.0195, 0.0196, 0.0197, 0.0198, 0.0199,
0.02 , 0.0201, 0.0202, 0.0203, 0.0204, 0.0205, 0.0206, 0.0207,
0.0208, 0.0209, 0.021 , 0.0211, 0.0212, 0.0213, 0.0214, 0.0215,
0.0216, 0.0217, 0.0218, 0.0219, 0.022 , 0.0221, 0.0222, 0.0223,
0.0224, 0.0225, 0.0226, 0.0227, 0.0228, 0.0229, 0.023 , 0.0231,
0.0232, 0.0233, 0.0234, 0.0235, 0.0236, 0.0237, 0.0238, 0.0239,
0.024 , 0.0241, 0.0242, 0.0243, 0.0244, 0.0245, 0.0246, 0.0247,
0.0248, 0.0249, 0.025 , 0.0251, 0.0252, 0.0253, 0.0254, 0.0255,
0.0256, 0.0257, 0.0258, 0.0259, 0.026 , 0.0261, 0.0262, 0.0263,
0.0264, 0.0265, 0.0266, 0.0267, 0.0268, 0.0269, 0.027 , 0.0271,
0.0272, 0.0273, 0.0274, 0.0275, 0.0276, 0.0277, 0.0278, 0.0279,
0.028 , 0.0281, 0.0282, 0.0283, 0.0284, 0.0285, 0.0286, 0.0287,
0.0288, 0.0289, 0.029 , 0.0291, 0.0292, 0.0293, 0.0294, 0.0295,
0.0296, 0.0297, 0.0298, 0.0299, 0.03 , 0.0301, 0.0302, 0.0303,
0.0304, 0.0305, 0.0306, 0.0307, 0.0308, 0.0309, 0.031 , 0.0311,
0.0312, 0.0313, 0.0314, 0.0315, 0.0316, 0.0317, 0.0318, 0.0319,
0.032 , 0.0321, 0.0322, 0.0323, 0.0324, 0.0325, 0.0326, 0.0327,
0.0328, 0.0329, 0.033 , 0.0331, 0.0332, 0.0333, 0.0334, 0.0335,
0.0336, 0.0337, 0.0338, 0.0339, 0.034 , 0.0341, 0.0342, 0.0343,
0.0344, 0.0345, 0.0346, 0.0347, 0.0348, 0.0349, 0.035 , 0.0351,
0.0352, 0.0353, 0.0354, 0.0355, 0.0356, 0.0357, 0.0358, 0.0359,
0.036 , 0.0361, 0.0362, 0.0363, 0.0364, 0.0365, 0.0366, 0.0367,
0.0368, 0.0369, 0.037 , 0.0371, 0.0372, 0.0373, 0.0374, 0.0375,
0.0376, 0.0377, 0.0378, 0.0379, 0.038 , 0.0381, 0.0382, 0.0383,
0.0384, 0.0385, 0.0386, 0.0387, 0.0388, 0.0389, 0.039 , 0.0391,
0.0392, 0.0393, 0.0394, 0.0395, 0.0396, 0.0397, 0.0398, 0.0399,
0.04 , 0.0401, 0.0402, 0.0403, 0.0404, 0.0405, 0.0406, 0.0407,
0.0408, 0.0409, 0.041 , 0.0411, 0.0412, 0.0413, 0.0414, 0.0415,
0.0416, 0.0417, 0.0418, 0.0419, 0.042 , 0.0421, 0.0422, 0.0423,
0.0424, 0.0425, 0.0426, 0.0427, 0.0428, 0.0429, 0.043 , 0.0431,
0.0432, 0.0433, 0.0434, 0.0435, 0.0436, 0.0437, 0.0438, 0.0439,
0.044 , 0.0441, 0.0442, 0.0443, 0.0444, 0.0445, 0.0446, 0.0447,
0.0448, 0.0449, 0.045 , 0.0451, 0.0452, 0.0453, 0.0454, 0.0455,
0.0456, 0.0457, 0.0458, 0.0459, 0.046 , 0.0461, 0.0462, 0.0463,
0.0464, 0.0465, 0.0466, 0.0467, 0.0468, 0.0469, 0.047 , 0.0471,
0.0472, 0.0473, 0.0474, 0.0475, 0.0476, 0.0477, 0.0478, 0.0479,
0.048 , 0.0481, 0.0482, 0.0483, 0.0484, 0.0485, 0.0486, 0.0487,
0.0488, 0.0489, 0.049 , 0.0491, 0.0492, 0.0493, 0.0494, 0.0495,
0.0496, 0.0497, 0.0498, 0.0499]
acc = [0.48301023, 0.48457155, 0.48538639, 0.48615516, 0.48668402,
0.48743234, 0.48818995, 0.48874007, 0.48916215, 0.48976699,
0.49029502, 0.49083267, 0.49127285, 0.49186667, 0.49235521,
0.49291153, 0.49324094, 0.4937676 , 0.494199 , 0.49455204,
0.49486084, 0.49522269, 0.49560935, 0.49594377, 0.49625499,
0.49656768, 0.49680171, 0.497076 , 0.49740774, 0.49774282,
0.49808112, 0.49844063, 0.49888367, 0.49907962, 0.49934593,
0.4996519 , 0.50010442, 0.50044377, 0.50083441, 0.50119005,
0.50157951, 0.50191593, 0.50229962, 0.50263862, 0.5029507 ,
0.50321984, 0.50355179, 0.50382114, 0.50421764, 0.50475099,
0.50509806, 0.50548435, 0.50571974, 0.50673374, 0.50709485,
0.50754149, 0.50806022, 0.50838091, 0.50895068, 0.51405688,
0.51405485, 0.51387681, 0.51375979, 0.51368061, 0.51363966,
0.51358214, 0.51348813, 0.51320118, 0.5131013 , 0.51299855,
0.51285864, 0.51261339, 0.51251116, 0.51239189, 0.51230296,
0.51222542, 0.51213922, 0.51213295, 0.51199515, 0.51189603,
0.51178252, 0.51170102, 0.51167787, 0.51146198, 0.51132532,
0.51125551, 0.51083861, 0.51080367, 0.51065056, 0.51054177,
0.51030458, 0.51009311, 0.50985171, 0.50952144, 0.50941722,
0.50885601, 0.50872907, 0.5086227 , 0.50837746, 0.50827709,
0.50811138, 0.50789465, 0.50776248, 0.50757616, 0.50723169,
0.50710103, 0.50692262, 0.50636731, 0.50551236, 0.5052581 ,
0.50467229, 0.50438479, 0.50419524, 0.50370731, 0.50344827,
0.50315895, 0.50299427, 0.50237316, 0.50076062, 0.5000888 ,
0.49904193, 0.49821103, 0.49782044, 0.49751325, 0.49721562,
0.49615602, 0.49570051, 0.49546224, 0.49528738, 0.49499799,
0.49480788, 0.49441211, 0.49400208, 0.4937852 , 0.49357293,
0.4932995 , 0.49308115, 0.49274721, 0.49232387, 0.4904962 ,
0.48979254, 0.48883763, 0.48723269, 0.48694961, 0.48664716,
0.48383779, 0.4823387 , 0.48139062, 0.48097353, 0.48045641,
0.47893606, 0.47857627, 0.4783139 , 0.47800683, 0.47767765,
0.47749323, 0.47730572, 0.47711734, 0.47697098, 0.47674 ,
0.47655673, 0.47617975, 0.47604766, 0.47593491, 0.4756809 ,
0.47553382, 0.47541119, 0.47521898, 0.47502894, 0.47485214,
0.47467929, 0.47456147, 0.47439464, 0.4742843 , 0.47419802,
0.47407429, 0.47394389, 0.47376382, 0.47366419, 0.47347226,
0.4733965 , 0.47327365, 0.47314522, 0.47295317, 0.47277204,
0.47265397, 0.47254881, 0.47238721, 0.47231294, 0.47213071,
0.47205963, 0.471965 , 0.47181219, 0.47166599, 0.4715344 ,
0.47142736, 0.47133969, 0.47124544, 0.47120949, 0.47109536,
0.47099978, 0.47084196, 0.47067891, 0.47054779, 0.47039573,
0.47028735, 0.47016451, 0.47002245, 0.46977837, 0.46963242,
0.46943925, 0.4692959 , 0.46914154, 0.46891011, 0.4687833 ,
0.4685379 , 0.46843594, 0.46825524, 0.46778678, 0.46757136,
0.46737609, 0.46692911, 0.46674504, 0.46645814, 0.46626084,
0.46601046, 0.46587982, 0.46568659, 0.46549668, 0.46531255,
0.46491423, 0.4644362 , 0.46398542, 0.4631161 , 0.46295977,
0.46250332, 0.46236719, 0.46221666, 0.462093 , 0.46187842,
0.46174634, 0.46159738, 0.46147783, 0.46137749, 0.46129638,
0.4611781 , 0.46107324, 0.46094401, 0.46083739, 0.46074101,
0.46072508, 0.46064278, 0.46052262, 0.46042853, 0.46034242,
0.46028446, 0.46017712, 0.46011206, 0.46002659, 0.45995817,
0.45986543, 0.45975698, 0.45968683, 0.45957428, 0.45942207,
0.45930791, 0.45921235, 0.45910849, 0.45898494, 0.45888329,
0.45879647, 0.45870982, 0.45870496, 0.45862491, 0.45850992,
0.45846477, 0.4583252 , 0.45870034, 0.45860152, 0.4584608 ,
0.45840916, 0.45837632, 0.45829484, 0.45822002, 0.45816921,
0.45808426, 0.45801872, 0.4579592 , 0.45785556, 0.45777885,
0.4577343 , 0.45766358, 0.45753936, 0.45752268, 0.45744507,
0.45736837, 0.45728324, 0.45717934, 0.45703663, 0.45697995,
0.45691548, 0.45679727, 0.45673414, 0.45666303, 0.45661996,
0.4565089 , 0.45641751, 0.45633791, 0.45626128, 0.45619948,
0.4561366 , 0.45613471, 0.45607387, 0.45597782, 0.45588608,
0.45581065, 0.45568215, 0.4555245 , 0.45539021, 0.45530577,
0.45521037, 0.4550916 , 0.45500052, 0.45498943, 0.45484803,
0.45476247, 0.45469974, 0.45461052, 0.45449327, 0.45441162,
0.4543233 , 0.45421517, 0.45414812, 0.45402163, 0.45396933,
0.45382181, 0.45372327, 0.45364773, 0.4535485 , 0.45345609,
0.45338647, 0.45332349, 0.45321917, 0.45318078, 0.45311913,
0.45302852, 0.45289496, 0.45282775, 0.45291292, 0.45281203,
0.45271895, 0.45259684, 0.45251492, 0.45226131, 0.45199698,
0.45190208, 0.45177381, 0.45167107, 0.45156732, 0.45120557,
0.4510243 , 0.45040894, 0.45016372, 0.41494005, 0.41482359,
0.4147391 , 0.41467827, 0.41456255, 0.41442845, 0.41435356,
0.41427217, 0.4141186 , 0.41393056, 0.41373277, 0.41356792,
0.41346815, 0.41313181, 0.41306098, 0.41297357, 0.41284036,
0.41271761, 0.41264731, 0.41260986, 0.41259229, 0.41252037,
0.41246792, 0.41244859, 0.41239455, 0.41236259, 0.41230149,
0.41226418, 0.41217959, 0.41212254, 0.41211362, 0.41207712,
0.41202834, 0.4119794 , 0.41189217, 0.41186648, 0.41183323,
0.41177104, 0.4117605 , 0.41172562, 0.41171102, 0.4116806 ,
0.41165032, 0.41161321, 0.41153588, 0.4114937 , 0.41145179,
0.41141475, 0.41141205, 0.4113842 , 0.41137095, 0.41133905,
0.41131634, 0.41129309, 0.41124033, 0.41121707, 0.41119274,
0.41117111, 0.41115895, 0.41114137, 0.4111238 , 0.4111119 ,
0.41109377, 0.41106132, 0.41101536, 0.41100238, 0.41097399,
0.41095669, 0.4109064 , 0.41086747, 0.4108653 , 0.41084692,
0.41080381, 0.41078624, 0.4107565 , 0.41074001, 0.4107346 ,
0.41071432, 0.41067972, 0.41063105, 0.41062294, 0.41059725,
0.41055453, 0.41050722, 0.41047964, 0.41046612, 0.41040232,
0.41038609, 0.41036176, 0.41036446, 0.41036176, 0.41034473,
0.41029336, 0.41027285, 0.4102012 , 0.41018011, 0.41015145,
0.41014199, 0.41010603, 0.4100817 , 0.41002357, 0.40999707,
0.40999301, 0.40998193, 0.40995883, 0.40995234, 0.40991009,
0.40989792, 0.4098425 , 0.40983087, 0.40981059, 0.40980789,
0.40978626, 0.40978414, 0.40976115, 0.40971627, 0.40970445,
0.40969383, 0.40966004, 0.40961732, 0.40958487, 0.4095454 ,
0.40952512, 0.40952269, 0.40948619, 0.40948078, 0.40944969,
0.40944428, 0.40941995, 0.40940778, 0.40941589, 0.40941589,
0.40937777, 0.40934938, 0.40932234, 0.40931288, 0.4092899 ]
cost = [5.99998378, 5.99995133, 5.99998378, 5.99998378, 5.99998378,
5.99998378, 5.99996756, 5.99996756, 5.99998378, 5.99993511,
6. , 5.99995133, 5.99998378, 5.99995133, 5.99996756,
5.99996756, 5.99993511, 5.99998378, 5.99996756, 5.99993511,
5.99995133, 5.99996756, 5.99993511, 6. , 5.99998378,
5.99995133, 6. , 6. , 5.99996756, 5.99996756,
6. , 5.99995133, 5.99995133, 5.99998378, 5.99991889,
5.99998378, 5.99995133, 5.99996756, 5.99995133, 5.99991889,
5.99995133, 6. , 5.99995133, 6. , 5.99996756,
5.99998378, 5.99998378, 5.99993511, 5.99996756, 6. ,
5.99993511, 5.99996756, 6. , 5.99998378, 5.99996756,
5.99993511, 5.99995133, 5.99996756, 5.99995133, 5.99482512,
5.96959964, 5.93986438, 5.90917202, 5.875365 , 5.84858218,
5.81982026, 5.76072286, 5.73048472, 5.70715723, 5.68048796,
5.65459737, 5.62457011, 5.59642463, 5.57118292, 5.54910454,
5.52188372, 5.49866978, 5.47579651, 5.45198235, 5.42672442,
5.39849783, 5.37371034, 5.34908507, 5.32488158, 5.29626565,
5.27175394, 5.22610473, 5.19873791, 5.17562131, 5.1535267 ,
5.12797677, 5.10221595, 5.07600091, 5.04115567, 5.01468107,
4.94257349, 4.91944066, 4.89812472, 4.87351567, 4.84459153,
4.82238336, 4.79530855, 4.77207839, 4.73781714, 4.70809811,
4.68477062, 4.65748491, 4.60662838, 4.53348258, 4.5040231 ,
4.43806372, 4.41154046, 4.3859743 , 4.34853352, 4.32293492,
4.29568166, 4.26372396, 4.20829278, 4.08779443, 4.03799234,
3.97743495, 3.91929466, 3.89564272, 3.86397703, 3.83818376,
3.76805529, 3.72065408, 3.69499059, 3.67588086, 3.65414314,
3.63321653, 3.60901304, 3.58041334, 3.55827007, 3.53622413,
3.51049575, 3.48917981, 3.46007722, 3.42815197, 3.31135228,
3.26297774, 3.19297904, 3.06952826, 3.04595743, 3.01523263,
2.82674713, 2.74002336, 2.67805464, 2.6509636 , 2.60555772,
2.50275777, 2.48069561, 2.4648952 , 2.44812147, 2.43077996,
2.41721822, 2.40394848, 2.39111673, 2.37920966, 2.36603725,
2.35312439, 2.33891376, 2.32945623, 2.32024203, 2.30869184,
2.29655765, 2.28491013, 2.27579326, 2.26568685, 2.25663487,
2.24586334, 2.23767114, 2.22874895, 2.22080008, 2.21041788,
2.20259879, 2.19516904, 2.18631173, 2.17881708, 2.1691811 ,
2.16051846, 2.15156382, 2.14377717, 2.13540653, 2.12697099,
2.12012524, 2.1119817 , 2.10146973, 2.09397508, 2.08388489,
2.07486536, 2.06678671, 2.05770229, 2.05040231, 2.04141522,
2.0323308 , 2.02389527, 2.01721173, 2.00828953, 2.00157355,
1.99427357, 1.98590293, 1.97717539, 1.96831808, 1.96066122,
1.95154435, 1.94359548, 1.93636039, 1.92682175, 1.915012 ,
1.90612225, 1.89590228, 1.8862663 , 1.87789566, 1.86851924,
1.85755305, 1.84786841, 1.83615599, 1.80666407, 1.79229122,
1.78366102, 1.76315619, 1.75384466, 1.73528648, 1.7239634 ,
1.71507365, 1.70748167, 1.69943547, 1.69190838, 1.6825644 ,
1.6716631 , 1.65424048, 1.63678541, 1.61277659, 1.60628772,
1.5939913 , 1.58695088, 1.57871001, 1.57163714, 1.56274739,
1.55638829, 1.5488612 , 1.54246966, 1.53666212, 1.53244436,
1.52702615, 1.52173772, 1.51602751, 1.51041464, 1.50561287,
1.50136266, 1.49763156, 1.4931218 , 1.48887159, 1.48410226,
1.48027383, 1.47514762, 1.47080008, 1.46742586, 1.46398676,
1.459769 , 1.45636234, 1.45321524, 1.44925702, 1.4444877 ,
1.44023749, 1.43653884, 1.43186685, 1.42761664, 1.42278243,
1.41905133, 1.41447667, 1.41042113, 1.4068198 , 1.40192071,
1.39640517, 1.39127896, 1.37959899, 1.37586789, 1.37070923,
1.36668613, 1.36263059, 1.35948349, 1.35607683, 1.35208617,
1.34708974, 1.34361819, 1.33988709, 1.33547466, 1.33115956,
1.32713646, 1.32080981, 1.31717604, 1.31415872, 1.31110895,
1.30760496, 1.30423074, 1.29998053, 1.29560055, 1.29199922,
1.28856012, 1.2840828 , 1.28074103, 1.27694504, 1.27065083,
1.26717929, 1.2636753 , 1.26036597, 1.25686198, 1.25364999,
1.25004867, 1.24761534, 1.2440789 , 1.24031536, 1.23525404,
1.23204205, 1.22814872, 1.22266563, 1.2176692 , 1.21319188,
1.20839011, 1.2038479 , 1.20112257, 1.19677503, 1.19310882,
1.18992927, 1.18730128, 1.18363507, 1.17850886, 1.17562131,
1.17302576, 1.16926222, 1.16702355, 1.16189735, 1.15858802,
1.15313737, 1.14856271, 1.14583739, 1.14340406, 1.13844008,
1.13526053, 1.13045876, 1.12695477, 1.12267212, 1.11946013,
1.11400947, 1.10949971, 1.10661216, 1.09973396, 1.09558108,
1.08763221, 1.08305756, 1.07887223, 1.07186425, 1.06485627,
1.06021673, 1.05340341, 1.0491532 , 1.04516255, 1.02744793,
1.02125105, 1.00470443, 0.99208358, 0.23337227, 0.22814872,
0.22406074, 0.22204919, 0.21922653, 0.2165012 , 0.21445721,
0.21192655, 0.20916878, 0.20537279, 0.2015768 , 0.19693725,
0.19512037, 0.18824216, 0.18661995, 0.18389462, 0.18120174,
0.17740575, 0.17432354, 0.17273376, 0.17205243, 0.17017066,
0.16861333, 0.167932 , 0.16637467, 0.16484978, 0.16410356,
0.1630329 , 0.16167024, 0.16014535, 0.1587178 , 0.15787425,
0.15654403, 0.1552787 , 0.15287781, 0.15209915, 0.15080138,
0.14963338, 0.14866005, 0.14797872, 0.14778405, 0.14729738,
0.14642139, 0.14535072, 0.14382584, 0.14324184, 0.14256051,
0.1414574 , 0.1411654 , 0.1402894 , 0.13951074, 0.13834274,
0.13727208, 0.13649341, 0.13542275, 0.13493608, 0.13376809,
0.13250276, 0.13201609, 0.13143209, 0.13065343, 0.13006943,
0.12932321, 0.12864188, 0.12718188, 0.12659788, 0.12552722,
0.12429434, 0.12302901, 0.12150412, 0.12101746, 0.12004412,
0.11926546, 0.11897346, 0.11800013, 0.11735124, 0.11715658,
0.11647524, 0.11559925, 0.11452858, 0.11407436, 0.11349036,
0.11261437, 0.11164104, 0.11099215, 0.11044059, 0.10943482,
0.10875349, 0.10787749, 0.10748816, 0.10651483, 0.10602816,
0.10505483, 0.10447083, 0.10333528, 0.10297839, 0.10229706,
0.1018104 , 0.10125884, 0.10057751, 0.09953929, 0.09866329,
0.09827396, 0.09788463, 0.0974953 , 0.09710596, 0.0960353 ,
0.09528908, 0.09444553, 0.09366686, 0.09308286, 0.09279086,
0.09220687, 0.09181753, 0.09152553, 0.0905522 , 0.08987087,
0.08948154, 0.08883265, 0.08805399, 0.08746999, 0.08669132,
0.08630199, 0.08581533, 0.084842 , 0.084258 , 0.083674 ,
0.08321978, 0.08273311, 0.08224645, 0.08205178, 0.08205178,
0.08127312, 0.08078645, 0.08039712, 0.08010512, 0.07955357]
a = VisualizeTools()
max_x = 200
prange=prange[0:max_x]
acc =acc[0:max_x]
cost = cost[0:max_x]
fig, ax = a.plotline(prange,acc,xlabel='Weight Value', ylabel='Accuracy',
filename='coco_p_value_acc')
fig, ax = a.plotscatter(xvalue=[0.0060416667],
yvalue=[0.5140010157426727],
fig=fig,ax=ax,
markersize=30,
legend='Learned Thres',
filename='coco_p_value_acc')
fig, ax = a.plotline(prange,cost,xlabel='Weight Value', ylabel='Cost',
filename='coco_p_value_cost')
fig, ax = a.plotscatter(xvalue=[0.0060416667],
yvalue=[5.9999899999999995],
fig=fig,ax=ax,
markersize=30,
legend='Learned Thres',
filename='coco_p_value_cost')
def getlabeldist(datapath='..\APIperformance\mlserviceperformance_coco\Model0_TrueLabel.txt'):
mydict = dict()
labels = json.load(open(datapath))
for imgname in labels:
labelexist = dict()
for temp in labels[imgname]:
#print(temp)
label = temp['transcription']
if label in mydict:
if(label not in labelexist):
mydict[label]+=1
labelexist[label] = 1
else:
mydict[label] = 1
len_img = len(labels)
return mydict, len_img
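# Hedged sketch (the "_demo_" helper below is illustrative and not part of the
# original module): getlabeldist counts, per label, the number of images that
# contain it at least once -- duplicates inside a single image count once.
def _demo_labeldist_counting():
    labels = {'img1': [{'transcription': 'person'}, {'transcription': 'person'}],
              'img2': [{'transcription': 'person'}, {'transcription': 'dog'}]}
    counts = dict()
    for imgname in labels:
        seen = set()
        for temp in labels[imgname]:
            label = temp['transcription']
            if label not in seen:
                counts[label] = counts.get(label, 0) + 1
                seen.add(label)
    return counts, len(labels)  # ({'person': 2, 'dog': 1}, 2)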
def test_label_dist():
showlegend = True
a = VisualizeTools(figuresize=(22,8),figureformat='jpg')
name = ['Microsoft','Google']
value1 = [5175/6358,4302/6358]
value2 = [5368/6358,4304/6358]
legend = ['2020 March', '2021 Feb']
a.plot_bar2value(barname = name,barvalue = value1,
barvalue2 = value2,
color=['r','b'],
filename='FERPLUS',yname='',
legend=legend,
showlegend=showlegend,
yrange=[min(value1)-0.05,max(value2)+0.05])
showlegend = True
a = VisualizeTools(figuresize=(22,8),figureformat='jpg')
name = ['Microsoft','Google']
value1 = [10996/15339,10069/15339]
value2 = [11000/15339,10073/15339]
legend = ['2020 March', '2021 Feb']
a.plot_bar2value(barname = name,barvalue = value1,
barvalue2 = value2,
color=['r','b'],
filename='RAFDB',yname='',
legend=legend,
showlegend=showlegend,
yrange=[min(value1)-0.05,max(value2)+0.05])
a.plot_bar(barname = name,barvalue = value1)
def getlabelprecisionandrecall(targetlabel='person',
truelabelpath='..\APIperformance\mlserviceperformance_coco\Model2_TrueLabel.txt',
predlabelpath='..\APIperformance\mlserviceperformance_coco\Model6_PredictedLabel.txt',):
truelabel = json.load(open(truelabelpath))
predlabel = json.load(open(predlabelpath))
count = 0
for imgname in truelabel:
truehas = False
for temp in truelabel[imgname]:
#print(temp)
label = temp['transcription']
if label == targetlabel:
truehas = True
predhas = False
for temp in predlabel[imgname]:
#print(temp)
label = temp['transcription']
if label == targetlabel:
predhas = True
if(truehas and predhas):
count+=1
totaltrue = getlabeldist(truelabelpath)
totalpred = getlabeldist(predlabelpath)
if(targetlabel in totalpred[0]):
pred1 = totalpred[0][targetlabel]
else:
pred1 = 0
print('total true, total pred, all correct',totaltrue[0][targetlabel],pred1,count)
if(pred1==0):
return 0, count/totaltrue[0][targetlabel]
return count/totalpred[0][targetlabel], count/totaltrue[0][targetlabel]
def test_precisionrecall(predlabelpath='cocoresult\majvote_coco.txt',
labelid=100,
showlegend=False):
labeldist, labelen = getlabeldist()
labellist = list()
precisionlist = list()
recalllist = list()
for label in sorted(labeldist):
print(label)
pre, recall = getlabelprecisionandrecall(targetlabel=label,
predlabelpath=predlabelpath,)
precisionlist.append(pre)
recalllist.append(recall)
labellist.append(label)
print('pre and recall',precisionlist, recalllist)
np.savetxt('precision'+str(labelid)+'.txt', precisionlist)
    np.savetxt('recall'+str(labelid)+'.txt', recalllist)
np.savetxt('label'+str(labelid)+'.txt',labellist,fmt='%s')
a = VisualizeTools(figuresize=(23,8),figureformat='eps')
a.plot_bar(barname = labellist,barvalue = precisionlist,filename='precisionmajvote',yname='')
a.plot_bar(barname = labellist,barvalue = recalllist,filename='recallmajvote',yname='')
a.plot_bar2value(barname = labellist,barvalue = precisionlist,
barvalue2 = recalllist,
color=['r','b'],
filename='preandrecall'+str(labelid),yname='',
showlegend=showlegend)
return 0
if __name__ == '__main__':
'''
test_precisionrecall(predlabelpath='cocoresult\\FrugalMCTcoco.txt',
labelid=99999)
test_precisionrecall(predlabelpath='cocoresult\\majvote_coco.txt',
labelid=888)
test_precisionrecall(predlabelpath='cocoresult\\100000_coco_thres.txt',
labelid=100000,showlegend=True)
test_precisionrecall(predlabelpath='cocoresult\\0_coco_thres.txt',
labelid=0)
test_precisionrecall(predlabelpath='cocoresult\\6_coco_thres.txt',
labelid=6)
test_precisionrecall(predlabelpath='cocoresult\\2_coco_thres.txt',
labelid=2)
'''
#getlabelprecisionandrecall()
test_label_dist()
#test_plotline()
matplotlib.pyplot.close('all')
|
Data_Acquisition_for_ML_Benchmark-main
|
src/visualizetools.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dam import Dam
print("Loading Dataset...")
instance=2 # instance id, can be 0,1,2,3,4
MyDam = Dam(instance=instance)
print("Dataset loaded!")
budget = MyDam.getbudget() # get budget
print("budget is:",budget)
# display the buyer data, ML model, and per-seller information
buyer_data = MyDam.getbuyerdata() # get buyer data
print("buyer data is:",buyer_data)
mlmodel = MyDam.getmlmodel() # get ml model
print("mlmodel is",mlmodel)
sellers_id = MyDam.getsellerid() # seller ids
print("seller ids are", sellers_id)
for i in sellers_id:
seller_i_price, seller_i_summary, seller_i_samples = MyDam.getsellerinfo(seller_id=int(i))
print("seller ", i, " price: ", seller_i_price.get_price_samplesize(100))
print("seller ", i, " summary: ", seller_i_summary)
print("seller ", i, " samples: ", seller_i_samples)
|
Data_Acquisition_for_ML_Benchmark-main
|
src/example.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:39:21 2022
@author: lingjiao
"""
def pricefunc_lin(frac = 1,
max_p = 100):
p1 = max_p * frac
return p1
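# Hedged usage sketch (the "_demo_" helper is illustrative, not part of the
# original API): the linear schedule prices a fraction `frac` of the data at
# max_p * frac, so a quarter of the data costs a quarter of max_p.
def _demo_pricefunc_lin():
    # expected: [0.0, 25.0, 50.0, 100.0]
    return [pricefunc_lin(frac=f, max_p=100) for f in (0.0, 0.25, 0.5, 1.0)]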
|
Data_Acquisition_for_ML_Benchmark-main
|
src/utils.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
from pricefunction import PriceFunction
import numpy
numpy.random.seed(1111)
class Seller(object):
def __init__(self):
return
def loaddata(self,
data=None,
datapath=None,):
# data: a m x n matrix
# datapath: a path to a csv file.
# the file should be a matrix with column names.
if(not (data is None)):
self.data = data
return
if(datapath != None):
self.data = numpy.loadtxt(open(datapath, "rb"),
delimiter=",",
skiprows=1)
return
print("Not implemented load data of seller")
return
def setprice(self, pricefunc):
self.pricefunc = pricefunc
def getprice(self,data_size):
q1 = data_size/(len(self.data))
return self.pricefunc.get_price(q1)
    def getdata(self, data_size, price):
        # Return `data_size` rows sampled uniformly with replacement from the
        # seller's data, provided the offered price covers the quoted price
        # for that fraction of the dataset.
        data = self.data
        q1 = data_size/(len(self.data))
        if(q1>1):
            raise ValueError("The required number of samples is too large!")
        if(self.pricefunc.get_price(q1) <= price):
            number_of_rows = self.data.shape[0]
            random_indices = numpy.random.choice(number_of_rows,
                                                 size=data_size,
                                                 replace=True)
            rows = data[random_indices, :]
            return rows
        else:
            raise ValueError("The buyer's offer is too small!")
def main():
print("test of the seller")
MySeller = Seller()
MySeller.loaddata(data=numpy.asmatrix([[0,1,1],[1,0,1]]))
MyPricing = PriceFunction()
MyPricing.setup(max_p = 100, method="lin")
MySeller.setprice(MyPricing)
data = MySeller.getdata(1,60)
print("get data is ",data)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/seller.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
import glob
import pandas
def sub2stretagy(submission,MarketEngineObj):
stretagy1 = list()
cost1 = list()
for i in range(len(submission)):
stretagy1.append(submission[i])
cost1.append(MarketEngineObj.sellers[i].getprice(submission[i]))
stretagy = list()
stretagy.append(stretagy1)
stretagy.append(cost1)
#print("stretagy is:",stretagy)
return stretagy
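# Illustrative sketch (the stub classes are hypothetical stand-ins for the
# real Seller/MarketEngine, added only to show the output shape): a
# submission of per-seller sample counts becomes [counts, quoted costs].
def _demo_sub2stretagy():
    class _StubSeller(object):
        def getprice(self, n):
            return 10 * n
    class _StubEngine(object):
        sellers = [_StubSeller(), _StubSeller()]
    return sub2stretagy([1, 2], _StubEngine())  # [[1, 2], [10, 20]]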
class Helper(object):
def __init__(self):
return
def get_cost(self,submission,MarketEngineObj):
stretagy = sub2stretagy(submission,MarketEngineObj)
cost = sum(stretagy[1])
return cost
    def load_data(self, submission, MarketEngineObj):
        '''
        Purchase data from every seller according to the submission and
        return the concatenated training matrix (label in the last column).
        '''
        #print(" train buyer model ")
        stretagy = sub2stretagy(submission,MarketEngineObj)
        buyer_budget = MarketEngineObj.buyer_budget
        print("strategy is:",stretagy)
        # check that the budget constraint is satisfied.
        cost = sum(stretagy[1])
        if(cost>buyer_budget):
            raise ValueError("The budget constraint is not satisfied!")
traindata = None
for i in range(len(MarketEngineObj.sellers)):
d1 = MarketEngineObj.sellers[i].getdata(stretagy[0][i],stretagy[1][i])
if(i==0):
traindata = d1
else:
traindata = numpy.concatenate((traindata,d1))
return traindata
def train_model(self, model, train_X, train_Y):
model.fit(train_X,train_Y)
return model
def eval_model(self, model, test_X, test_Y):
eval_acc = model.score(test_X, test_Y)
return eval_acc
def load_market_instance(self,
feature_path="features/0/",
buyer_data_path="buyerdata.csv",
price_path="price.txt",
budget_path="budget.txt",
):
paths = glob.glob(feature_path+"*.csv")
print("paths:",paths)
# 1. load seller data
seller_data = list()
seller_prices = list()
buyer_budget = numpy.loadtxt(budget_path)
buyer_budget = float(buyer_budget)
#print('budget_ is', type(buyer_budget))
# datafull = [numpy.loadtxt(path,delimiter=',') for path in paths]
datafull = [pandas.read_csv(path,header=None,engine="pyarrow").to_numpy() for path in paths]
seller_datasize = [len(data1) for data1 in datafull]
pricefull = numpy.loadtxt(price_path,delimiter=',',dtype=str)
        for i in range(len(datafull)):
            seller_data.append(datafull[i])
            #print(pricefull[i])
            MyPricing1 = PriceFunction()
            MyPricing1.setup(max_p = float(pricefull[i][1]), method=pricefull[i][0])
            seller_prices.append(MyPricing1)
# buyer_data = numpy.loadtxt(buyer_data_path,delimiter=',')
buyer_data = pandas.read_csv(buyer_data_path,header=None,engine="pyarrow").to_numpy()
return seller_data, seller_prices, buyer_data, buyer_budget, seller_datasize
def main():
print("test of the helper")
MyMarketEngine = MarketEngine()
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
data_b = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
buyer_budget = 100
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
mlmodel1 = LogisticRegression(random_state=0)
MyMarketEngine.setup_market(seller_data=[data_1,data_2],
seller_prices = [MyPricing1,MyPricing2],
buyer_data=data_b,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
stretagy = [[1,2],[50,50]]
#MyMarketEngine.load_stretagy(stretagy)
#acc1 = MyMarketEngine.train_buyer_model()
#print("acc is ",acc1)
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, seller_datasize = MyHelper.load_market_instance(
feature_path="../features/0/",
buyer_data_path="../marketinfo/0/data_buyer/20.csv",
price_path="../marketinfo/0/price/price.txt",
budget_path="../marketinfo/0/price/budget.txt",
)
print("load data finished")
print("seller data size:",seller_datasize)
numpy.savetxt("../marketinfo/0/seller_datasize.csv",seller_datasize,fmt="%d")
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
print("set up market finished")
    # candidate strategies left from experimentation; only the final assignment is used
    stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,10,10,10,10,15]
stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,800,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[50,20,30,40,5,6,7,80,9,10,11,12,13,14,15,0,400,0,50,0]
stretagy=[100,200,300,400,500,600,70,80,9,10,11,12,13,14,15,50,50,50,50,50]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,50,50,50,50,50]
stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,0,0,0,0]
traindata = MyHelper.load_data(stretagy, MyMarketEngine)
    # candidate models; only the final assignment (LogisticRegression) takes effect
    model = RandomForestClassifier()
model = KNeighborsClassifier(n_neighbors=9)
model = LogisticRegression(random_state=0)
model = MyHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MyHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
print("acc is:", acc1)
model2 = DummyClassifier(strategy="most_frequent")
model2 = MyHelper.train_model(model2, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc2 = MyHelper.eval_model(model2,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
print("dummy acc is:", acc2)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/helper.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
class PriceFunction(object):
def __init__(self):
return
    def setup(self, max_p = 100, method="lin",
              data_size=1):
        self.max_p = max_p
        self.method = method  # was hard-coded to "lin", silently ignoring the argument
        self.data_size = data_size
def get_price(self,
frac=1,
):
if(frac<0 or frac>1):
raise ValueError("The fraction of samples must be within [0,1]!")
max_p = self.max_p
if(self.method=="lin"):
p1 = max_p * frac
return p1
return
def get_price_samplesize(self,
samplesize=10,
):
frac = samplesize/self.data_size
#print("frac is",frac)
return self.get_price(frac)
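# Hedged usage sketch (added for illustration; "_example_" is not part of the
# original module): under a linear schedule, buying 10 of 40 samples costs
# max_p * 10/40, i.e. 25 when max_p is 100.
def _example_price_samplesize():
    pf = PriceFunction()
    pf.setup(max_p=100, method="lin", data_size=40)
    return pf.get_price_samplesize(samplesize=10)  # 25.0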
def main():
print("test of the price func")
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/pricefunction.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
class Evaluator(object):
def __init__(self):
self.Helper = Helper()
return
def eval_submission(self,
submission,
seller_data,
buyer_data,
seller_price,
buyer_budget=100,
mlmodel=LogisticRegression(random_state=0),
):
        '''
        Evaluate a submission on a market instance.

        Parameters
        ----------
        submission : per-seller sample counts (see Helper.load_data).
        seller_data : list of data matrices, one per seller.
        buyer_data : matrix with the label in the last column.
        seller_price : list of PriceFunction objects, one per seller.
        buyer_budget : total budget available to the buyer.
        mlmodel : sklearn-style classifier to train and evaluate.

        Returns
        -------
        The accuracy of the trained model on the buyer data.
        '''
MyMarketEngine = MarketEngine()
MyHelper = self.Helper
# set up the market
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_price,
buyer_data=buyer_data,
buyer_budget=buyer_budget,
mlmodel=mlmodel,
)
# get train data
traindata = MyHelper.load_data(submission, MyMarketEngine)
# train the model
model = MyHelper.train_model(mlmodel, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
# eval the model
acc1 = MyHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
return acc1
def main():
print("test of the evaluator")
submission = [[1,2],[50,50]]
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
seller_data = [data_1, data_2]
buyer_data = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
seller_price = [MyPricing1, MyPricing2]
MyEval = Evaluator()
acc1 = MyEval.eval_submission(
submission,
seller_data,
buyer_data,
seller_price,
buyer_budget=100,
mlmodel=LogisticRegression(random_state=0),
)
print("acc is:", acc1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
import numpy
from marketengine import MarketEngine
from helper import Helper
from sklearn.neighbors import KNeighborsClassifier
import json
def evaluate_batch(data_config,
):
instance_ids = data_config['instance_ids']
result = dict()
for id1 in instance_ids:
result[id1] = evaluate_multiple_trial(data_config,instance_id=id1)
return result
def evaluate_multiple_trial(data_config,
instance_id,
num_trial=10,
):
results = [evaluate_once(data_config=data_config,
instance_id=instance_id) for i in range(num_trial)]
#print("results are:",results)
results_avg = dict()
results_avg['cost'] = 0
results_avg['acc'] = 0
for item in results:
#print("item is:",item)
results_avg['cost'] += item['cost']/len(results)
results_avg['acc'] += item['acc']/len(results)
return results_avg
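# Minimal sketch of the averaging performed above (hypothetical trial results,
# added for illustration): each trial contributes 1/num_trial of its cost and
# accuracy to the reported means.
def _demo_trial_average():
    trials = [{'cost': 10.0, 'acc': 0.8}, {'cost': 30.0, 'acc': 0.6}]
    avg = {'cost': 0.0, 'acc': 0.0}
    for item in trials:
        avg['cost'] += item['cost'] / len(trials)
        avg['acc'] += item['acc'] / len(trials)
    return avg  # {'cost': 20.0, 'acc': ~0.7}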
def evaluate_once(data_config,
instance_id):
# load submission
submission = load_submission(path = data_config['submission_path']+str(instance_id)+".csv")
# get the helper
model_name = data_config['model_name']
    MarketHelper, MarketEngineObj, model, seller_data, buyer_data = get_market_info(instance_id=instance_id,
                                                                                    model_name=model_name)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
result = dict()
result['cost'] = cost
result['acc'] = acc1
return result
def load_submission(path):
data = numpy.loadtxt(path,delimiter=",",dtype=int)
return data
def get_market_info(instance_id,
model_name="lr"):
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
MyMarketEngine = MarketEngine()
mlmodel1 = LogisticRegression(random_state=0)
if(model_name=="knn"):
mlmodel1 = KNeighborsClassifier(n_neighbors=9)
    if(model_name=='rf'):
        # note: despite the 'rf' tag, this instantiates gradient boosting rather than a random forest
        mlmodel1 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
                                              max_depth=1, random_state=0)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
return MyHelper, MyMarketEngine, mlmodel1,seller_data, buyer_data
def main():
data_config = json.load(open("../config/bilge20230301_rf.json")) # load the data folder
result = evaluate_batch(data_config)
json_object = json.dumps(result, indent=4)
save_path = data_config['save_path']
with open(save_path, "w") as outfile:
outfile.write(json_object)
print("The result is:",result)
return
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator_submission.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# coding: utf-8
# In[1]:
import os, sys
import time
sys.path.insert(0, '..')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cbook import flatten
import lib
import torch, torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import MinMaxScaler
from pickle import dump
import random
import pandas as pd
from itertools import chain
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#device = 'cpu'
experiment_name = 'augur_node_shallow'
experiment_name = '{}_{}.{:0>2d}.{:0>2d}_{:0>2d}:{:0>2d}'.format(experiment_name, *time.gmtime()[:5])
print("experiment:", experiment_name)
# In[2]:
data = lib.Dataset("AUGUR", random_state=round(time.time()), quantile_transform=False,scaling='None', log_transform=False,quantile_noise=1e-3)
in_features = data.X_train.shape[1]
random_state=1337  # unused below; the Dataset above seeds from time.time()
output_distribution='normal'  # unused below
data.y_train = np.log10(data.y_train)
data.y_valid = np.log10(data.y_valid)
data.y_test = np.log10(data.y_test)
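# Note (assumed rationale, added for clarity): the performance targets span
# orders of magnitude, so regressing on log10(y) keeps the MSE loss from
# being dominated by the largest-performance samples.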
print("Dataset reading Successful!")
# Plots the y-distribution
plt.hist(data.y_train, density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_train_dist.png")
plt.close()
plt.hist(data.y_test.reshape(-1), density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_test_dist.png")
plt.close()
plt.hist(data.y_valid.reshape(-1), density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_valid_dist.png")
plt.close()
model = nn.Sequential(
lib.DenseBlock(in_features, 128, num_layers=6, tree_dim=3, depth=8, flatten_output=False,
choice_function=lib.entmax15, bin_function=lib.entmoid15),
lib.Lambda(lambda x: x[..., 0].mean(dim=-1)), # average first channels of every tree
).to(device)
with torch.no_grad():
res = model(torch.as_tensor(np.float32(data.X_train[:1000]), device=device))
# trigger data-aware init
#if torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# In[31]:
from qhoptim.pyt import QHAdam
optimizer_params = { 'nus':(0.7, 1.0), 'betas':(0.95, 0.998) }
print("qhoptim import successful!")
# In[33]:
if(True):
experiment_name = "augur_energy_6_layers_128_depth8_log_transformed__rel_error"
#experiment_name = "dummy_test"
trainer = lib.Trainer(
model=model, loss_function=F.mse_loss,
experiment_name=experiment_name,
warm_start=False,
Optimizer=QHAdam,
optimizer_params=optimizer_params,
verbose=True,
n_last_checkpoints=5
)
# Training parameters to control
loss_history, mse_history = [], []
best_mse = float('inf')
best_step_mse = 0
early_stopping_rounds = 5000
report_frequency = 100
# Train and plot the training loss and validation loss
if (True):
for batch in lib.iterate_minibatches(np.float32(data.X_train), np.float32(data.y_train), batch_size=512,
shuffle=True, epochs=float('inf')):
metrics = trainer.train_on_batch(*batch, device=device)
loss_history.append(metrics['loss'])
if trainer.step % report_frequency == 0:
trainer.save_checkpoint()
trainer.average_checkpoints(out_tag='avg')
trainer.load_checkpoint(tag='avg')
mse = trainer.evaluate_mse(
np.float32(data.X_valid), np.float32(data.y_valid), device=device, batch_size=512)
if mse < best_mse:
best_mse = mse
best_step_mse = trainer.step
trainer.save_checkpoint(tag='best_mse')
mse_history.append(mse)
trainer.load_checkpoint() # last
trainer.remove_old_temp_checkpoints()
plt.figure(figsize=[18, 6])
plt.subplot(1, 2, 1)
plt.plot(loss_history)
plt.title('Loss')
plt.grid()
plt.subplot(1, 2, 2)
plt.plot(mse_history)
plt.title('MSE')
plt.grid()
#plt.show()
filename = experiment_name + ".png"
plt.savefig(filename)
plt.close()
print("Loss %.5f" % (metrics['loss']))
print("Val MSE: %0.5f" % (mse))
if trainer.step > best_step_mse + early_stopping_rounds:
                print('BREAK. There is no improvement for {} steps'.format(early_stopping_rounds))
print("Best step: ", best_step_mse)
print("Best Val MSE: %0.5f" % (best_mse))
break
# In case you want to test a particular checkpoint, uncomment the following
# and comment line 173-177
'''
trainer_test = lib.Trainer(model=model, loss_function=F.mse_loss)
ckpt_path = "/workspace/node/node/notebooks/augur_energy_6k_dataset_6_layers_128_depth8_log_transformed__rel_error/checkpoint_best_mse.pth"
trainer_test.load_checkpoint(path=ckpt_path)
mse, pred, ground, error = trainer_test.evaluate_mse_test(np.float32(data.X_test), np.float32(data.y_test), device=device)
print('Best step: ', trainer_test.step)
print('Mean Error', np.mean(error))
'''
# Evaluation on the test dataset
trainer.load_checkpoint(tag='best_mse')
mse, pred, ground, error = trainer.evaluate_mse_test(np.float32(data.X_test), np.float32(data.y_test), device=device)
print('Best step: ', trainer.step)
print("Test MSE: %0.5f" % (mse))
# Plot the correlation on the test set
plt.scatter(ground, ground, color='green', alpha=0.1)
plt.scatter(ground, pred, color='gray')
test_filename = experiment_name + "_test.png"
plt.savefig(test_filename)
plt.close()
|
Augur-main
|
train/augur_node_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import os
from scripts.download_data import ContactPoseDownloader
osp = os.path
def startup(data_dir=None, default_dir=osp.join('data', 'contactpose_data')):
# check that the provided data_dir is OK
if data_dir is not None:
assert data_dir!=default_dir, \
"If you provide --data_dir, it must not be {:s}".format(default_dir)
assert osp.isdir(data_dir), "If you provide --data_dir, it must exist"
else:
data_dir = default_dir
if not osp.isdir(data_dir):
if osp.isfile(data_dir) or osp.islink(data_dir):
os.remove(data_dir)
print('Removed file {:s}'.format(data_dir))
os.mkdir(data_dir)
# symlink for easy access
if data_dir != default_dir:
if osp.islink(default_dir):
os.remove(default_dir)
print('Removed symlink {:s}'.format(default_dir))
os.symlink(data_dir, default_dir)
print('Symlinked to {:s} for easy access'.format(default_dir))
downloader = ContactPoseDownloader()
# download 3D models and marker locations
downloader.download_3d_models()
downloader.download_markers()
# download all 3D joint, object pose, camera calibration data
downloader.download_grasps()
# download contact maps for participant 28, 'use' grasps
downloader.download_contact_maps(28, 'use')
# download RGB-D images for participant 28, bowl 'use' grasp
downloader.download_images(28, 'use', data_dir,
include_objects=('bowl',))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None,
help='Base data dir for the ContactPose dataset')
args = parser.parse_args()
startup(args.data_dir)
|
ContactPose-main
|
startup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
|
ContactPose-main
|
__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import numpy as np
import logging
import math
import transforms3d.euler as txe
import transforms3d.quaternions as txq
import argparse
import cv2
import matplotlib.pyplot as plt
try:
from thirdparty.mano.webuser.smpl_handpca_wrapper_HAND_only \
import load_model as load_mano_model
MANO_PRESENT = True
except ImportError:
load_mano_model = None
MANO_PRESENT = False
if MANO_PRESENT:
# hacks needed for MANO Python2 code
import os.path as osp
import _pickle as cPickle
import sys
sys.modules['cPickle'] = cPickle
sys.path.append(osp.join('thirdparty', 'mano'))
sys.path.append(osp.join('thirdparty', 'mano', 'webuser'))
def texture_proc(colors, a=0.05, invert=False):
idx = colors > 0
ci = colors[idx]
if len(ci) == 0:
return colors
if invert:
ci = 1 - ci
# fit a sigmoid
x1 = min(ci); y1 = a
x2 = max(ci); y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
# apply the sigmoid
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu)))
colors[idx] = ci
return colors
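# Sketch of a sanity check (illustrative, not original code): the fitted
# sigmoid maps the smallest nonzero color to a and the largest to 1 - a.
def _check_texture_proc(a=0.05):
  c = np.asarray([0.2, 0.5, 0.9])
  out = texture_proc(c.copy(), a=a)
  assert np.isclose(out.min(), a) and np.isclose(out.max(), 1 - a)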
class MovingAverage:
def __init__(self):
self.count = 0
self.val = 0
def append(self, v):
self.val = self.val*self.count + v
self.count += 1
self.val /= self.count
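# Quick sanity sketch (added for illustration): the recurrence above keeps a
# running mean, so appending 1, 2, 3 leaves val == 2.0.
def _check_moving_average():
  ma = MovingAverage()
  for v in (1.0, 2.0, 3.0):
    ma.append(v)
  assert abs(ma.val - 2.0) < 1e-9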
def linesegment_from_points(p1, p2):
n = p2 - p1
return np.hstack((p1, n))
def get_hand_line_ids():
line_ids = []
for finger in range(5):
base = 4*finger + 1
line_ids.append([0, base])
for j in range(3):
line_ids.append([base+j, base+j+1])
line_ids = np.asarray(line_ids, dtype=int)
return line_ids
def rotmat_from_vecs(v1, v2=np.asarray([0, 0, 1])):
"""
Returns a rotation matrix R_1_2
:param v1: vector in frame 1
:param v2: vector in frame 2
:return:
"""
v1 = v1 / np.linalg.norm(v1)
v2 = v2 / np.linalg.norm(v2)
v = np.cross(v2, v1)
vx = np.asarray([
[0, -v[2], +v[1], 0],
[+v[2], 0, -v[0], 0],
[-v[1], +v[0], 0, 0],
[0, 0, 0, 0]])
dotp = np.dot(v1, v2)
if np.abs(dotp + 1) < 1e-3:
R = np.eye(4)
x = np.cross(v2, [1, 0, 0])
R[:3, :3] = txe.axangle2mat(x, np.pi)
else:
R = np.eye(4) + vx + np.dot(vx, vx)/(1+dotp)
return R
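# Self-check sketch (illustrative only): the returned homogeneous matrix
# rotates v2 onto v1, so its 3x3 block applied to the default v2 = [0, 0, 1]
# should reproduce the (normalized) v1.
def _check_rotmat_from_vecs():
  v1 = np.asarray([1.0, 0.0, 0.0])
  R = rotmat_from_vecs(v1)
  assert np.allclose(R[:3, :3] @ np.asarray([0.0, 0.0, 1.0]), v1)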
def p_dist_linesegment(p, ls):
"""
Distance from point p to line segment ls
p: Nx3
ls: Mx6 (2 3-dim endpoints of M line segments)
"""
# NxMx3
ap = p[:, np.newaxis, :] - ls[np.newaxis, :, :3]
# 1xMx3
u = ls[np.newaxis, :, 3:]
# 1xMx3
u_norm = u / np.linalg.norm(u, axis=2, keepdims=True)
# NxM
proj = np.sum(ap * u_norm, axis=2)
# point to line distance
# NxM
d_line = np.linalg.norm(np.cross(ap, u_norm, axis=2), axis=2)
# point to endpoint distance
# NxM
d_a = np.linalg.norm(ap, axis=2)
d_b = np.linalg.norm(ap-u, axis=2)
d_endp = np.minimum(d_a, d_b)
within_ls = (proj > 0) * (proj < np.linalg.norm(u, axis=2)) * (d_endp < 0.03)
d_ls = within_ls*d_line + (1-within_ls)*d_endp
return d_ls
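# Sanity sketch (illustrative only; note the hard-coded 0.03 m endpoint gate
# above assumes hand-scale geometry in metres): a point 1 cm above the middle
# of a 2 cm segment along x is at perpendicular distance 0.01.
def _check_p_dist_linesegment():
  p = np.asarray([[0.01, 0.01, 0.0]])
  ls = np.asarray([[0.0, 0.0, 0.0, 0.02, 0.0, 0.0]])  # endpoint followed by direction
  assert np.allclose(p_dist_linesegment(p, ls), 0.01)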
def closest_linesegment_point(l0, l1, p):
"""
For each point in p, finds the closest point on the list of line segments
whose endpoints are l0 and l1
p: N x 3
l0, l1: M x 3
out: N x M x 3
"""
p = np.broadcast_to(p[:, np.newaxis, :], (len(p), len(l0), 3))
l0 = np.broadcast_to(l0[np.newaxis, :, :], (len(p), len(l0), 3))
l1 = np.broadcast_to(l1[np.newaxis, :, :], (len(p), len(l1), 3))
llen = np.linalg.norm(l1 - l0, axis=-1, keepdims=True)
lu = (l1 - l0) / llen
v = p - l0
d = np.sum(v * lu, axis=-1, keepdims=True)
d = np.clip(d, a_min=0, a_max=llen)
out = l0 + d*lu
return out
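# Sanity sketch (illustrative only): for a single segment from the origin to
# (2, 0, 0), the closest point to (1, 1, 0) is its perpendicular foot (1, 0, 0).
def _check_closest_linesegment_point():
  l0 = np.asarray([[0.0, 0.0, 0.0]])
  l1 = np.asarray([[2.0, 0.0, 0.0]])
  p = np.asarray([[1.0, 1.0, 0.0]])
  assert np.allclose(closest_linesegment_point(l0, l1, p), [[[1.0, 0.0, 0.0]]])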
def pose_matrix(pose):
T = np.eye(4)
T[:3, 3] = pose['translation']
T[:3, :3] = txq.quat2mat(pose['rotation'])
return T
def tform_points(T, X):
"""
X: Nx3
T: 4x4 homogeneous
"""
X = np.vstack((X.T, np.ones(len(X))))
X = T @ X
X = X[:3].T
return X
def project(P, X):
"""
X: Nx3
P: 3x4 projection matrix, ContactPose.P or K @ cTo
returns Nx2 perspective projections
"""
X = np.vstack((X.T, np.ones(len(X))))
x = P @ X
x = x[:2] / x[2]
return x.T
def get_A(camera_name, W=960, H=540):
"""
Get the affine transformation matrix applied after 3D->2D projection
"""
def flipud(H):
return np.asarray([[1, 0, 0], [0, -1, H], [0, 0, 1]])
def fliplr(W):
return np.asarray([[-1, 0, W], [0, 1, 0], [0, 0, 1]])
def transpose():
return np.asarray([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
if camera_name == 'kinect2_left':
return np.dot(fliplr(H), transpose())
elif camera_name == 'kinect2_right':
return np.dot(flipud(W), transpose())
elif camera_name == 'kinect2_middle':
return np.dot(fliplr(W), flipud(H))
else:
raise NotImplementedError
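# Sanity sketch (illustrative only): for kinect2_middle the affine transform
# composes a horizontal and a vertical flip, i.e. a 180 degree rotation, so
# the image origin maps to (W, H) in homogeneous coordinates.
def _check_get_A():
  A = get_A('kinect2_middle', W=960, H=540)
  assert np.allclose(A @ np.asarray([0.0, 0.0, 1.0]), [960.0, 540.0, 1.0])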
def setup_logging(filename=None):
logging.basicConfig(level=logging.DEBUG)
root = logging.getLogger()
if filename is not None:
root.addHandler(logging.FileHandler(filename, 'w'))
root.info('Logging to {:s}'.format(filename))
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
  """Spherical linear interpolation between two (w, x, y, z) quaternions."""
  EPS = np.finfo(float).eps * 4.0
q0 = np.asarray(quat0) / np.linalg.norm(quat0)
q1 = np.asarray(quat1) / np.linalg.norm(quat1)
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(d) + spin * math.pi
if abs(angle) < EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
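# Sanity sketch (illustrative only; quaternions are in (w, x, y, z) order as
# used by transforms3d): halfway between the identity and a 90 degree
# rotation about z is a 45 degree rotation about z.
def _check_quaternion_slerp():
  q0 = np.asarray([1.0, 0.0, 0.0, 0.0])
  q1 = np.asarray([math.cos(math.pi/4), 0.0, 0.0, math.sin(math.pi/4)])
  q = quaternion_slerp(q0, q1, 0.5)
  assert np.allclose(q, [math.cos(math.pi/8), 0.0, 0.0, math.sin(math.pi/8)])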
def average_quaternions(qs, ws=None):
"""
From https://qr.ae/TcwOci
"""
if ws is None:
ws = np.ones(len(qs)) / len(qs)
else:
    assert np.isclose(np.sum(ws), 1.0)  # exact equality on floats is fragile
for idx in range(1, len(qs)):
if np.dot(qs[0], qs[idx]) < 0:
qs[idx] *= -1
for i in range(1, len(qs)):
frac = ws[i] / (ws[i-1] + ws[i]) # weight of qs[i]
qs[i] = quaternion_slerp(qs[i-1], qs[i], fraction=frac)
ws[i] = 1 - sum(ws[i+1:])
return qs[-1]
def default_argparse(require_p_num=True, require_intent=True,
require_object_name=True):
parser = argparse.ArgumentParser()
parser.add_argument('--p_num', type=int, help='Participant number (1-50)',
required=require_p_num)
parser.add_argument('--intent', choices=('use', 'handoff'),
help='Grasp intent', required=require_intent)
parser.add_argument('--object_name', help="Name of object",
required=require_object_name)
return parser
def default_multiargparse():
parser = argparse.ArgumentParser()
parser.add_argument('--p_num',
help='Participant numbers, comma or - separated.'
'Skipping means all participants',
default=None)
parser.add_argument('--intent', choices=('use', 'handoff', 'use,handoff'),
help='Grasp intents, comma separated', default='use,handoff')
parser.add_argument('--object_name',
help="Object names, comma separated, ignore for all objects",
default=None)
return parser
def parse_multiargs(args):
"""
parses the p_num, intent, and object_name arguments from a parser created
with default_multiargparse
"""
from utilities.dataset import get_p_nums
p_nums = args.p_num
if p_nums is None:
p_nums = list(range(1, 51))
elif '-' in p_nums:
first, last = p_nums.split('-')
p_nums = list(range(int(first), int(last)+1))
else:
p_nums = [int(p) for p in p_nums.split(',')]
intents = args.intent.split(',')
object_names = args.object_name
if object_names is not None:
object_names = object_names.split(',')
all_p_nums = []
for intent in intents:
for object_name in object_names:
all_p_nums.extend([pn for pn in p_nums if pn in
get_p_nums(object_name, intent)])
p_nums = list(set(all_p_nums))
delattr(args, 'p_num')
delattr(args, 'intent')
delattr(args, 'object_name')
return p_nums, intents, object_names, args
def colorcode_depth_image(im):
assert(im.ndim == 2)
im = im.astype(float)
im /= im.max()
j, i = np.nonzero(im)
c = im[j, i]
im = np.zeros((im.shape[0], im.shape[1], 3))
im[j, i, :] = plt.cm.viridis(c)[:, :3]
im = (im * 255.0).astype(np.uint8)
return im
def draw_hands(im, joints, colors=((0, 255, 0), (0, 0, 255)), circle_radius=3,
               line_thickness=2, offset=np.zeros(2, dtype=int)):
if im is None:
print('Invalid image')
return im
if im.ndim == 2: # depth image
im = colorcode_depth_image(im)
for hand_idx, (js, c) in enumerate(zip(joints, colors)):
if js is None:
continue
else:
js = np.round(js-offset[np.newaxis, :]).astype(int)
for j in js:
im = cv2.circle(im, tuple(j), circle_radius, c, -1, cv2.LINE_AA)
for finger in range(5):
base = 4*finger + 1
im = cv2.line(im, tuple(js[0]), tuple(js[base]), (0, 0, 0),
line_thickness, cv2.LINE_AA)
for j in range(3):
im = cv2.line(im, tuple(js[base+j]), tuple(js[base+j+1]),
(0, 0, 0), line_thickness, cv2.LINE_AA)
return im
def draw_object_markers(im, ms, color=(0, 255, 255), circle_radius=3,
offset=np.zeros(2, dtype=int)):
if im.ndim == 2: # depth image
im = colorcode_depth_image(im)
for m in np.round(ms).astype(int):
im = cv2.circle(im, tuple(m-offset), circle_radius, color, -1, cv2.LINE_AA)
return im
def crop_image(im, joints, crop_size, fillvalue=[0]):
"""
joints: list of 21x2 2D joint locations per each hand
crops the im into a crop_size square centered at the mean of all joint
locations
returns cropped image and top-left pixel position of the crop in the full image
"""
if im.ndim < 3:
im = im[:, :, np.newaxis]
if isinstance(fillvalue, list) or isinstance(fillvalue, np.ndarray):
fillvalue = np.asarray(fillvalue).astype(im.dtype)
else:
fillvalue = np.asarray([fillvalue for _ in range(im.shape[2])]).astype(im.dtype)
joints = np.vstack([j for j in joints if j is not None])
bbcenter = np.round(np.mean(joints, axis=0)).astype(int)
im_crop = np.zeros((crop_size, crop_size, im.shape[2]), dtype=im.dtype)
tl = bbcenter - crop_size//2
br = bbcenter + crop_size//2
tl_crop = np.asarray([0, 0], dtype=int)
br_crop = np.asarray([crop_size, crop_size], dtype=int)
tl_spill = np.minimum(0, tl)
tl -= tl_spill
tl_crop -= tl_spill
br_spill = np.maximum(0, br-np.array([im.shape[1], im.shape[0]]))
br -= br_spill
br_crop -= br_spill
im_crop[tl_crop[1]:br_crop[1], tl_crop[0]:br_crop[0], :] = \
im[tl[1]:br[1], tl[0]:br[0], :]
return im_crop.squeeze(), tl
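# Example (sketch): crop a 256x256 window around the hand joints; a point
# p = (x, y) in the full image maps to p - tl in the crop.
#   crop, tl = crop_image(rgb_im, joints, crop_size=256)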
def openpose2mano(o, n_joints_per_finger=4):
"""
convert joints from openpose format to MANO format
"""
finger_o2m = {0: 4, 1: 0, 2: 1, 3: 3, 4: 2}
m = np.zeros((5*n_joints_per_finger+1, 3))
m[0] = o[0]
for ofidx in range(5):
for jidx in range(n_joints_per_finger):
oidx = 1 + ofidx*4 + jidx
midx = 1 + finger_o2m[ofidx]*n_joints_per_finger + jidx
m[midx] = o[oidx]
return np.array(m)
# m2o
# 0->1, 1->2, 2->4, 3->3, 4->0
def mano2openpose(m, n_joints_per_finger=4):
"""
convert joints from MANO format to openpose format
"""
finger_o2m = {0: 4, 1: 0, 2: 1, 3: 3, 4: 2}
finger_m2o = {v: k for k,v in finger_o2m.items()}
o = np.zeros((5*n_joints_per_finger+1, 3))
o[0] = m[0]
for mfidx in range(5):
for jidx in range(n_joints_per_finger):
midx = 1 + mfidx*4 + jidx
oidx = 1 + finger_m2o[mfidx]*n_joints_per_finger + jidx
o[oidx] = m[midx]
return o
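# openpose2mano and mano2openpose are inverses, so for any 21x3 joint array j:
#   np.allclose(mano2openpose(openpose2mano(j)), j)  # -> True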
def mano_joints_with_fingertips(m):
"""
get joints from MANO model
MANO model does not come with fingertip joints, so we have selected vertices
that correspond to fingertips
"""
fingertip_idxs = [333, 444, 672, 555, 745]
out = [m.J_transformed[0]]
for fidx in range(5):
for jidx in range(4):
if jidx < 3:
idx = 1 + fidx*3 + jidx
out.append(m.J_transformed[idx])
else:
out.append(m[fingertip_idxs[fidx]])
return out
def load_mano_meshes(params, model_dicts, oTh=(np.eye(4), np.eye(4)),
flat_hand_mean=False):
if not MANO_PRESENT or model_dicts is None:
return (None, None)
out = []
for hand_idx, mp in enumerate(params):
if mp is None:
out.append(None)
continue
ncomps = len(mp['pose']) - 3
m = load_mano_model(model_dicts[hand_idx], ncomps=ncomps,
flat_hand_mean=flat_hand_mean)
m.betas[:] = mp['betas']
m.pose[:] = mp['pose']
oTm = oTh[hand_idx] @ mp['hTm']
vertices = np.array(m)
vertices = tform_points(oTm, vertices)
joints = mano2openpose(mano_joints_with_fingertips(m))
joints = tform_points(oTm, joints)
out.append({
'vertices': vertices,
'joints': joints,
'faces': np.asarray(m.f),
})
return out
def grabcut_mask(src, mask, n_iters=10):
"""
Refines noisy mask edges using Grabcut on image src
"""
assert(src.shape[:2] == mask.shape[:2])
y, x = np.where(mask)
gmask = np.zeros((src.shape[0], src.shape[1]), dtype=np.uint8) # GC_BGD
gmask[y.min():y.max()+1, x.min():x.max()+1] = 2 # GC_PR_BGD
gmask[y, x] = 3 # GC_PR_FGD
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
gmask, bgdModel, fgdModel = \
cv2.grabCut(src, gmask, (0, 0, 0, 0), bgdModel, fgdModel, n_iters,
mode=cv2.GC_INIT_WITH_MASK)
mask = np.logical_or(gmask==1, gmask==3)
return mask
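# Example (sketch): refine a rough foreground guess against the color image.
# src must be an 8-bit 3-channel image, as required by cv2.grabCut.
#   mask = depth_im > 0                # noisy binary mask
#   mask = grabcut_mask(rgb_im, mask)  # boolean HxW array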
|
ContactPose-main
|
utilities/misc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
from utilities.import_open3d import *
from open3d import pipelines
import utilities.misc as mutils
assert(mutils.load_mano_model is not None)
import numpy as np
import chumpy as ch
import os
import json
import transforms3d.quaternions as txq
import pickle
osp = os.path
o3dr = pipelines.registration
def mano_param_dict(n_pose_params, n_betas=10):
out = {
'pose': [0.0 for _ in range(n_pose_params+3)],
'betas': [0.0 for _ in range(n_betas)],
'valid': False,
'mTc': {
'translation': [0.0, 0.0, 0.0],
'rotation': [1.0, 0.0, 0.0, 0.0],
}
}
return out
def get_palm_joints(p, n_joints_per_finger=4):
"""
get the 6 palm joints (root + base of all 5 fingers)
"""
idx = [0]
for fidx in range(5):
idx.append(1 + fidx*n_joints_per_finger)
return p[idx]
def register_pcs(src, tgt, verbose=True):
"""
registers two pointclouds by rigid transformation
target_x = target_T_source * source_x
"""
assert(len(src) == len(tgt))
ps = o3dg.PointCloud()
ps.points = o3du.Vector3dVector(src)
pt = o3dg.PointCloud()
pt.points = o3du.Vector3dVector(tgt)
c = [[i, i] for i in range(len(src))]
c = o3du.Vector2iVector(c)
r = o3dr.TransformationEstimationPointToPoint()
r.with_scaling = False
if verbose:
print('Rigid registration RMSE (before) = {:f}'.
format(r.compute_rmse(ps, pt, c)))
tTs = r.compute_transformation(ps, pt, c)
pst = ps.transform(tTs)
if verbose:
print('Rigid registration RMSE (after) = {:f}'.
format(r.compute_rmse(pst, pt, c)))
return tTs
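# Example (sketch): recover a known rigid transform from point correspondences.
#   src = np.random.rand(6, 3)
#   T_true = np.eye(4); T_true[:3, 3] = [0.1, 0.0, -0.2]
#   tgt = mutils.tform_points(T_true, src)
#   tTs = register_pcs(src, tgt, verbose=False)  # ~= T_true, RMSE ~ 0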
class MANOFitter(object):
_mano_dicts = None
def __init__(self):
if MANOFitter._mano_dicts is None:
MANOFitter._mano_dicts = []
for hand_name in ('LEFT', 'RIGHT'):
filename = osp.join('thirdparty', 'mano', 'models',
'MANO_{:s}.pkl'.format(hand_name))
with open(filename, 'rb') as f:
MANOFitter._mano_dicts.append(pickle.load(f, encoding='latin1'))
@staticmethod
def fit_joints(both_joints, n_pose_params=15, shape_sigma=10.0,
save_filename=None):
"""
Fits the MANO model to hand joint 3D locations
both_joints: tuple of length 2, 21 joints per hand, e.g. output of ContactPose.hand_joints()
n_pose_params: number of pose parameters (excluding 3 global rotation params)
shape_sigma: reciprocal of shape regularization strength
save_filename: file where the fitting output will be saved in JSON format
"""
mano_params = []
for hand_idx, joints in enumerate(both_joints):
if joints is None: # hand is not present
mano_params.append(mano_param_dict(n_pose_params)) # dummy
continue
cp_joints = mutils.openpose2mano(joints)
# MANO model
m = mutils.load_mano_model(MANOFitter._mano_dicts[hand_idx],
ncomps=n_pose_params, flat_hand_mean=False)
m.betas[:] = np.zeros(m.betas.size)
m.pose[:] = np.zeros(m.pose.size)
mano_joints = mutils.mano_joints_with_fingertips(m)
mano_joints_np = np.array([[float(c) for c in j] for j in mano_joints])
# align palm
cp_palm = get_palm_joints(np.asarray(cp_joints))
mano_palm = get_palm_joints(np.asarray(mano_joints_np))
mTc = register_pcs(cp_palm, mano_palm)
cp_joints = np.dot(mTc, np.vstack((cp_joints.T, np.ones(len(cp_joints)))))
cp_joints = cp_joints[:3].T
cp_joints = ch.array(cp_joints)
# set up objective
objective = [m-c for m,c in zip(mano_joints, cp_joints)]
mean_betas = ch.array(np.zeros(m.betas.size))
objective.append((m.betas - mean_betas) / shape_sigma)
# optimize
ch.minimize(objective, x0=(m.pose, m.betas, m.trans), method='dogleg')
p = mano_param_dict(n_pose_params)
p['pose'] = np.array(m.pose).tolist()
p['betas'] = np.array(m.betas).tolist()
p['valid'] = True
p['mTc']['translation'] = (mTc[:3, 3] - np.array(m.trans)).tolist()
p['mTc']['rotation'] = txq.mat2quat(mTc[:3, :3]).tolist()
mano_params.append(p)
# # to access hand mesh vertices and faces
# vertices = np.array(m.r)
# vertices = mutils.tform_points(np.linalg.inv(mTc), vertices)
# faces = np.array(m.f)
if save_filename is not None:
with open(save_filename, 'w') as f:
json.dump(mano_params, f, indent=4, separators=(',', ':'))
print('{:s} written'.format(save_filename))
return mano_params
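# Typical usage (sketch), given a ContactPose grasp object cp:
#   fitter = MANOFitter()  # populates MANOFitter._mano_dicts
#   params = MANOFitter.fit_joints(cp.hand_joints(),
#                                  save_filename='mano_fits_15.json')
# Absent hands get a dummy mano_param_dict with valid=False.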
|
ContactPose-main
|
utilities/mano_fitting.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import sys
sys.path.append('.')
|
ContactPose-main
|
utilities/init_paths.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import os
os.environ["PYOPENGL_PLATFORM"] = "osmesa"
import trimesh
import pyrender
import numpy as np
import transforms3d.euler as txe
import utilities.misc as mutils
import cv2
osp = os.path
class DepthRenderer(object):
"""
Renders object or hand mesh into a depth map
"""
def __init__(self, object_name_or_mesh, K, camera_name, mesh_scale=1.0):
"""
object_name_or_mesh: either object name string (for objects),
or {'vertices': ..., 'faces': ...} (for hand mesh)
K: 3x3 intrinsics matrix
camera_name: kinect2_left / kinect2_middle / kinect2_right; selects the flip
applied to rendered images
mesh_scale: scale factor applied to the mesh (1.0 for hand, 1e-3 for object)
"""
self.K = K
self.camera_name = camera_name
if camera_name == 'kinect2_middle':
self.flip_fn = lambda x: cv2.flip(cv2.flip(x, 0), 1)
self.out_imsize = (960, 540)
elif camera_name == 'kinect2_left':
self.flip_fn = lambda x: cv2.flip(cv2.transpose(x), 1)
self.out_imsize = (540, 960)
elif camera_name == 'kinect2_right':
self.flip_fn = lambda x: cv2.flip(cv2.transpose(x), 0)
self.out_imsize = (540, 960)
else:
raise NotImplementedError
# mesh
if isinstance(object_name_or_mesh, str):
filename = osp.join('data', 'object_models',
'{:s}.ply'.format(object_name_or_mesh))
mesh_t = trimesh.load_mesh(filename)
elif isinstance(object_name_or_mesh, dict):
mesh_t = trimesh.Trimesh(vertices=object_name_or_mesh['vertices'],
faces=object_name_or_mesh['faces'])
else:
raise NotImplementedError
mesh_t.apply_transform(np.diag([mesh_scale, mesh_scale, mesh_scale, 1]))
self.oX = mesh_t.vertices
mesh = pyrender.Mesh.from_trimesh(mesh_t)
self.scene = pyrender.Scene()
self.scene.add(mesh, pose=np.eye(4))
# camera
camera = pyrender.IntrinsicsCamera(K[0, 0], K[1, 1], K[0, 2], K[1, 2],
znear=0.1, zfar=2.0)
self.camera_node = pyrender.Node(camera=camera, matrix=np.eye(4))
self.scene.add_node(self.camera_node)
self.cTopengl = np.eye(4)
self.cTopengl[:3, :3] = txe.euler2mat(np.pi, 0, 0)
# renderer object
self.renderer = pyrender.OffscreenRenderer(960, 540)
def render(self, object_pose):
"""
returns depth map produced by rendering the mesh
object_pose: 4x4 pose of object w.r.t. camera, from ContactPose.object_pose()
object_pose = cTo in the naming convention
"""
oTc = np.linalg.inv(object_pose)
oTopengl = oTc @ self.cTopengl
self.scene.set_pose(self.camera_node, oTopengl)
# TODO: figure out DEPTH_ONLY rendering mode with OSMesa backend
# DEPTH_ONLY + OSMesa does not work currently
# so we have to render color also :(
_, depth = self.renderer.render(self.scene)
return self.flip_fn(depth)
def object_visibility_and_projections(self, object_pose, depth_thresh=5e-3):
"""
returns projection locations of object mesh vertices (Nx2)
and their binary visibility from the object_pose
object_pose = cTo 4x4 pose of object w.r.t. camera
This is cheap Z-buffering. We use rendered depth maps because they are
cleaner than Kinect depth maps
"""
# render depth image
depth_im = self.render(object_pose)
# project all vertices
cX = mutils.tform_points(object_pose, self.oX)
P = mutils.get_A(self.camera_name) @ self.K @ np.eye(4)[:3]
cx = mutils.project(P, cX)
# determine visibility
visible = cX[:, 2] > 0
visible = np.logical_and(visible, cx[:, 0] >= 0)
visible = np.logical_and(visible, cx[:, 1] >= 0)
visible = np.logical_and(visible, cx[:, 0] < self.out_imsize[0]-1)
visible = np.logical_and(visible, cx[:, 1] < self.out_imsize[1]-1)
u = np.round(cx[:, 0]).astype(int)
v = np.round(cx[:, 1]).astype(int)
d_sensor = -np.ones(len(u))
d_sensor[visible] = depth_im[v[visible], u[visible]]
visible = np.logical_and(visible, np.abs(d_sensor-cX[:, 2]) < depth_thresh)
return cx, visible
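# Typical usage (sketch), given a ContactPose grasp object cp:
#   K = cp.K(camera_name)
#   renderer = DepthRenderer(object_name, K, camera_name, mesh_scale=1e-3)
#   cTo = cp.object_pose(camera_name, frame_idx)
#   depth_im = renderer.render(cTo)
#   cx, visible = renderer.object_visibility_and_projections(cTo)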
|
ContactPose-main
|
utilities/rendering.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
from open3d import io as o3dio
from open3d import visualization as o3dv
from open3d import utility as o3du
from open3d import geometry as o3dg
|
ContactPose-main
|
utilities/import_open3d.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
|
ContactPose-main
|
utilities/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
"""
ContactPose dataset loading utilities
"""
import os
import json
import numpy as np
import pickle
from . import misc as mutils
osp = os.path
def get_object_names(p_num, intent, ignore_hp=True):
"""
returns list of objects grasped by this participant with this intent
"""
sess_dir = 'full{:d}_{:s}'.format(p_num, intent)
sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir)
ignored_objects = ('hands', 'palm_print') if ignore_hp else ()
return [o for o in next(os.walk(sess_dir))[1] if o not in ignored_objects]
def get_intents(p_num, object_name):
"""
returns list of intents with which this participant grasped object
"""
out = []
for ins in ('use', 'handoff'):
sess_dir = 'full{:d}_{:s}'.format(p_num, ins)
sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir, object_name)
if osp.isdir(sess_dir):
out.append(ins)
return out
def get_p_nums(object_name, intent):
"""
returns list of participants who grasped this object with this intent
"""
out = []
for p_num in range(1, 51):
sess_dir = 'full{:d}_{:s}'.format(p_num, intent)
sess_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', sess_dir, object_name)
if osp.isdir(sess_dir):
out.append(p_num)
return out
class ContactPose(object):
"""
Base class for accessing the ContactPose dataset
"""
_mano_dicts = None # class variable so that large data is not loaded repeatedly
def __init__(self, p_num, intent, object_name, mano_pose_params=15,
load_mano=True):
"""
load_mano: Flag can be used to prevent loading MANO hand models, which is
time consuming
"""
if (object_name == 'palm_print') or (object_name == 'hands'):
print('This class is not meant to be used with palm_print or hands')
raise ValueError
self.p_num = p_num
self.intent = intent
self.object_name = object_name
self._mano_pose_params = mano_pose_params
p_id = 'full{:d}_{:s}'.format(p_num, intent)
self.data_dir = osp.join(osp.dirname(__file__), '..', 'data', 'contactpose_data', p_id, object_name)
assert(osp.isdir(self.data_dir))
# read grasp data
with open(self.annotation_filename, 'r') as f:
ann = json.load(f)
self._n_frames = len(ann['frames'])
self._valid_cameras = [cn for cn,cv in ann['cameras'].items() if cv['valid']]
self._is_object_pose_optimized = [f['object_pose_optimized'] for
f in ann['frames']]
self._valid_hands = [hand_idx for hand_idx, hand in enumerate(ann['hands'])
if hand['valid']]
im_filenames = {}
for camera_name in self.valid_cameras:
im_dir = osp.join(self.data_dir, 'images_full', camera_name, '{:s}')
im_filenames[camera_name] = [
osp.join(im_dir, 'frame{:03d}.png'.format(i)) for i in range(len(self))]
self._im_filenames = [{k: v for k,v in zip(im_filenames.keys(), vv)} for
vv in zip(*im_filenames.values())]
oX = [] # 3D joints w.r.t. object
all_oTh = []
for hand_idx, hand in enumerate(ann['hands']):
if hand['valid']:
hX = np.asarray(hand['joints']) # hand joints w.r.t. hand root
if hand['moving']:
# oTh: hand pose w.r.t. object (inverse of the annotated hTo)
oThs = [np.linalg.inv(mutils.pose_matrix(f['hTo'][hand_idx])) for f
in ann['frames']]
all_oTh.append(oThs)
oX.append([mutils.tform_points(oTh, hX) for oTh in oThs])
else:
oX.append([hX for _ in range(len(self))])
all_oTh.append([np.eye(4) for _ in range(len(self))])
else:
oX.append([None for _ in range(len(self))])
all_oTh.append([np.eye(4) for _ in range(len(self))])
self._oX = list(map(tuple, zip(*oX)))
self._oTh = list(map(tuple, zip(*all_oTh)))
# world pose w.r.t. object
oTws = [mutils.pose_matrix(f['oTw']) for f in ann['frames']]
self._cTo = {} # object pose w.r.t. camera
self._K = {} # camera intrinsics
for camera_name in self.valid_cameras:
cam = ann['cameras'][camera_name]
self._K[camera_name] = np.array([[cam['K']['fx'], 0, cam['K']['cx']],
[0, cam['K']['fy'], cam['K']['cy']],
[0, 0, 1]])
# camera pose w.r.t. world
wTc = mutils.pose_matrix(cam['wTc'])
self._cTo[camera_name] = [np.linalg.inv(oTw @ wTc) for oTw in oTws]
# projections
self._ox = [] # joint projections
self._om = [] # marker projections
# 3D marker locations w.r.t. object
oM = np.loadtxt(osp.join(osp.dirname(__file__), '..', 'data',
'object_marker_locations',
'{:s}_final_marker_locations.txt'.
format(object_name)))[:, :3]
for frame_idx in range(len(self)):
this_ox = {}
this_om = {}
for camera_name in self.valid_cameras:
this_om[camera_name] = mutils.project(self.P(camera_name, frame_idx),
oM)
x = []
for hand_idx in range(2):
if hand_idx not in self._valid_hands:
x.append(None)
else:
x.append(mutils.project(self.P(camera_name, frame_idx),
self._oX[frame_idx][hand_idx]))
this_ox[camera_name] = tuple(x)
self._ox.append(this_ox)
self._om.append(this_om)
# check if MANO code and models are present
if mutils.MANO_PRESENT and load_mano:
# load MANO data for the class
if ContactPose._mano_dicts is not None:
return
ContactPose._mano_dicts = []
for hand_name in ('LEFT', 'RIGHT'):
filename = osp.join(osp.dirname(__file__), '..', 'thirdparty',
'mano', 'models',
'MANO_{:s}.pkl'.format(hand_name))
with open(filename, 'rb') as f:
ContactPose._mano_dicts.append(pickle.load(f, encoding='latin1'))
elif load_mano:
print('MANO code was not detected, please follow steps in README.md. '
'mano_meshes() will return (None, None)')
def __len__(self):
"""
Number of RGB-D time frames
"""
return self._n_frames
def __repr__(self):
hand_names = ['left', 'right']
hand_str = ' '.join([hand_names[i] for i in self._valid_hands])
return 'Participant {:d}, intent {:s}, object {:s}\n'.format(self.p_num,
self.intent,
self.object_name) +\
'{:d} frames\n'.format(len(self)) +\
'Cameras present: {:s}\n'.format(' '.join(self.valid_cameras)) +\
'Hands present: {:s}'.format(hand_str)
@property
def contactmap_filename(self):
return osp.join(self.data_dir, '{:s}.ply'.format(self.object_name))
@property
def annotation_filename(self):
return osp.join(self.data_dir, 'annotations.json')
@property
def mano_filename(self):
"""
return name of file containing MANO fit params
"""
return osp.join(self.data_dir,
'mano_fits_{:d}.json'.format(self._mano_pose_params))
@property
def valid_cameras(self):
"""
return list of cameras valid for this grasp
"""
return self._valid_cameras
@property
def mano_params(self):
"""
List of 2 [left, right]. Each element is None or a dict containing
'pose' (PCA pose space of dim self._mano_pose_params),
'betas' (PCA shape space), and root transform 'hTm'
"""
with open(self.mano_filename, 'r') as f:
params = json.load(f)
out = []
for p in params:
if not p['valid']:
out.append(None)
continue
# MANO root pose w.r.t. hand
hTm = np.linalg.inv(mutils.pose_matrix(p['mTc']))
out.append({
'pose': p['pose'],
'betas': p['betas'],
'hTm': hTm,
})
return out
def im_size(self, camera_name):
"""
(width, height) in pixels
"""
return (960, 540) if camera_name == 'kinect2_middle' else (540, 960)
def image_filenames(self, mode, frame_idx):
"""
return dict with full image filenames for all valid cameras
mode = color or depth
"""
return {k: v.format(mode) for k,v in self._im_filenames[frame_idx].items()}
def hand_joints(self, frame_idx=None):
"""
3D hand joints w.r.t. object
randomly sampled time frame if frame_idx is None
tuple of length 2, 21 joints per hand, None if hand is not present
"""
if frame_idx is None:
frame_idx = np.random.choice(len(self))
return self._oX[frame_idx]
def K(self, camera_name):
"""
Camera intrinsics 3x3
You will almost never need this. Use self.P() for projection
"""
return self._K[camera_name]
def A(self, camera_name):
"""
Affine transform to be applied to 2D points after projection
Included in self.P
"""
return mutils.get_A(camera_name, 960, 540)
def P(self, camera_name, frame_idx):
"""
3x4 3D -> 2D projection matrix
Use this for all projection operations, not self.K
"""
P = self.K(camera_name) @ self.object_pose(camera_name, frame_idx)[:3]
P = self.A(camera_name) @ P
return P
def object_pose(self, camera_name, frame_idx):
"""
Pose of object w.r.t. camera at frame frame_idx
4x4 homogeneous matrix
"""
return self._cTo[camera_name][frame_idx]
def projected_hand_joints(self, camera_name, frame_idx):
"""
hand joints projected into camera image
tuple of length 2
21x2 or None based on if hand is present in this grasp
"""
return self._ox[frame_idx][camera_name]
def projected_object_markers(self, camera_name, frame_idx):
"""
object markers projected into camera image
Nx2 where N in [5, 10]
"""
return self._om[frame_idx][camera_name]
def mano_meshes(self, frame_idx=None):
"""
return list of 2 dicts. Element is None if that hand is absent,
or contains 'vertices', 'faces', and 'joints'
"""
if frame_idx is None:
frame_idx = np.random.choice(len(self))
return mutils.load_mano_meshes(self.mano_params, ContactPose._mano_dicts,
self._oTh[frame_idx])
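# Typical usage (sketch):
#   cp = ContactPose(28, 'use', 'bowl')
#   print(cp)                             # frames, valid cameras, valid hands
#   joints = cp.hand_joints(frame_idx=0)  # (left, right), 21x3 or None
#   P = cp.P(cp.valid_cameras[0], 0)      # 3x4 projection matrix
#   uv = mutils.project(P, joints[1])     # assumes the right hand is present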
|
ContactPose-main
|
utilities/dataset.py
|
import datetime
try:
import dropbox
DROPBOX_FOUND = True
except ImportError:
DROPBOX_FOUND = False
import json
import math
import os
import random
import requests
from requests.exceptions import ConnectionError
import time
from tqdm.autonotebook import tqdm
osp = os.path
if DROPBOX_FOUND:
dropbox_app_key = os.environ.get('DROPBOX_APP_KEY')
with open(osp.join('data', 'proxies.json'), 'r') as f:
proxies = json.load(f)
if ('https' not in proxies) or (proxies['https'] is None):
proxies = None
def exponential_backoff(n, max_backoff=64.0):
t = math.pow(2.0, n)
t += (random.randint(0, 1000)) / 1000.0
t = min(t, max_backoff)
return t
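# Backoff schedule (sketch): n = 0, 1, 2, ... waits roughly 1, 2, 4, 8, ...
# seconds plus up to 1 s of jitter, saturating at max_backoff (64 s at n >= 6).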
def upload_dropbox(lfilename, dfilename, max_tries=7):
"""
Upload local file lfilename to dropbox location dfilename
Implements exponential backoff
"""
if not DROPBOX_FOUND:
print('Dropbox API not found')
return False
dbx = dropbox.Dropbox(dropbox_app_key)
ddir, _ = osp.split(dfilename)
ddir_exists = True
try:
dbx.files_get_metadata(ddir)
except dropbox.exceptions.ApiError as err:
ddir_exists = False
if not ddir_exists:
try:
dbx.files_create_folder(ddir)
except dropbox.exceptions.ApiError as err:
print('*** API error', err)
dbx.close()
return False
mtime = osp.getmtime(lfilename)
with open(lfilename, 'rb') as f:
ldata = f.read()
upload_tries = 0
while upload_tries < max_tries:
try:
res = dbx.files_upload(
ldata, dfilename,
dropbox.files.WriteMode.overwrite,
client_modified=datetime.datetime(*time.gmtime(mtime)[:6]),
mute=True)
print('uploaded as', res.name.encode('utf8'))
dbx.close()
return True
except dropbox.exceptions.ApiError as err:
print('*** API error', err)
dbx.close()
return False
except ConnectionError as err:
t = exponential_backoff(upload_tries)
print('*** Requests Connection error, sleeping for {:f} s'.format(t), err)
time.sleep(t)
upload_tries += 1
print('*** Max upload tries exceeded')
dbx.close()
return False
def download_url(url, filename, progress=True, max_tries=7):
"""
Download file from a URL to filename, optionally
displaying progress bar with tqdm
Implements exponential backoff
"""
tries = 0
while tries < max_tries:
done = download_url_once(url, filename, progress)
if done:
return True
else:
t = exponential_backoff(tries)
print('*** Sleeping for {:f} s'.format(t))
time.sleep(t)
tries += 1
print('*** Max download tries exceeded')
return False
def download_url_once(url, filename, progress=True):
"""
Download file from a URL to filename, optionally
displaying progress bar with tqdm
taken from https://stackoverflow.com/a/37573701
"""
# Streaming, so we can iterate over the response.
try:
r = requests.get(url, stream=True, proxies=proxies)
except ConnectionError as err:
print(err)
return False
# Total size in bytes.
total_size = int(r.headers.get('content-length', 0))
block_size = 1024 #1 Kibibyte
if progress:
t = tqdm(total=total_size, unit='iB', unit_scale=True)
done = True
datalen = 0
with open(filename, 'wb') as f:
itr = r.iter_content(block_size)
while True:
try:
try:
data = next(itr)
except StopIteration:
break
if progress:
t.update(len(data))
datalen += len(data)
f.write(data)
except KeyboardInterrupt:
done = False
print('Cancelled')
except ConnectionError as err:
done = False
print(err)
if progress:
t.close()
if (not done) or (total_size != 0 and datalen != total_size):
print("ERROR, something went wrong")
try:
os.remove(filename)
except OSError as e:
print(e)
return False
else:
return True
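# Example (sketch):
#   ok = download_url('https://example.com/file.zip', '/tmp/file.zip')
#   if not ok:
#       raise RuntimeError('download failed after retries')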
|
ContactPose-main
|
utilities/networking.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import sys
sys.path.append('.')
|
ContactPose-main
|
scripts/init_paths.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import matplotlib.pyplot as plt
import numpy as np
import init_paths
from utilities.import_open3d import *
from utilities.dataset import ContactPose
import utilities.misc as mutils
def apply_colormap_to_mesh(mesh, sigmoid_a=0.05, invert=False):
colors = np.asarray(mesh.vertex_colors)[:, 0]
colors = mutils.texture_proc(colors, a=sigmoid_a, invert=invert)
colors = plt.cm.inferno(colors)[:, :3]
mesh.vertex_colors = o3du.Vector3dVector(colors)
return mesh
def apply_semantic_colormap_to_mesh(mesh, semantic_idx, sigmoid_a=0.05,
invert=False):
colors = np.asarray(mesh.vertex_colors)[:, 0]
colors = mutils.texture_proc(colors, a=sigmoid_a, invert=invert)
# apply different colormaps based on finger
mesh_colors = np.zeros((len(colors), 3))
cmaps = ['Greys', 'Purples', 'Oranges', 'Greens', 'Blues', 'Reds']
cmaps = [plt.cm.get_cmap(c) for c in cmaps]
for semantic_id in np.unique(semantic_idx):
if (len(cmaps) <= semantic_id):
print('Not enough colormaps, ignoring semantic id {:d}'.format(
semantic_id))
continue
idx = semantic_idx == semantic_id
mesh_colors[idx] = cmaps[semantic_id](colors[idx])[:, :3]
mesh.vertex_colors = o3du.Vector3dVector(mesh_colors)
return mesh
def show_contactmap(p_num, intent, object_name, mode='simple',
joint_sphere_radius_mm=4.0, bone_cylinder_radius_mm=2.5,
bone_color=np.asarray([224.0, 172.0, 105.0])/255,
show_axes=False):
"""
mode =
simple: just contact map
simple_hands: skeleton + contact map
semantic_hands_fingers: skeleton + contact map colored by finger proximity
semantic_hands_phalanges: skeleton + contact map colored by phalange proximity
show_axes: visualize coordinate axes if True
"""
cp = ContactPose(p_num, intent, object_name)
# read contactmap
mesh = o3dio.read_triangle_mesh(cp.contactmap_filename)
mesh.compute_vertex_normals()
geoms = []
# apply simple colormap to the mesh
if 'simple' in mode:
mesh = apply_colormap_to_mesh(mesh)
geoms.append(mesh)
if 'hands' in mode:
# read hands
line_ids = mutils.get_hand_line_ids()
joint_locs = cp.hand_joints()
# show hands
hand_colors = [[0, 1, 0], [1, 0, 0]]
for hand_idx, hand_joints in enumerate(joint_locs):
if hand_joints is None:
continue
# joint locations
for j in hand_joints:
m = o3dg.TriangleMesh.create_sphere(radius=joint_sphere_radius_mm*1e-3,
resolution=10)
T = np.eye(4)
T[:3, 3] = j
m.transform(T)
m.paint_uniform_color(hand_colors[hand_idx])
m.compute_vertex_normals()
geoms.append(m)
# connecting lines
for line_idx, (idx0, idx1) in enumerate(line_ids):
bone = hand_joints[idx0] - hand_joints[idx1]
h = np.linalg.norm(bone)
l = o3dg.TriangleMesh.create_cylinder(radius=bone_cylinder_radius_mm*1e-3,
height=h, resolution=10)
T = np.eye(4)
T[2, 3] = -h/2.0
l.transform(T)
T = mutils.rotmat_from_vecs(bone, [0, 0, 1])
T[:3, 3] = hand_joints[idx0]
l.transform(T)
l.paint_uniform_color(bone_color)
l.compute_vertex_normals()
geoms.append(l)
if 'semantic' in mode:
n_lines_per_hand = len(line_ids)
n_parts_per_finger = 4
# find line equations for hand parts
lines = []
for hand_joints in joint_locs:
if hand_joints is None:
continue
for line_id in line_ids:
a = hand_joints[line_id[0]]
n = hand_joints[line_id[1]] - hand_joints[line_id[0]]
n /= np.linalg.norm(n)
lines.append(np.hstack((a, n)))
lines = np.asarray(lines)
ops = np.asarray(mesh.vertices)
d_lines = mutils.p_dist_linesegment(ops, lines)
line_idx = np.argmin(d_lines, axis=1) % n_lines_per_hand
finger_idx, part_idx = divmod(line_idx, n_parts_per_finger)
if 'phalanges' in mode:
mesh = apply_semantic_colormap_to_mesh(mesh, part_idx)
elif 'fingers' in mode:
mesh = apply_semantic_colormap_to_mesh(mesh, finger_idx)
geoms.append(mesh)
elif 'mano' in mode:
for hand in cp.mano_meshes():
if hand is None:
continue
mesh = o3dg.TriangleMesh()
mesh.vertices = o3du.Vector3dVector(hand['vertices'])
mesh.triangles = o3du.Vector3iVector(hand['faces'])
mesh.paint_uniform_color(bone_color)
mesh.compute_vertex_normals()
geoms.append(mesh)
if show_axes:
geoms.append(o3dg.TriangleMesh.create_coordinate_frame(size=0.2))
o3dv.draw_geometries(geoms)
if __name__ == '__main__':
import sys
parser = mutils.default_argparse()
parser.add_argument('--mode', help='Contact Map mode', default='simple_hands',
choices=('simple', 'simple_mano', 'simple_hands', 'semantic_hands_fingers',
'semantic_hands_phalanges'))
parser.add_argument('--show_axes', action='store_true',
help='Show coordinate axes')
args = parser.parse_args()
if args.object_name == 'hands':
print('hands do not have a contact map')
sys.exit(0)
elif args.object_name == 'palm_print':
print('Forcing mode to simple since palm_print does not have hand pose')
args.mode = 'simple'
show_contactmap(args.p_num, args.intent, args.object_name, args.mode,
show_axes=args.show_axes)
|
ContactPose-main
|
scripts/show_contactmap.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
"""
Preprocesses images for ML training by cropping (RGB and depth), and
randomizing background (RGB only)
NOTE: Requires rendering setup, see docs/rendering.py
"""
import init_paths
from utilities.dataset import ContactPose, get_object_names
from utilities.rendering import DepthRenderer
import utilities.misc as mutils
import numpy as np
import cv2
import os
from tqdm import tqdm
osp = os.path
def inspect_dir(dirname):
assert(osp.isdir(dirname))
print('Inspecting {:s}...'.format(dirname))
filenames = next(os.walk(dirname))[-1]
filenames = [osp.join(dirname, f) for f in filenames]
print('Found {:d} images'.format(len(filenames)))
return filenames
def preprocess(p_num, intent, object_name, rim_filenames_or_dir, crop_size,
do_rgb=True, do_depth=True, do_grabcut=True,
depth_percentile_thresh=30, mask_dilation=5):
if isinstance(rim_filenames_or_dir, list):
rim_filenames = rim_filenames_or_dir[:]
else:
rim_filenames = inspect_dir(rim_filenames_or_dir)
cp = ContactPose(p_num, intent, object_name, load_mano=False)
for camera_name in cp.valid_cameras:
K = cp.K(camera_name)
renderer = DepthRenderer(object_name, K, camera_name, 1e-3)
output_dir = osp.join(cp.data_dir, 'images', camera_name)
for d in ('color', 'depth', 'projections'):
dd = osp.join(output_dir, d)
if not osp.isdir(dd):
os.makedirs(dd)
A = mutils.get_A(camera_name)
print('{:d}:{:s}:{:s}:{:s}'.format(p_num, intent, object_name, camera_name))
print('Writing to {:s}'.format(output_dir))
for frame_idx in tqdm(range(len(cp))):
# read images
filename = cp.image_filenames('color', frame_idx)[camera_name]
rgb_im = cv2.imread(filename)
if rgb_im is None:
print('Could not read {:s}, skipping frame'.format(filename))
continue
filename = cp.image_filenames('depth', frame_idx)[camera_name]
_, out_filename = osp.split(filename)
depth_im = cv2.imread(filename, -1)
if depth_im is None:
print('Could not read {:s}, skipping frame'.format(filename))
continue
# crop images
joints = cp.projected_hand_joints(camera_name, frame_idx)
rgb_im, _ = mutils.crop_image(rgb_im, joints, crop_size)
depth_im, crop_tl = mutils.crop_image(depth_im, joints, crop_size)
# shift the projection by the crop offset for this frame only; A itself
# must stay fixed across frames
this_A = np.asarray([[1, 0, -crop_tl[0]], [0, 1, -crop_tl[1]], [0, 0, 1]]) @ A
cTo = cp.object_pose(camera_name, frame_idx)
P = this_A @ K @ cTo[:3]
if do_depth: # save preprocessed depth image
filename = osp.join(output_dir, 'depth', out_filename)
cv2.imwrite(filename, depth_im)
# save projection matrix
filename = osp.join(output_dir, 'projections',
out_filename.replace('.png', '_P.txt'))
np.savetxt(filename, P)
# foreground mask
cxx, visible = renderer.object_visibility_and_projections(cTo)
cxx -= crop_tl
cx = np.round(cxx).astype(int)
visible = np.logical_and(visible, cx[:, 0]>=0)
visible = np.logical_and(visible, cx[:, 1]>=0)
visible = np.logical_and(visible, cx[:, 0] < rgb_im.shape[1])
visible = np.logical_and(visible, cx[:, 1] < rgb_im.shape[0])
cx = cx[visible]
# save projection information
filename = osp.join(output_dir, 'projections',
out_filename.replace('.png', '_verts.npy'))
idx = np.where(visible)[0]
projs = np.vstack((cxx[idx].T, idx)).T
np.save(filename, projs)
if not do_rgb:
continue
obj_depths = depth_im[cx[:, 1], cx[:, 0]]
obj_depths = obj_depths[obj_depths > 0]
all_depths = depth_im[depth_im > 0]
if (len(obj_depths) > 0) and (len(all_depths) > 0):
mthresh = np.median(obj_depths) + 150.0
pthresh = np.percentile(all_depths, depth_percentile_thresh)
else:
print('Depth image {:s} all 0s, skipping frame'.format(filename))
continue
thresh = min(pthresh, mthresh)
# mask derived from depth
dmask = 255 * np.logical_and(depth_im > 0, depth_im <= thresh)
dmask = cv2.dilate(dmask.astype(np.uint8), np.ones(
(mask_dilation, mask_dilation), dtype=np.uint8))
# mask derived from color
cmask_green = np.logical_and(rgb_im[:, :, 1] > rgb_im[:, :, 0],
rgb_im[:, :, 1] > rgb_im[:, :, 2])
cmask_white = np.mean(rgb_im, axis=2) > 225
cmask = np.logical_not(np.logical_or(cmask_green, cmask_white))
mask = np.logical_and(dmask>0, cmask)
if do_grabcut:
mask = mutils.grabcut_mask(rgb_im, mask)
# randomize background
count = 0
while count < len(rim_filenames):
random_idx = np.random.choice(len(rim_filenames))
random_im = cv2.imread(rim_filenames[random_idx], cv2.IMREAD_COLOR)
if np.any(np.asarray(random_im.shape[:2]) <= np.asarray(rgb_im.shape[:2])):
count += 1
continue
x = np.random.choice(random_im.shape[1] - rgb_im.shape[1])
y = np.random.choice(random_im.shape[0] - rgb_im.shape[0])
random_im = random_im[y:y+rgb_im.shape[0], x:x+rgb_im.shape[1], :]
break
else:
print('ERROR: All random images are smaller than {:d}x{:d}!'.
format(crop_size, crop_size))
break
mask = mask[:, :, np.newaxis]
im = mask*rgb_im + (1-mask)*random_im
filename = osp.join(output_dir, 'color', out_filename)
cv2.imwrite(filename, im)
def preprocess_all(p_nums, intents, object_names, background_images_dir, *args,
**kwargs):
rim_filenames = inspect_dir(background_images_dir)
for p_num in p_nums:
for intent in intents:
if object_names is None:
object_names = get_object_names(p_num, intent)
for object_name in object_names:
preprocess(p_num, intent, object_name, rim_filenames_or_dir=rim_filenames,
*args, **kwargs)
if __name__ == '__main__':
parser = mutils.default_multiargparse()
parser.add_argument('--no_rgb', action='store_false', dest='do_rgb')
parser.add_argument('--no_depth', action='store_false', dest='do_depth')
parser.add_argument('--background_images_dir', required=True,
help='Directory containing background images e.g. COCO')
parser.add_argument('--crop_size', default=256, type=int)
parser.add_argument('--no_mask_refinement', action='store_false',
dest='do_grabcut',
help='No refinement of masks with GrabCut')
args = parser.parse_args()
p_nums, intents, object_names, args = mutils.parse_multiargs(args)
preprocess_all(p_nums, intents, object_names, **vars(args))
|
ContactPose-main
|
scripts/preprocess_images.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
|
ContactPose-main
|
scripts/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
"""
script to download ContactPose data from Dropbox
URLs in data/urls.json
"""
import init_paths
import cv2
import os
import json
import shutil
from tqdm.autonotebook import tqdm
import utilities.networking as nutils
from zipfile import ZipFile
osp = os.path
def is_nonempty_dir(dir):
if osp.isdir(dir):
return next(os.scandir(dir), None) is not None
else:
return False
class ContactPoseDownloader(object):
def __init__(self):
self.data_dir = osp.join('data', 'contactpose_data')
if not osp.isdir(self.data_dir):
os.makedirs(self.data_dir)
print('Created {:s}'.format(self.data_dir))
with open(osp.join('data', 'urls.json'), 'r') as f:
self.urls = json.load(f)
@staticmethod
def _unzip_and_del(filename, dst_dir=None, progress=True, filter_fn=None):
if dst_dir is None:
dst_dir, _ = osp.split(filename)
if len(dst_dir) == 0:
dst_dir = '.'
with ZipFile(filename) as f:
# members = None means everything
members = None if filter_fn is None else \
list(filter(filter_fn, f.namelist()))
f.extractall(dst_dir, members=members)
os.remove(filename)
def download_grasps(self):
filename = osp.join(self.data_dir, 'grasps.zip')
print('Downloading grasps...')
if not nutils.download_url(self.urls['grasps'], filename):
print('Download unsuccessful')
return
print('Extracting...')
self._unzip_and_del(filename, self.data_dir)
p_ids = next(os.walk(self.data_dir))[1]
for p_id in tqdm(p_ids):
if 'full' not in p_id:
continue
sess_dir = osp.join(self.data_dir, p_id)
for filename in next(os.walk(sess_dir))[-1]:
if '.zip' not in filename:
continue
self._unzip_and_del(osp.join(sess_dir, filename), progress=False)
def download_contact_maps(self, p_num, intent):
p_id = 'full{:d}_{:s}'.format(p_num, intent)
filename = osp.join(self.data_dir, '{:s}_contact_maps.zip'.format(p_id))
print('Downloading {:d} {:s} contact maps...'.format(p_num, intent))
if not nutils.download_url(self.urls['contact_maps'][p_id], filename):
print('Download unsuccessful')
return
print('Extracting...')
self._unzip_and_del(filename, self.data_dir)
def download_markers(self):
filename = osp.join('data', 'markers.zip')
print('Downloading 3D model marker locations...')
if not nutils.download_url(self.urls['object_marker_locations'], filename):
print('Download unsuccessful')
return
print('Extracting...')
self._unzip_and_del(filename, osp.join('data', 'object_marker_locations'))
def download_3d_models(self):
filename = osp.join('data', '3Dmodels.zip')
print('Downloading 3D models...')
if not nutils.download_url(self.urls['object_models'], filename):
print('Download unsuccessful')
return
print('Extracting...')
self._unzip_and_del(filename, osp.join('data', 'object_models'))
def download_depth_images(self, p_num, intent, dload_dir,
include_objects=None):
self.download_images(p_num, intent, dload_dir, include_objects,
download_color=False, download_depth=True)
def download_color_images(self, p_num, intent, dload_dir,
include_objects=None):
self.download_images(p_num, intent, dload_dir, include_objects,
download_color=True, download_depth=False)
def download_images(self, p_num, intent, dload_dir,
include_objects=None, download_color=True,
download_depth=True):
assert osp.isdir(dload_dir),\
'Image download dir {:s} does not exist'.format(dload_dir)
p_id = 'full{:d}_{:s}'.format(p_num, intent)
if download_color and (not download_depth):
urls = self.urls['videos']['color']
else:
urls = self.urls['images']
# check if already extracted
dirs_to_check = []
if download_color:
dirs_to_check.append('color')
if download_depth:
dirs_to_check.append('depth')
ok = True
if osp.isdir(osp.join(self.data_dir, p_id)):
sess_dir = osp.join(self.data_dir, p_id)
for object_name in next(os.walk(sess_dir))[1]:
if include_objects is not None and object_name not in include_objects:
continue
images_dir = osp.join(sess_dir, object_name, 'images_full')
if not osp.isdir(images_dir):
continue
for cam_name in next(os.walk(images_dir))[1]:
for check_name in dirs_to_check:
check_dir = osp.join(images_dir, cam_name, check_name)
if is_nonempty_dir(check_dir):
print('{:s} {:s} already has extracted images, please delete {:s}'.
format(p_id, object_name, check_dir))
ok = False
if not ok:
return
# download and extract
sess_dir = osp.join(dload_dir, p_id)
if not osp.isdir(sess_dir):
print('Creating {:s}'.format(sess_dir))
os.makedirs(sess_dir, exist_ok=True)
print('Downloading {:s} images...'.format(p_id))
object_names = list(urls[p_id].keys())
if include_objects is None:
include_objects = object_names[:]
filenames_to_extract = {}
for object_name in tqdm(include_objects):
if object_name not in object_names:
print('{:d} {:s} does not have {:s}'.format(p_num, intent, object_name))
continue
filename = osp.join(sess_dir, '{:s}_images.zip'.format(object_name))
url = urls[p_id][object_name]
print(object_name)
if nutils.download_url(url, filename):
filenames_to_extract[object_name] = filename
else:
print('{:s} {:s} Download unsuccessful'.format(p_id, object_name))
return
print('Extracting...')
for object_name, filename in tqdm(filenames_to_extract.items()):
obj_dir = osp.join(sess_dir, object_name)
os.makedirs(obj_dir, exist_ok=True)
self._unzip_and_del(filename, obj_dir)
for filename in next(os.walk(obj_dir))[-1]:
if download_color and (not download_depth):
if '.mp4' not in filename:
continue
camera_name = filename.replace('.mp4', '')
video_filename = osp.join(obj_dir, filename)
im_dir = osp.join(obj_dir, 'images_full', camera_name, 'color')
os.makedirs(im_dir, exist_ok=True)
cap = cv2.VideoCapture(video_filename)
if not cap.isOpened():
print('Could not read {:s}'.format(video_filename))
return
count = 0
while True:
ok, im = cap.read()
if not ok:
break
filename = osp.join(im_dir, 'frame{:03d}.png'.format(count))
cv2.imwrite(filename, im)
count += 1
os.remove(video_filename)
else:
if '.zip' not in filename:
continue
filter_fn = (lambda x: 'color' not in x) if (not download_color) \
else None
self._unzip_and_del(osp.join(obj_dir, filename), progress=False,
filter_fn=filter_fn)
# symlink
if osp.realpath(dload_dir) != osp.realpath(self.data_dir):
src = osp.join(obj_dir, 'images_full')
dst_dir = osp.join(self.data_dir, p_id, object_name)
if not osp.isdir(dst_dir):
os.makedirs(dst_dir)
dst = osp.join(dst_dir, 'images_full')
os.symlink(src, dst)
if __name__ == '__main__':
import argparse
import sys
from itertools import product
parser = argparse.ArgumentParser()
parser.add_argument('--type', choices=('grasps', 'markers', '3Dmodels',
'color_images', 'depth_images',
'images', 'contact_maps'),
required=True)
parser.add_argument('--p_nums', default=None,
help='Participant numbers E.g. 1, 1,2, or 1-5')
parser.add_argument('--intents', default='use,handoff',
help='use, handoff, or use,handoff')
parser.add_argument('--object_names', default=None,
help='Comma separated object names. Used only for image '+\
'download. All other types download data for all '+\
'objects in that particular p_num, intent combo')
parser.add_argument('--images_dload_dir',
default=osp.join('data', 'contactpose_data'),
help='Directory where images will be downloaded. '
'They will be symlinked to the appropriate location')
args = parser.parse_args()
downloader = ContactPoseDownloader()
if args.type == 'grasps':
downloader.download_grasps()
sys.exit(0)
elif args.type == 'markers':
downloader.download_markers()
sys.exit(0)
elif args.type == '3Dmodels':
downloader.download_3d_models()
sys.exit(0)
assert(args.p_nums is not None)
if '-' in args.p_nums:
start, finish = args.p_nums.split('-')
nums = list(range(int(start), int(finish)+1))
elif ',' in args.p_nums:
nums = [int(n) for n in args.p_nums.split(',')]
else:
nums = [int(args.p_nums)]
intents = args.intents.split(',')
include_objects = args.object_names
if include_objects is not None:
include_objects = include_objects.split(',')
include_objects = list(set(include_objects)) # remove duplicates
for p_num, intent in product(nums, intents):
p_id = 'full{:d}_{:s}'.format(p_num, intent)
print('####### {:s} #######'.format(p_id))
if args.type == 'contact_maps':
downloader.download_contact_maps(p_num, intent)
elif args.type == 'color_images':
downloader.download_color_images(p_num, intent,
osp.expanduser(args.images_dload_dir),
include_objects=include_objects)
elif args.type == 'depth_images':
downloader.download_depth_images(p_num, intent,
osp.expanduser(args.images_dload_dir),
include_objects=include_objects)
elif args.type == 'images':
downloader.download_images(p_num, intent,
osp.expanduser(args.images_dload_dir),
include_objects=include_objects)
else:
raise NotImplementedError
|
ContactPose-main
|
scripts/download_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
"""
Discovers 'active areas' i.e. areas on the object surface most frequently
touched by a certain part of the hand. See Figure 7 in the paper
https://arxiv.org/pdf/2007.09545.pdf.
"""
import init_paths
from utilities.import_open3d import * # need to import open3d before others
import json
from matplotlib import cm
import numpy as np
import os
from random import shuffle
from utilities.dataset import get_p_nums
import utilities.misc as mutils
osp = os.path
def discover_active_areas(finger_idx, part_idx, object_name, intent, p_nums=None,
color_thresh=0.4):
"""
finger_idx: 0->4 : thumb->little
part_idx: 0->3 : proximal to distal phalanges, 3 = finger tip
"""
p_nums = p_nums or get_p_nums(object_name, intent)
shuffle(p_nums)
data_dir = osp.join('data', 'contactpose_data')
# read object mesh
vertices = None
for p_num in p_nums:
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
f'{object_name}.ply')
if osp.isfile(filename):
mesh = o3dio.read_triangle_mesh(filename)
else:
print('{:s} does not exist'.format(filename))
continue
vertices = np.asarray(mesh.vertices)
break
if vertices is None:
print("no object model found")
return
line_ids = mutils.get_hand_line_ids()
n_lines_per_hand = len(line_ids)
n_parts_per_finger = 4
touched_by_part = np.zeros(len(vertices))
count = 0
for p_num in p_nums:
print(f'Processing full{p_num}_{intent} {object_name}')
# read contact from the mesh
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
f'{object_name}.ply')
if osp.isfile(filename):
mesh = o3dio.read_triangle_mesh(filename)
else:
print('{:s} does not exist'.format(filename))
continue
tex = np.asarray(mesh.vertex_colors)[:, 0]
tex = mutils.texture_proc(tex)
# read joints
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
'annotations.json')
try:
with open(filename, 'r') as f:
annotations = json.load(f)
except FileNotFoundError:
print('{:s} does not exist'.format(filename))
continue
ds = []
for hand_idx, hand in enumerate(annotations['hands']):
if hand['valid']:
joints = np.asarray(hand['joints'])
l0 = joints[line_ids[:, 0]]
l1 = joints[line_ids[:, 1]]
pl = mutils.closest_linesegment_point(l0, l1, vertices)
d = pl - vertices[:, np.newaxis, :]
d = np.linalg.norm(d, axis=2)
else:
d = np.inf * np.ones((len(vertices), n_lines_per_hand))
ds.append(d)
ds = np.hstack(ds)
hand_idxs, line_idxs = divmod(np.argmin(ds, axis=1), n_lines_per_hand)
finger_idxs, part_idxs = divmod(line_idxs, n_parts_per_finger)
this_touched_by_part = np.logical_and(
tex > color_thresh, np.logical_and(hand_idxs >= 0,
np.logical_and(finger_idxs == finger_idx, part_idxs == part_idx)))
touched_by_part += this_touched_by_part
count += 1
touched_by_part /= count
touched_by_part /= touched_by_part.max()
filename = osp.join('data',
f'{object_name}_{intent}_{finger_idx}_{part_idx}_active_areas.npy')
np.save(filename, touched_by_part)
print('{:s} saved'.format(filename))
def show_active_areas(finger_idx, part_idx, object_name, intent):
filename = osp.join('data', 'object_models', f'{object_name}.ply')
mesh = o3dio.read_triangle_mesh(filename)
mesh.compute_vertex_normals()
filename = osp.join('data',
f'{object_name}_{intent}_{finger_idx}_{part_idx}_active_areas.npy')
c = np.load(filename)
mesh.vertex_colors = o3du.Vector3dVector(cm.bwr(c)[:, :3])
o3dv.draw_geometries([mesh])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--finger_idx', type=int, required=True,
help='0->4 : thumb->little', choices=(0, 1, 2, 3, 4))
parser.add_argument('--part_idx', type=int, required=True, choices=(0, 1, 2, 3),
help='0->3 : proximal to distal phalanges, 3 = finger tip')
parser.add_argument('--object_name', required=True)
parser.add_argument('--intent', required=True, choices=('use', 'handoff'))
parser.add_argument('--p_nums', default='1-50',
help='Participant numbers, comma or - separated. '
'Skipping means all participants')
parser.add_argument('--show', action='store_true')
args = parser.parse_args()
p_nums = args.p_nums
if '-' in p_nums:
first, last = p_nums.split('-')
p_nums = list(range(int(first), int(last)+1))
else:
p_nums = [int(p) for p in p_nums.split(',')]
if args.show:
show_active_areas(args.finger_idx, args.part_idx, args.object_name,
args.intent)
else:
discover_active_areas(args.finger_idx, args.part_idx, args.object_name,
args.intent, p_nums)
|
ContactPose-main
|
scripts/data_analysis/active_areas.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import sys
sys.path.append('.')
|
ContactPose-main
|
scripts/data_analysis/init_paths.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
"""
Calculates and shows the contact probability for hand points
Figure 5(a) in the paper
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import init_paths
from utilities.import_open3d import *
from utilities.dataset import ContactPose, get_object_names
import utilities.misc as mutils
osp = os.path
def calc_hand_contact_prob(p_nums, intents, object_names, contact_thresh=0.4,
search_r=15e-3, hand_idx=1):
"""
hand_idx: 0 for left, 1 for right
"""
contact_probs = []
for p_num in p_nums:
for intent in intents:
if object_names is None:
object_names = get_object_names(p_num, intent)
for object_name in object_names:
print('{:d} : {:s} : {:s}'.format(p_num, intent, object_name))
cp = ContactPose(p_num, intent, object_name)
object_mesh = o3dio.read_triangle_mesh(cp.contactmap_filename)
v = np.array(object_mesh.vertices)
c = np.array(object_mesh.vertex_colors)[:, 0]
c = mutils.texture_proc(c)
idx = c >= contact_thresh
v = v[idx]
# read mano
hand = cp.mano_meshes()[hand_idx]
if hand is None:
continue
h_pc = o3dg.PointCloud()
h_pc.points = o3du.Vector3dVector(hand['vertices'])
tree = o3dg.KDTreeFlann(h_pc)
contact_prob = np.zeros(len(hand['vertices']))
for vv in v:
k, idx, dist2 = tree.search_hybrid_vector_3d(vv, search_r, 10)
for i in range(k):
# contact_prob[idx[i]] += (1.0/np.sqrt(dist2[i]))
contact_prob[idx[i]] = 1
contact_probs.append(contact_prob)
contact_probs = np.mean(contact_probs, axis=0)
return contact_probs
def show_hand_contact_prob(contact_prob, hand_idx=1):
# dummy params
mp = {
'pose': np.zeros(15+3),
'betas': np.zeros(10),
'hTm': np.eye(4)
}
hand = mutils.load_mano_meshes([mp, mp], ContactPose._mano_dicts,
flat_hand_mean=True)[hand_idx]
contact_prob -= contact_prob.min()
contact_prob /= contact_prob.max()
contact_prob = plt.cm.bwr(contact_prob)[:, :3]
h = o3dg.TriangleMesh()
h.vertices = o3du.Vector3dVector(hand['vertices'])
h.triangles = o3du.Vector3iVector(hand['faces'])
h.vertex_colors = o3du.Vector3dVector(contact_prob)
h.compute_vertex_normals()
o3dv.draw_geometries([h])
if __name__ == '__main__':
parser = mutils.default_multiargparse()
args = parser.parse_args()
p_nums, intents, object_names, args = mutils.parse_multiargs(args)
hand_idx = 1
p = calc_hand_contact_prob(p_nums, intents, object_names, hand_idx=hand_idx)
show_hand_contact_prob(p, hand_idx=hand_idx)
|
ContactPose-main
|
scripts/data_analysis/hand_contact_prob.py
|
ContactPose-main
|
scripts/data_analysis/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import init_paths
import dropbox
import json
from requests.exceptions import ConnectionError
import os
from utilities.dataset import get_object_names
osp = os.path
dbx = dropbox.Dropbox(os.environ['DROPBOX_APP_KEY'])
def move(p_num, intent, object_name):
p_id = 'full{:d}_{:s}'.format(p_num, intent)
opath = osp.join('/', 'contactpose', 'videos_full', p_id, object_name)
dpath = osp.join(opath, 'color')
try:
dbx.files_create_folder(dpath)
except dropbox.exceptions.ApiError as err:
print('*** API error', err)
dbx.close()
return
print('{:s} created'.format(dpath))
for camera_name in ('kinect2_left', 'kinect2_right', 'kinect2_middle'):
src = osp.join(opath, '{:s}_color.mp4'.format(camera_name))
file_exists = True
try:
dbx.files_get_metadata(src)
except dropbox.exceptions.ApiError as err:
file_exists = False
print('{:s} does not exist'.format(src))
if not file_exists:
continue
dst = osp.join(dpath, '{:s}.mp4'.format(camera_name))
try:
dbx.files_move(src, dst)
except dropbox.exceptions.ApiError as err:
print('*** API error moving {:s} -> {:s}'.format(src, dst), err)
print('Moved {:s} -> {:s}'.format(src, dst))
if __name__ == '__main__':
p_num = 5
for intent in ('use', 'handoff'):
p_id = 'full{:d}_{:s}'.format(p_num, intent)
with open(osp.join('data', 'object_names.txt'), 'r') as f:
object_names = [o.strip() for o in f]
with open(osp.join('data', 'urls.json'), 'r') as f:
urls = json.load(f)
object_names = [o for o in object_names if o in urls['images'][p_id]]
for object_name in object_names:
move(p_num, intent, object_name)
|
ContactPose-main
|
scripts/maintenance/move_videos_dropbox.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import sys
sys.path.append('.')
|
ContactPose-main
|
scripts/maintenance/init_paths.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import requests
import json
from copy import deepcopy
import os
osp = os.path
data_template = {
'path': '/contactpose/videos_full/{:s}/{:s}/color',
'settings': {
'requested_visibility': 'public',
'audience': 'public',
'access': 'viewer'
}
}
def get_url(p_id, object_name):
headers = {
'Authorization': 'Bearer {:s}'.format(os.environ['DROPBOX_APP_KEY']),
'Content-Type': 'application/json',
}
d = deepcopy(data_template)
d['path'] = d['path'].format(p_id, object_name)
filename = '/tmp/tmpurl.json'
with open(filename, 'w') as f:
json.dump(d, f)
r = requests.post('https://api.dropboxapi.com/2/sharing/create_shared_link_with_settings', data=open(filename), headers=headers)
if r.status_code != 200:
print('Unsuccessful, return status = {:d}'.format(r.status_code))
return
url = r.json()['url']
url = url.replace('dl=0', 'dl=1')
filename = osp.join('data', 'urls.json')
with open(filename, 'r') as f:
d = json.load(f)
if p_id not in d['videos']['color']:
d['videos']['color'][p_id] = {}
d['videos']['color'][p_id][object_name] = url
with open(filename, 'w') as f:
json.dump(d, f, indent=4, separators=(', ', ': '))
# print('{:s} updated'.format(filename))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--p_id', required=True)
args = parser.parse_args()
with open(osp.join('data', 'urls.json'), 'r') as f:
d = json.load(f)
object_names = d['images'][args.p_id]
print('#########', args.p_id)
for object_name in object_names:
print(object_name)
get_url(args.p_id, object_name)
|
ContactPose-main
|
scripts/maintenance/get_urls.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import os
import shutil
import sys
osp = os.path
def remove(p_num):
for ins in ('use', 'handoff'):
p_id = 'full{:s}_{:s}'.format(p_num, ins)
sess_dir = osp.join('..', '..', 'data', 'contactpose_data', p_id)
for object_name in next(os.walk(sess_dir))[1]:
obj_dir = osp.join(sess_dir, object_name)
for filename in next(os.walk(obj_dir))[-1]:
if '.zip' not in filename:
continue
filename = osp.join(obj_dir, filename)
os.remove(filename)
print(filename)
obj_dir = osp.join(obj_dir, 'images_full')
# if osp.isdir(obj_dir):
# shutil.rmtree(obj_dir)
# print(obj_dir)
for camera_name in ('kinect2_left', 'kinect2_right', 'kinect2_middle'):
cam_dir = osp.join(obj_dir, camera_name)
filename = osp.join(cam_dir, 'color.mp4')
if osp.isfile(filename):
os.remove(filename)
print(filename)
for filename in next(os.walk(sess_dir))[-1]:
filename = osp.join(sess_dir, filename)
os.remove(filename)
print(filename)
if __name__ == '__main__':
remove(sys.argv[1])
|
ContactPose-main
|
scripts/maintenance/remove_videos.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
|
ContactPose-main
|
scripts/maintenance/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
import init_paths
from scripts.download_data import ContactPoseDownloader
import ffmpeg
import os
import shutil
import json
import itertools
from multiprocessing import Pool
import argparse
from functools import partial
import utilities.networking as nutils
osp = os.path
intents = ('use', 'handoff')
with open(osp.join('data', 'object_names.txt'), 'r') as f:
object_names = [l.strip() for l in f]
# object_names = ('bowl', )
with open(osp.join('data', 'urls.json'), 'r') as f:
urls = json.load(f)
urls = urls['images']
video_params = {
'color': {
'ffmpeg_kwargs': dict(pix_fmt='yuv420p', vcodec='libx264', crf=0),
'ext': 'mp4',
'valid': True,
},
'depth': {
'ffmpeg_kwargs': dict(pix_fmt='gray16le', vcodec='ffv1'),
'ext': 'mkv',
'valid': False, # compression not working losslessly right now, so skip
},
}
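# For reference, the ffmpeg-python chain in produce_worker below expands to
# roughly this command line for the color stream (a sketch; exact flag order
# may differ):
#   ffmpeg -framerate 30 -i frame%03d.png \
#       -pix_fmt yuv420p -vcodec libx264 -crf 0 -y color.mp4
# crf=0 requests lossless H.264; the depth entry stays disabled because its
# ffv1/gray16le encoding was not round-tripping losslessly (per the comment
# above).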
def produce_worker(task, ffmpeg_path):
try:
p_num, intent, object_name = task
p_id = 'full{:d}_{:s}'.format(p_num, intent)
dload_dir=osp.join('data', 'contactpose_data')
data_dir = osp.join(dload_dir, p_id, object_name, 'images_full')
# download
downloader = ContactPoseDownloader()
if osp.isdir(data_dir):
shutil.rmtree(data_dir)
print('Deleted {:s}'.format(data_dir))
downloader.download_images(p_num, intent, dload_dir,
include_objects=(object_name,))
if not osp.isdir(data_dir):
print('Could not download {:s} {:s}'.format(p_id, object_name))
# check if the data actually exists
if object_name in urls[p_id]:
return False
else:
print('But that is OK because underlying data does not exist')
return True
# process
for camera_position in ('left', 'right', 'middle'):
camera_name = 'kinect2_{:s}'.format(camera_position)
this_data_dir = osp.join(data_dir, camera_name)
if not osp.isdir(this_data_dir):
print('{:s} does not have {:s} camera'.format(this_data_dir, camera_position))
continue
for mode, params in video_params.items():
if not params['valid']:
shutil.rmtree(osp.join(this_data_dir, mode))
continue
# video encoding
output_filename = osp.join(this_data_dir,
'{:s}.{:s}'.format(mode, params['ext']))
(
ffmpeg
.input(osp.join(this_data_dir, mode, 'frame%03d.png'), framerate=30)
.output(output_filename, **params['ffmpeg_kwargs'])
.overwrite_output()
.run(cmd=ffmpeg_path)
)
print('{:s} written'.format(output_filename), flush=True)
shutil.rmtree(osp.join(this_data_dir, mode))
# upload
dropbox_path = osp.join('/', 'contactpose',
'videos_full',
p_id, object_name, mode,
'{:s}.mp4'.format(camera_name))
if not nutils.upload_dropbox(output_filename, dropbox_path):
return False
return True
except Exception as e:
print('Error somewhere in ', task)
print(str(e))
return False
def produce(p_nums, cleanup=False, parallel=True, ffmpeg_path='ffmpeg',
tasks=None):
if tasks is not None:
pass
elif cleanup:
print('#### Cleanup mode ####')
filename = osp.join('status.json')
with open(filename, 'r') as f:
status = json.load(f)
tasks = []
for task,done in status.items():
if done:
continue
task = task.split('_')
p_num = int(task[0][4:])
intent = task[1]
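# object names may themselves contain '_' (e.g. a hypothetical key
# 'full5_use_wine_glass' should parse to object name 'wine_glass'),
# hence the join over the remaining pieces below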
object_name = '_'.join(task[2:])
tasks.append((p_num, intent, object_name))
print('Found {:d} cleanup items'.format(len(tasks)))
else:
tasks = list(itertools.product(p_nums, intents, object_names))
worker = partial(produce_worker, ffmpeg_path=ffmpeg_path)
if parallel:
p = Pool(len(object_names))
dones = p.map(worker, tasks)
p.close()
p.join()
else:
dones = map(worker, tasks)
filename = osp.join('status.json')
d = {}
if osp.isfile(filename):
with open(filename, 'r') as f:
d = json.load(f)
for task, done in zip(tasks, dones):
d['full{:d}_{:s}_{:s}'.format(*task)] = done
with open(filename, 'w') as f:
json.dump(d, f, indent=4, separators=(', ', ': '))
print('{:s} updated'.format(filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', type=int, default=-1)
parser.add_argument('--tasks', default=None, help='e.g. 1-use-pan,34-use-mug')
parser.add_argument('--cleanup', action='store_true')
parser.add_argument('--no_parallel', action='store_false', dest='parallel')
parser.add_argument('--ffmpeg_path', default='ffmpeg')
args = parser.parse_args()
if args.tasks is not None:
# parse tasks
tasks = args.tasks.split(',')
tasks = [t.split('-') for t in tasks]
tasks = [(int(t[0]), t[1], t[2]) for t in tasks]
else:
tasks = args.tasks
assert (args.p > 0)
produce((args.p, ), cleanup=args.cleanup, parallel=args.parallel, tasks=tasks)
|
ContactPose-main
|
scripts/maintenance/produce_videos.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by Samarth Brahmbhatt
|
ContactPose-main
|
thirdparty/__init__.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Calculate cumulative distribution functions for standard Brownian motions.
Running as a script tests assertions that closed-form, analytical expressions
for the means match numerical evaluations of the means for the cumulative
distribution functions, prints values of the cumulative distribution functions
at some interesting values for their arguments, saves to disk plots in pdf
of the complementary cumulative distribution functions, and saves to disk plots
in both pdf and jpg of calibration curves for synthetic data sets drawn from
perfectly calibrated distributions. The script saves plots in the current
directory, in files named "gauss.pdf", "gauss_log.pdf", "kuiper.pdf",
"kuiper_log.pdf", "kolmogorov_smirnov.pdf", and "kolmogorov_smirnov_log.pdf".
The files whose names end with "_log.pdf" use log scales for the vertical axes.
The plots for the metrics of Kuiper and of Kolmogorov and Smirnov include
vertical dotted lines at the means associated with the corresponding
distribution. The script saves twelve other plots in the current directory,
too, as detailed in the docstring for function plotnull below.
An article detailing the functions named after mathematicians and statisticians
(Kolmogorov, Smirnov, Kuiper, Gauss, and Chebyshev) is Mark Tygert's
"Calibration of P-values for calibration and for deviation of a subpopulation
from the full population."
Functions
---------
kolmogorov_smirnov
Evaluates the cumulative distribution function for the maximum
of the absolute value of the standard Brownian motion on [0, 1]
kuiper
Evaluates the cumulative distribution function for the range
(maximum minus minimum) of the standard Brownian motion on [0, 1]
gauss
Evaluates the cumulative distribution function for the distribution N(0, 1)
(the standard normal distribution, involving a Gaussian)
chebyshev
Integrates the function f(x) from x=a to x=b using n Chebyshev nodes
testmeans
Verifies that the means of the cumulative distribution functions are right
printvals
Evaluates the cumulative distribution functions at some points of interest
and prints them
saveplots
Plots and saves to disk the complementary cumulative distribution functions
plotnull
Plots the P-values for data generated from a perfectly calibrated model
This source code is licensed under the MIT license found in the LICENSE file in
the root directory of this source tree.
"""
import math
import numpy as np
from numpy.random import default_rng
import subprocess
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def kolmogorov_smirnov(x):
"""
Evaluates the cumulative distribution function for the maximum
of the absolute value of the standard Brownian motion on [0, 1]
Parameters
----------
x : float
argument at which to evaluate the cumulative distribution function
(must be positive)
Returns
-------
float
cumulative distribution function evaluated at x
"""
assert x > 0
# Compute the machine precision assuming binary numerical representations.
eps = 7 / 3 - 4 / 3 - 1
# Determine how many terms to use to attain accuracy eps.
fact = 4 / math.pi
kmax = math.ceil(
1 / 2 + x * math.sqrt(2) / math.pi * math.sqrt(math.log(fact / eps)))
# Sum the series.
c = 0
for k in range(kmax):
kplus = k + 1 / 2
c += (-1)**k / kplus * math.exp(-kplus**2 * math.pi**2 / (2 * x**2))
c *= 2 / math.pi
return c
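# Quick sanity checks (illustrative; the tolerances are assumptions based on
# F being a CDF that increases from 0 to 1):
#   assert kolmogorov_smirnov(0.001) < 1e-10
#   assert kolmogorov_smirnov(1) < kolmogorov_smirnov(2)
#   assert abs(kolmogorov_smirnov(100) - 1) < 1e-6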
def kuiper(x):
"""
Evaluates the cumulative distribution function for the range
(maximum minus minimum) of the standard Brownian motion on [0, 1]
Parameters
----------
x : float
argument at which to evaluate the cumulative distribution function
(must be positive)
Returns
-------
float
cumulative distribution function evaluated at x
"""
assert x > 0
# Compute the machine precision assuming binary numerical representations.
eps = 7 / 3 - 4 / 3 - 1
# Determine how many terms to use to attain accuracy eps.
fact = 4 / math.sqrt(2 * math.pi) * (1 / x + x / math.pi**2)
kmax = math.ceil(
1 / 2 + x / math.pi / math.sqrt(2) * math.sqrt(math.log(fact / eps)))
# Sum the series.
c = 0
for k in range(kmax):
kplus = k + 1 / 2
c += (8 / x**2 + 2 / kplus**2 / math.pi**2) * math.exp(
-2 * kplus**2 * math.pi**2 / x**2)
return c
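# Analogous sanity checks for the range statistic (illustrative tolerances):
#   assert kuiper(0.001) < 1e-10
#   assert kuiper(2) < kuiper(3)
#   assert abs(kuiper(100) - 1) < 1e-6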
def gauss(x):
"""
Evaluates the cumulative distribution function for the distribution N(0, 1)
(the standard normal distribution, involving a Gaussian)
Parameters
----------
x : float
argument at which to evaluate the cumulative distribution function
Returns
-------
float
cumulative distribution function evaluated at x
"""
return (1 + math.erf(x / math.sqrt(2))) / 2
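# Spot checks: Phi(0) = 1/2 exactly, and Phi(1.96) is the classic two-sided
# 95% point:
#   assert gauss(0) == 0.5
#   assert abs(gauss(1.96) - 0.975) < 1e-3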
def chebyshev(a, b, n, f):
"""
Integrates the function f(x) from x=a to x=b using n Chebyshev nodes
Parameters
----------
a : float
lower limit of integration
b : float
upper limit of integration
n : int
number of Chebyshev nodes in the Gauss-Chebyshev quadrature
f : callable
real-valued function of a real argument to be integrated
Returns
-------
float
integral from x=a to x=b of f(x) (dx)
"""
sum = 0
for k in range(n):
c = math.cos((2 * k + 1) * math.pi / (2 * n))
x = a + (b - a) * (1 + c) / 2
sum += f(x) * math.sqrt(1 - c**2)
sum *= (b - a) * math.pi / (2 * n)
return sum
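# Example with a smooth integrand whose integral is known exactly
# (the tolerance is an assumption; testmeans below relies on n = 100000
# for integrals of similar smoothness):
#   assert abs(chebyshev(0, math.pi, 100000, math.sin) - 2) < 1e-6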
def testmeans():
"""
Verifies that the means of the cumulative distribution functions are right
Returns
-------
float
mean of the Kolmogorov-Smirnov statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
float
mean of the Kuiper statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
References
----------
William Feller, "The asymptotic distribution of the range of sums of
independent random variables," Ann. Math. Statist., 22 (1951): 427-432.
Jaume Masoliver, "Extreme values and the level-crossing problem: an
application to the Feller process," Phys. Rev. E., 89 (2014): 042106.
"""
# Compute the means of the Kolmogorov-Smirnov and Kuiper statistics
# using closed-form analytic expressions (see Formula 1.4 of the reference
# to Feller given in the docstring, as well as Formula 46 of the reference
# to Masoliver).
ks_mean = math.sqrt(math.pi / 2)
ku_mean = 2 * math.sqrt(2 / math.pi)
# Compute the means from the associated cumulative distribution functions
# evaluated numerically.
ks_mean2 = chebyshev(1e-8, 8, 100000, lambda x: 1 - kolmogorov_smirnov(x))
ku_mean2 = chebyshev(1e-8, 8, 100000, lambda x: 1 - kuiper(x))
# Check that the calculated values agree with each other.
tolerance = 1e-8
assert (ks_mean - ks_mean2) / ks_mean < tolerance
assert (ku_mean - ku_mean2) / ku_mean < tolerance
return ks_mean, ku_mean
def printvals(ks_mean, ku_mean):
"""
Evaluates the cumulative distribution functions at some points of interest
and prints them
Parameters
----------
ks_mean : float
mean of the Kolmogorov-Smirnov statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
ku_mean : float
mean of the Kuiper statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
"""
print(f'1 - kolmogorov_smirnov(0.001) = {1 - kolmogorov_smirnov(0.001)}')
print('1 - kolmogorov_smirnov(ks_mean) = {}'
.format(1 - kolmogorov_smirnov(ks_mean)))
print('1 - kolmogorov_smirnov(7.319) = {}'
.format(1 - kolmogorov_smirnov(7.319)))
print('1 - kolmogorov_smirnov(6.818) = {}'
.format(1 - kolmogorov_smirnov(6.818)))
print('1 - kolmogorov_smirnov(4.307) = {}'
.format(1 - kolmogorov_smirnov(4.307)))
print('1 - kolmogorov_smirnov(4.624) = {}'
.format(1 - kolmogorov_smirnov(4.624)))
print('1 - kolmogorov_smirnov(2.205) = {}'
.format(1 - kolmogorov_smirnov(2.205)))
print('1 - kolmogorov_smirnov(2.043) = {}'
.format(1 - kolmogorov_smirnov(2.043)))
print(f'1 - kolmogorov_smirnov(1000) = {1 - kolmogorov_smirnov(1000)}')
print()
print(f'1 - kuiper(0.001) = {1 - kuiper(0.001)}')
print(f'1 - kuiper(ku_mean) = {1 - kuiper(ku_mean)}')
print(f'1 - kuiper(7.521) = {1 - kuiper(7.521)}')
print(f'1 - kuiper(7.213) = {1 - kuiper(7.213)}')
print(f'1 - kuiper(4.373) = {1 - kuiper(4.373)}')
print(f'1 - kuiper(4.710) = {1 - kuiper(4.710)}')
print(f'1 - kuiper(2.259) = {1 - kuiper(2.259)}')
print(f'1 - kuiper(2.110) = {1 - kuiper(2.110)}')
print(f'1 - kuiper(1000) = {1 - kuiper(1000)}')
print()
print('switch the mean values and see that the P-values deviate '
+ 'far from 0.5:')
print('1 - kolmogorov_smirnov(ku_mean) = {}'
.format(1 - kolmogorov_smirnov(ku_mean)))
print(f'1 - kuiper(ks_mean) = {1 - kuiper(ks_mean)}')
def saveplots(ks_mean, ku_mean):
"""
Plots and saves to disk the complementary cumulative distribution functions
The plots, saved in the current directory, are "gauss.pdf",
"gauss_log.pdf", "kuiper.pdf", "kuiper_log.pdf", "kolmogorov_smirnov.pdf",
and "kolmogorov_smirnov_log.pdf". The files whose names end with "_log.pdf"
use logarithmic scales for the vertical axes. The plots for Kuiper
and for Kolmogorov and Smirnov include vertical dotted lines at the means
of the corresponding distribution, assuming that the input parameters
are correct.
Parameters
----------
ks_mean : float
mean of the Kolmogorov-Smirnov statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
ku_mean : float
mean of the Kuiper statistic under the null hypothesis
that the subpopulation arises from the full population's distribution
(and that the scores are dense in their domain)
"""
for func in ['gauss', 'kuiper', 'kolmogorov_smirnov']:
for logscale in [True, False]:
# Create a plot.
plt.figure(figsize=[4.8, 3.6])
# Create abscissae and ordinates.
xmax = 8
x = np.arange(1e-3, xmax, 1e-3)
y = 1 - np.vectorize(globals()[func])(x)
# Plot y versus x.
plt.plot(x, y, 'k')
# Plot a vertical line at the mean.
if func == 'kuiper':
mean = ku_mean
elif func == 'kolmogorov_smirnov':
mean = ks_mean
else:
mean = 0
if mean > 0:
plt.vlines(mean, 1 - globals()[func](xmax), 1, 'k', 'dotted')
plt.text(
mean, 1 - globals()[func](xmax), 'mean ',
ha='center', va='top')
# Set the vertical axis to use a logscale if logscale is True.
if logscale:
plt.yscale('log')
# Title the axes.
plt.xlabel('$x$')
if func == 'kuiper':
plt.ylabel('$1 - F(x)$')
elif func == 'kolmogorov_smirnov':
plt.ylabel('$1 - D(x)$')
else:
plt.ylabel('$1 - \\Phi(x)$')
# Clean up the whitespace in the plot.
plt.tight_layout()
# Save the plot.
filename = func
if logscale:
filename += '_log'
filename += '.pdf'
plt.savefig(filename, bbox_inches='tight')
plt.close()
def plotnull(ns, points, transform=None, suffix=''):
"""
Plots the P-values for data generated from a perfectly calibrated model
The plots, saved in the current directory, are "kuiper_ecdf[suffix].pdf",
"kuiper_ecdf[suffix].jpg", "kolmogorov_smirnov_ecdf[suffix].pdf", and
"kolmogorov_smirnov_ecdf[suffix].jpg". The JPEG versions are conversions
from the PDF at a resolution of 1200 pixels per inch. The plots display
the empirical cumulative distribution functions of the P-values associated
with the Kuiper and Kolmogorov-Smirnov statistics, for points data sets,
each with the number of scores and corresponding Bernoulli responses
given by the corresponding entry in ns (running everything again for each entry
in ns). The Bernoulli responses are independent, and the probability
of success for each is equal to the corresponding score (ensuring perfect
calibration of the underlying data distribution). The transform gets
applied to each score, with the scores being equispaced before application
of transform (and remaining equispaced if transform is None).
Parameters
----------
ns : list of int
sample sizes for each generated data set
points : int
number of data sets to generate per calibration curve (that is,
per empirical cumulative distribution function of P-values)
transform : callable, optional
numpy function to apply to the otherwise equispaced scores
(set to None -- the default -- to use the original equispaced scores)
suffix : string, optional
suffix to append to the filename (defaults to the empty string)
"""
# Store processes for converting from pdf to jpeg in procs.
procs = []
# Store the calibration curves for both Kolmogorov-Smirnov and Kuiper
# statistics (these are empirical cumulative distribution functions),
# in ksc and kuc, respectively.
ksc = np.zeros((len(ns), points))
kuc = np.zeros((len(ns), points))
for j, n in enumerate(ns):
rng = default_rng(seed=543216789)
# Run simulations points times.
pks = np.zeros((points))
pku = np.zeros((points))
for k in range(points):
# Generate predicted probabilities (the "scores").
s = np.arange(0, 1, 1 / n)[:n]
if transform is not None:
s = transform(s)
# Generate a sample of classifications (the "responses")
# into two classes, correct (class 1) and incorrect (class 0),
# avoiding numpy's random number generators that are based
# on random bits -- they yield strange results for many seeds.
uniform = rng.uniform(size=(n))
r = (uniform <= s).astype(float)
# Calculate the cumulative differences.
c = (np.cumsum(r) - np.cumsum(s)) / n
# Calculate the estimate of sigma.
sigma = np.sqrt(np.sum(s * (1 - s))) / n
# Compute the normalized Kolmogorov-Smirnov and Kuiper statistics.
ks = np.abs(c).max() / sigma
c = np.insert(c, 0, [0])
ku = (c.max() - c.min()) / sigma
# Evaluate the P-values.
pks[k] = 1 - kolmogorov_smirnov(ks)
pku[k] = 1 - kuiper(ku)
# Calculate the empirical cumulative distributions of the P-values.
ksc[j, :] = np.sort(pks)
kuc[j, :] = np.sort(pku)
for stat in ['kolmogorov_smirnov', 'kuiper']:
# Create a plot.
plt.figure(figsize=[4.8, 3.6])
# Title the axes.
plt.xlabel('$x$')
plt.ylabel('fraction of P-values $\\leq x$')
# Plot the empirical cumulative distribution functions.
frac = np.arange(1 / points, 1 + 1 / points, 1 / points)[:points]
for j in range(len(ns)):
if stat == 'kolmogorov_smirnov':
plt.plot(ksc[j, :], frac, color='k')
elif stat == 'kuiper':
plt.plot(kuc[j, :], frac, color='k')
# Add a diagonal line from (0, 0) to (1, 1).
zeroone = np.asarray((0, 1))
plt.plot(zeroone, zeroone, 'k', linestyle='dashed')
# Save the plot.
filepdf = stat + '_ecdf' + suffix + '.pdf'
plt.savefig(filepdf, bbox_inches='tight')
plt.close()
# Convert the pdf to jpg.
filejpg = filepdf[:-4] + '.jpg'
args = ['convert', '-density', '1200', filepdf, filejpg]
procs.append(subprocess.Popen(args))
print('waiting for conversion from pdf to jpg to finish....')
for iproc, proc in enumerate(procs):
proc.wait()
print(f'{iproc + 1} of {len(procs)} conversions are done....')
if __name__ == '__main__':
# Test if the cumulative distribution functions yield the known values
# for their means.
print('testing means...')
ks_mean, ku_mean = testmeans()
# Print values of the cumulative distribution functions at some interesting
# values for their arguments.
print()
print('evaluating for particular values of the arguments...')
print()
printvals(ks_mean, ku_mean)
# Save plots of the complementary cumulative distribution functions.
print()
print('plotting the complementary cumulative distribution functions...')
saveplots(ks_mean, ku_mean)
# Plot the calibration curves ("calibration curves" are the empirical
# cumulative distribution functions of P-values under the null hypothesis
# of perfect calibration).
ns = [100, 1000, 10000]
points = 100000
print('plotting calibration with equispaced scores...')
plotnull(ns, points)
print('plotting calibration with square-rooted scores...')
plotnull(ns, points, np.sqrt, suffix='_sqrt')
print('plotting calibration with squared scores...')
plotnull(ns, points, np.square, suffix='_square')
|
cdeets-main
|
codes/dists.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Plot the subpopulation deviations for the American Community Survey of USCB.
This script creates a directory, "weighted," in the working directory if the
directory does not already exist, then creates subdirectories there for each
of the counties in California specified by the list "exs" defined below, and
fills each subdirectory with eight files:
1. metrics.txt -- metrics about the plots
2. cumulative.pdf -- plot of cumulative differences between the county & state
3. equiscores10.pdf -- reliability diagram of the county & state with 10 bins
(equispaced in scores)
4. equiscores20.pdf -- reliability diagram of the county & state with 20 bins
(equispaced in scores)
5. equiscores100.pdf -- reliability diagram of the county & state with 100 bins
(equispaced in scores)
6. equierrs10.pdf -- reliability diagram of the county & state with 10 bins
(the error bar is about the same for every bin)
7. equierrs20.pdf -- reliability diagram of the county & state with 20 bins
(the error bar is about the same for every bin)
8. equierrs100.pdf -- reliability diagram of the county & state with 100 bins
(the error bar is about the same for every bin)
The data comes from the American Community Survey of the U.S. Census Bureau,
specifically the household data from the state of California and its counties.
The scores are log_10 of the adjusted household personal incomes.
The results/responses are given by the variates specified in the list "exs"
defined below (together with the value of the variate to be considered
"success" in the sense of Bernoulli trials, or else the nonnegative integer
count for the variate, counting people, for instance).
This source code is licensed under the MIT license found in the LICENSE file in
the root directory of this source tree.
"""
import math
import numpy as np
import os
from subpop_weighted import equiscores, equierrs, cumulative
# Specify which counties and variates to process, as well as the coded value
# of interest for each variate (or None if the values of interest are
# nonnegative integer counts).
exs = [
{'county': 'Humboldt', 'var': 'LNGI', 'val': 2},
{'county': 'Los Angeles', 'var': 'NP', 'val': None},
{'county': 'Napa', 'var': 'SATELLITE', 'val': 1},
{'county': 'Orange', 'var': 'HISPEED', 'val': 1},
{'county': 'San Joaquin', 'var': 'NRC', 'val': None},
{'county': 'Stanislaus', 'var': 'NRC', 'val': None},
]
# Specify the name of the file of comma-separated values
# for the household data in the American Community Survey.
filename = 'psam_h06.csv'
# Count the number of lines in the file for filename.
lines = 0
with open(filename, 'r') as f:
for line in f:
lines += 1
print(f'reading and filtering all {lines} lines from {filename}....')
# Determine the number of columns in the file for filename.
with open(filename, 'r') as f:
line = f.readline()
num_cols = line.count(',') + 1
# Read and store all but the first two columns in the file for filename.
raw = np.zeros((lines, num_cols - 2))
with open(filename, 'r') as f:
for line_num, line in enumerate(f):
parsed = line.split(',')[2:]
if line_num == 0:
# The initial line is a header ... save its column labels.
header = parsed.copy()
# Eliminate the newline character at the end of the line.
header[-1] = header[-1][:-1]
else:
# All but the initial line consist of data ... extract the ints.
raw[line_num - 1, :] = np.array(
[int(s if s != '' else -1) for s in parsed])
# Filter out undesirable observations -- keep only strictly positive weights,
# strictly positive household personal incomes, and strictly positive factors
# for adjusting the income.
keep = np.logical_and.reduce([
raw[:, header.index('WGTP')] > 0,
raw[:, header.index('HINCP')] > 0,
raw[:, header.index('ADJINC')] > 0])
raw = raw[keep, :]
print(f'm = raw.shape[0] = {raw.shape[0]}')
# Form a dictionary of the lower- and upper-bounds on the ranges of numbers
# of the public-use microdata areas (PUMAs) for the counties in California.
puma = {
'Alameda': (101, 110),
'Alpine, Amador, Calaveras, Inyo, Mariposa, Mono and Tuolumne': (300, 300),
'Butte': (701, 702),
'Colusa, Glenn, Tehama and Trinity': (1100, 1100),
'Contra Costa': (1301, 1309),
'Del Norte, Lassen, Modoc, Plumas and Siskiyou': (1500, 1500),
'El Dorado': (1700, 1700),
'Fresno': (1901, 1907),
'Humboldt': (2300, 2300),
'Imperial': (2500, 2500),
'Kern': (2901, 2905),
'Kings': (3100, 3100),
'Lake and Mendocino': (3300, 3300),
'Los Angeles': (3701, 3769),
'Madera': (3900, 3900),
'Marin': (4101, 4102),
'Merced': (4701, 4702),
'Monterey': (5301, 5303),
'Napa': (5500, 5500),
'Nevada and Sierra': (5700, 5700),
'Orange': (5901, 5918),
'Placer': (6101, 6103),
'Riverside': (6501, 6515),
'Sacramento': (6701, 6712),
'San Bernardino': (7101, 7115),
'San Diego': (7301, 7322),
'San Francisco': (7501, 7507),
'San Joaquin': (7701, 7704),
'San Luis Obispo': (7901, 7902),
'San Mateo': (8101, 8106),
'Santa Barbara': (8301, 8303),
'Santa Clara': (8501, 8514),
'Santa Cruz': (8701, 8702),
'Shasta': (8900, 8900),
'Solano': (9501, 9503),
'Sonoma': (9701, 9703),
'Stanislaus': (9901, 9904),
'Sutter and Yuba': (10100, 10100),
'Tulare': (10701, 10703),
'Ventura': (11101, 11106),
'Yolo': (11300, 11300),
}
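# Example: puma['Humboldt'] == (2300, 2300), so the Humboldt entry in exs
# keeps exactly the households whose PUMA code equals 2300, while
# puma['Los Angeles'] spans the whole range 3701..3769.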
# Process the examples.
for ex in exs:
for dither in [True, False]:
# Form the scores, results, and weights.
np.random.seed(seed=3820497)
# Adjust the household personal income by the relevant factor.
s = raw[:, header.index('HINCP')] * raw[:, header.index('ADJINC')]
s /= 1e6
# Convert the adjusted incomes to a log (base-10) scale.
s = np.log(s) / math.log(10)
# Dither in order to ensure the uniqueness of the scores (if dither
# is True).
if dither:
s = s * (np.ones(s.shape) + np.random.normal(size=s.shape) * 1e-8)
# Read the result (raw integer count if the specified value is None,
# Bernoulli indicator of success otherwise).
if ex['val'] is None:
r = raw[:, header.index(ex['var'])]
else:
r = raw[:, header.index(ex['var'])] == ex['val']
# Read the weight.
w = raw[:, header.index('WGTP')]
# Sort the scores.
perm = np.argsort(s)
s = s[perm]
r = r[perm]
w = w[perm]
# Set a directory for the county (creating the directory if necessary).
dir = 'weighted'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir = 'weighted/County_of_'
dir += ex['county'].replace(' ', '_').replace(',', '')
dir += '-'
dir += ex['var']
dir += '-'
if dither:
dir += 'dithered'
else:
dir += 'averaged'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir += '/'
print(f'./{dir} is under construction....')
# Identify the indices of the subset corresponding to the county.
pumas = raw[perm, header.index('PUMA')]
inds = pumas >= (puma[ex['county']][0] * np.ones(raw.shape[0]))
inds = inds & (
pumas <= (puma[ex['county']][1] * np.ones(raw.shape[0])))
inds = np.nonzero(inds)[0]
inds = np.unique(inds)
# Plot reliability diagrams and the cumulative graph.
nin = [10, 20, 100]
nout = {}
for nbins in nin:
filename = dir + 'equiscores' + str(nbins) + '.pdf'
equiscores(r, s, inds, nbins, filename, weights=w, left=0)
filename = dir + 'equierrs' + str(nbins) + '.pdf'
nout[str(nbins)] = equierrs(r, s, inds, nbins, filename, weights=w)
if nbins < 100:
assert abs(nout[str(nbins)][0] - nbins) <= 3
assert abs(nout[str(nbins)][1] - nbins) <= 3
majorticks = 10
minorticks = 300
filename = dir + 'cumulative.pdf'
kuiper, kolmogorov_smirnov, lenscale = cumulative(
r, s, inds, majorticks, minorticks, ex['val'] is not None,
filename=filename, weights=w)
# Save metrics in a text file.
filename = dir + 'metrics.txt'
with open(filename, 'w') as f:
f.write('m:\n')
f.write(f'{len(s)}\n')
f.write('n:\n')
f.write(f'{len(inds)}\n')
f.write('number of unique scores in the subset:\n')
f.write(f'{len(np.unique(s[inds]))}\n')
f.write('lenscale:\n')
f.write(f'{lenscale}\n')
for nbins in nin:
f.write("nout['" + str(nbins) + "']:\n")
f.write(f'{nout[str(nbins)][0]}\n')
f.write(f'{nout[str(nbins)][1]}\n')
f.write('Kuiper:\n')
f.write(f'{kuiper:.4}\n')
f.write('Kolmogorov-Smirnov:\n')
f.write(f'{kolmogorov_smirnov:.4}\n')
f.write('Kuiper / lenscale:\n')
f.write(f'{(kuiper / lenscale):.4}\n')
f.write('Kolmogorov-Smirnov / lenscale:\n')
f.write(f'{(kolmogorov_smirnov / lenscale):.4}\n')
|
cdeets-main
|
codes/acs.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Plots of deviation of a subpop. from the full pop., with weighted sampling
*
This implementation considers responses r that can take arbitrary values,
not necessarily restricted to taking values 0 or 1.
*
Functions
---------
cumulative
Cumulative difference between observations from a subpop. & the full pop.
equiscores
Reliability diagram with roughly equispaced average scores over bins
equierrs
Reliability diagram with similar ratio L2-norm / L1-norm of weights by bin
exactplot
Reliability diagram with exact values plotted
This source code is licensed under the MIT license found in the LICENSE file in
the root directory of this source tree.
"""
import math
import os
import subprocess
import random
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedFormatter
def cumulative(r, s, inds, majorticks, minorticks, bernoulli=True,
filename='cumulative.pdf',
title='subpop. deviation is the slope as a function of $A_k$',
fraction=1, weights=None):
"""
Cumulative difference between observations from a subpop. & the full pop.
Saves a plot of the difference between the normalized cumulative weighted
sums of r for the subpopulation indices inds and the normalized cumulative
weighted sums of r from the full population interpolated to the subpop.
indices, with majorticks major ticks and minorticks minor ticks on the
lower axis, labeling the major ticks with the corresponding values from s.
Parameters
----------
r : array_like
random outcomes
s : array_like
scores (must be in non-decreasing order)
inds : array_like
indices of the subset within s that defines the subpopulation
(must be unique and in strictly increasing order)
majorticks : int
number of major ticks on each of the horizontal axes
minorticks : int
number of minor ticks on the lower axis
bernoulli : bool, optional
set to True (the default) for Bernoulli variates; set to False
to use empirical estimates of the variance rather than the formula
p(1-p) for a Bernoulli variate whose mean is p
filename : string, optional
name of the file in which to save the plot
title : string, optional
title of the plot
fraction : float, optional
proportion of the full horizontal axis to display
weights : array_like, optional
weights of the observations
(the default None results in equal weighting)
Returns
-------
float
Kuiper statistic
float
Kolmogorov-Smirnov statistic
float
quarter of the full height of the isosceles triangle
at the origin in the plot
"""
def histcounts(nbins, a):
# Counts the number of entries of a
# falling into each of nbins equispaced bins.
j = 0
nbin = np.zeros(nbins, dtype=np.int64)
for k in range(len(a)):
if a[k] > a[-1] * (j + 1) / nbins:
j += 1
if j == nbins:
break
nbin[j] += 1
return nbin
def aggregate(r, s, ss, w):
# Determines the weighted mean and variance of the entries of r
# in a bin around each entry of s corresponding to the subset ss of s.
# The bin ranges from halfway to the nearest entry of s in ss
# on the left to halfway to the nearest entry of s in ss on the right.
q = np.insert(np.append(ss, [1e20]), 0, [-1e20])
t = np.asarray([(q[k] + q[k + 1]) / 2 for k in range(len(q) - 1)])
rc = np.zeros((len(ss)))
rc2 = np.zeros((len(ss)))
sc = np.zeros((len(ss)))
sc2 = np.zeros((len(ss)))
j = 0
for k in range(len(s)):
if s[k] > t[j + 1]:
j += 1
if j == len(ss):
break
if s[k] >= t[0]:
sc[j] += w[k]
sc2[j] += w[k]**2
rc[j] += w[k] * r[k]
rc2[j] += w[k] * r[k]**2
means = rc / sc
# Calculate an adjustment factor for the estimate of the variance
# that will make the estimate unbiased.
unbias = sc**2
unbias[np.where(sc**2 == sc2)] = 0
unbias[np.where(sc**2 != sc2)] /= (sc**2 - sc2)[np.where(sc**2 != sc2)]
return means, unbias * (rc2 / sc - means**2)
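# (The unbias factor implements the standard correction for weighted sample
# variance, assuming independent responses within each bin: with weights
# summing to sc and squared weights summing to sc2,
# E[rc2 / sc - means**2] = var * (1 - sc2 / sc**2),
# so multiplying by sc**2 / (sc**2 - sc2) removes the bias.)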
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
assert all(inds[k] < inds[k + 1] for k in range(len(inds) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w.sum()
# Create the figure.
plt.figure()
ax = plt.axes()
# Subsample s, r, and w.
ss = s[inds]
rs = r[inds]
ws = w[inds]
# Average the results and weights for repeated scores, while subsampling
# the scores and indices for the subpopulation. Also calculate factors
# for adjusting the variances of responses to account for the responses
# being averages of other responses (when the scores need not be unique).
sslist = []
rslist = []
wslist = []
fslist = []
rssum = 0
wssum = 0
wssos = 0
for k in range(len(ss)):
rssum += rs[k] * ws[k]
wssum += ws[k]
wssos += ws[k]**2
if k == len(ss) - 1 or not math.isclose(
ss[k], ss[k + 1], rel_tol=1e-14):
sslist.append(ss[k])
rslist.append(rssum / wssum)
wslist.append(wssum)
fslist.append(wssos / wssum**2)
rssum = 0
wssum = 0
wssos = 0
ss = np.asarray(sslist)
rs = np.asarray(rslist)
ws = np.asarray(wslist)
fs = np.asarray(fslist)
# Normalize the weights.
ws /= ws[:int(len(ws) * fraction)].sum()
# Aggregate r according to s, ss, and w.
rt, rtvar = aggregate(r, s, ss, w)
# Accumulate the weighted rs and rt, as well as ws.
f = np.insert(np.cumsum(ws * rs), 0, [0])
ft = np.insert(np.cumsum(ws * rt), 0, [0])
x = np.insert(np.cumsum(ws), 0, [0])
# Plot the difference.
plt.plot(
x[:int(len(x) * fraction)], (f - ft)[:int(len(f) * fraction)], 'k')
# Make sure the plot includes the origin.
plt.plot(0, 'k')
# Add an indicator of the scale of 1/sqrt(n) to the vertical axis.
rtsub = np.insert(rt, 0, [0])[:(int(len(rt) * fraction) + 1)]
if bernoulli:
lenscale = np.sqrt(np.sum(ws**2 * rtsub[1:] * (1 - rtsub[1:]) * fs))
else:
lenscale = np.sqrt(np.sum(ws**2 * rtvar * fs))
plt.plot(2 * lenscale, 'k')
plt.plot(-2 * lenscale, 'k')
kwargs = {
'head_length': 2 * lenscale, 'head_width': fraction / 20, 'width': 0,
'linewidth': 0, 'length_includes_head': True, 'color': 'k'}
plt.arrow(.1e-100, -2 * lenscale, 0, 4 * lenscale, shape='left', **kwargs)
plt.arrow(.1e-100, 2 * lenscale, 0, -4 * lenscale, shape='right', **kwargs)
plt.margins(x=0, y=.1)
# Label the major ticks of the lower axis with the values of ss.
lenxf = int(len(x) * fraction)
sl = ['{:.2f}'.format(a) for a in
np.insert(ss, 0, [0])[:lenxf:(lenxf // majorticks)].tolist()]
plt.xticks(x[:lenxf:(lenxf // majorticks)], sl)
if len(rtsub) >= 300 and minorticks >= 50:
# Indicate the distribution of s via unlabeled minor ticks.
plt.minorticks_on()
ax.tick_params(which='minor', axis='x')
ax.tick_params(which='minor', axis='y', left=False)
ax.set_xticks(x[np.cumsum(histcounts(minorticks,
ss[:int((len(x) - 1) * fraction)]))], minor=True)
# Label the axes.
plt.xlabel('$S_k$')
plt.ylabel('$B_k$')
ax2 = plt.twiny()
plt.xlabel(
'$k/n$ (together with minor ticks at equispaced values of $A_k$)')
ax2.tick_params(which='minor', axis='x', top=True, direction='in', pad=-17)
ax2.set_xticks(np.arange(0, 1 + 1 / majorticks, 1 / majorticks),
minor=True)
ks = ['{:.2f}'.format(a) for a in
np.arange(0, 1 + 1 / majorticks, 1 / majorticks).tolist()]
alist = (lenxf - 1) * np.arange(0, 1 + 1 / majorticks, 1 / majorticks)
alist = alist.tolist()
# Jitter minor ticks that overlap with major ticks lest Pyplot omit them.
alabs = []
for a in alist:
multiple = x[int(a)] * majorticks
if abs(multiple - round(multiple)) > 1e-4:
alabs.append(x[int(a)])
else:
alabs.append(x[int(a)] * (1 - 1e-4))
plt.xticks(alabs, ks)
ax2.xaxis.set_minor_formatter(FixedFormatter(
[r'$A_k\!=\!{:.2f}$'.format(1 / majorticks)]
+ [r'${:.2f}$'.format(k / majorticks) for k in range(2, majorticks)]))
# Title the plot.
plt.title(title)
# Clean up the whitespace in the plot.
plt.tight_layout()
# Save the plot.
plt.savefig(filename, bbox_inches='tight')
plt.close()
# Calculate summary statistics.
fft = (f - ft)[:int(len(f) * fraction)]
kuiper = np.max(fft) - np.min(fft)
kolmogorov_smirnov = np.max(np.abs(fft))
return kuiper, kolmogorov_smirnov, lenscale
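# Minimal usage sketch (synthetic, illustrative data; writes cumulative.pdf
# to the working directory):
#   s = np.sort(np.random.rand(1000))        # scores, non-decreasing
#   r = (np.random.rand(1000) <= s) * 1.0    # Bernoulli responses
#   inds = np.arange(0, 1000, 10)            # subpopulation indices
#   ku, ks, scale = cumulative(r, s, inds, 10, 100)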
def equiscores(r, s, inds, nbins, filename='equiscore.pdf', weights=None,
top=None, left=None, right=None):
"""
Reliability diagram with roughly equispaced average scores over bins
Plots a reliability diagram with roughly equispaced average scores
for the bins, for both the full population and the subpopulation specified
by the indices inds.
Parameters
----------
r : array_like
random outcomes
s : array_like
scores (must be in non-decreasing order)
inds : array_like
indices of the subset within s that defines the subpopulation
(must be unique and in strictly increasing order)
nbins : int
number of bins
filename : string, optional
name of the file in which to save the plot
weights : array_like, optional
weights of all observations
(the default None results in equal weighting)
top : float, optional
top of the range of the vertical axis (the default None is adaptive)
left : float, optional
leftmost value of the horizontal axis (the default None is adaptive)
right : float, optional
rightmost value of the horizontal axis (the default None is adaptive)
Returns
-------
None
"""
def bintwo(nbins, a, b, q, qmax, w):
# Determines the total weight of entries of q falling into each
# of nbins equispaced bins, and calculates the weighted average per bin
# of the arrays a and b, returning np.nan as the "average"
# for any bin that is empty.
j = 0
bina = np.zeros(nbins)
binb = np.zeros(nbins)
wbin = np.zeros(nbins)
for k in range(len(q)):
if q[k] > qmax * (j + 1) / nbins:
j += 1
if j == nbins:
break
bina[j] += w[k] * a[k]
binb[j] += w[k] * b[k]
wbin[j] += w[k]
# Normalize the sum for each bin to compute the arithmetic average.
bina = np.divide(bina, wbin, where=wbin != 0)
bina[np.where(wbin == 0)] = np.nan
binb = np.divide(binb, wbin, where=wbin != 0)
binb[np.where(wbin == 0)] = np.nan
return wbin, bina, binb
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
assert all(inds[k] < inds[k + 1] for k in range(len(inds) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w.sum()
ws = w[inds]
ws /= ws.sum()
# Create the figure.
plt.figure()
_, binr, binst = bintwo(nbins, r, s, s, s[inds[-1]], w)
_, binrs, binss = bintwo(nbins, r[inds], s[inds], s[inds], s[inds[-1]], ws)
plt.plot(binst, binr, '*:', color='gray')
plt.plot(binss, binrs, '*:', color='black')
xmin = min(binst[0], binss[0]) if left is None else left
xmax = max(binst[-1], binss[-1]) if right is None else right
plt.xlim((xmin, xmax))
plt.ylim(bottom=0)
plt.ylim(top=top)
plt.xlabel('weighted average of $S_k$ for $k$ in the bin')
plt.ylabel('weighted average of $R_k$ for $k$ in the bin')
plt.title('reliability diagram')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
def equierrs(r, s, inds, nbins, filename='equibins.pdf', weights=None,
top=None, left=None, right=None):
"""
Reliability diagram with similar ratio L2-norm / L1-norm of weights by bin
Plots a reliability diagram with the ratio of the L2 norm of the weights
to the L1 norm of the weights being roughly the same for every bin.
The L2 norm is the square root of the sum of the squares, while the L1 norm
is the sum of the absolute values. The plot includes a graph both for the
full population and for the subpopulation specified by the indices inds.
Parameters
----------
r : array_like
random outcomes
s : array_like
scores (must be in non-decreasing order)
inds : array_like
indices of the subset within s that defines the subpopulation
(must be unique and in strictly increasing order)
nbins : int
rough number of bins to construct
filename : string, optional
name of the file in which to save the plot
weights : array_like, optional
weights of all observations
(the default None results in equal weighting)
top : float, optional
top of the range of the vertical axis (the default None is adaptive)
left : float, optional
leftmost value of the horizontal axis (the default None is adaptive)
right : float, optional
rightmost value of the horizontal axis (the default None is adaptive)
Returns
-------
int
number of bins constructed for the subpopulation
int
number of bins constructed for the full population
"""
def inbintwo(a, b, inbin, w):
# Determines the total weight falling into the bins given by inbin,
# and calculates the weighted average per bin of the arrays a and b,
# returning np.nan as the "average" for any bin that is empty.
# use an ndarray so the elementwise tests "wbin != 0" / "wbin == 0" below work
wbin = np.asarray(
[w[inbin[k]:inbin[k + 1]].sum() for k in range(len(inbin) - 1)])
bina = [(w[inbin[k]:inbin[k + 1]] * a[inbin[k]:inbin[k + 1]]).sum()
for k in range(len(inbin) - 1)]
binb = [(w[inbin[k]:inbin[k + 1]] * b[inbin[k]:inbin[k + 1]]).sum()
for k in range(len(inbin) - 1)]
# Normalize the sum for each bin to compute the weighted average.
bina = np.divide(bina, wbin, where=wbin != 0)
bina[np.where(wbin == 0)] = np.nan
binb = np.divide(binb, wbin, where=wbin != 0)
binb[np.where(wbin == 0)] = np.nan
return wbin, bina, binb
def binbounds(nbins, w):
# Partitions w into around nbins bins, each with roughly equal ratio
# of the L2 norm of w in the bin to the L1 norm of w in the bin,
# returning the indices defining the bins in the list inbin.
proxy = len(w) // nbins
v = w[np.sort(np.random.permutation(len(w))[:proxy])]
# t is a heuristic threshold.
t = np.square(v).sum() / v.sum()**2
inbin = []
k = 0
while k < len(w) - 1:
inbin.append(k)
k += 1
s = w[k]
ss = w[k]**2
while ss / s**2 > t and k < len(w) - 1:
k += 1
s += w[k]
ss += w[k]**2
if len(w) - inbin[-1] < (inbin[-1] - inbin[-2]) / 2:
inbin[-1] = len(w)
else:
inbin.append(len(w))
return inbin
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
assert all(inds[k] < inds[k + 1] for k in range(len(inds) - 1))
# Determine the weighting scheme.
if weights is None:
w = np.ones((len(s)))
else:
w = weights.copy()
assert np.all(w > 0)
w /= w.sum()
inbin = binbounds(nbins, w)
ws = w[inds]
ws /= ws.sum()
inbins = binbounds(nbins, ws)
# Create the figure.
plt.figure()
_, binr, binst = inbintwo(r, s, inbin, w)
_, binrs, binss = inbintwo(r[inds], s[inds], inbins, ws)
plt.plot(binst, binr, '*:', color='gray')
plt.plot(binss, binrs, '*:', color='black')
xmin = min(binst[0], binss[0]) if left is None else left
xmax = max(binst[-1], binss[-1]) if right is None else right
plt.xlim((xmin, xmax))
plt.ylim(bottom=0)
plt.ylim(top=top)
plt.xlabel('weighted average of $S_k$ for $k$ in the bin')
plt.ylabel('weighted average of $R_k$ for $k$ in the bin')
title = r'reliability diagram'
title += r' ($\Vert W \Vert_2 / \Vert W \Vert_1$ is similar for every bin)'
plt.title(title)
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
return len(inbins) - 1, len(inbin) - 1
def exactplot(r, s, inds, filename='exact.pdf', title='exact expectations',
top=None, left=None, right=None):
"""
Reliability diagram with exact values plotted
Plots a reliability diagram at full resolution with fractional numbers,
for both the full population and the subpop. specified by indices inds.
The entries of r should be the expected values of outcomes,
even if the outcomes are integer-valued counts or just 0s and 1s.
Parameters
----------
r : array_like
expected value of outcomes
s : array_like
scores (must be in non-decreasing order)
inds : array_like
indices of the subset within s that defines the subpopulation
(must be unique and in strictly increasing order)
filename : string, optional
name of the file in which to save the plot
title : string, optional
title of the plot
top : float, optional
top of the range of the vertical axis (the default None is adaptive)
left : float, optional
leftmost value of the horizontal axis (the default None is adaptive)
right : float, optional
rightmost value of the horizontal axis (the default None is adaptive)
Returns
-------
None
"""
assert all(s[k] <= s[k + 1] for k in range(len(s) - 1))
assert all(inds[k] < inds[k + 1] for k in range(len(inds) - 1))
plt.figure()
plt.plot(s, r, '*', color='gray')
rs = r[inds]
ss = s[inds]
plt.plot(ss, rs, '*', color='black')
plt.xlim((left, right))
plt.ylim(bottom=0)
plt.ylim(top=top)
plt.xlabel('score $S_k$')
plt.ylabel('expected value ($P_k$) of outcome $R_k$')
plt.title(title)
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
#
# Generate directories with plots as specified via the code below,
# with each directory named m_len(inds)_nbins_iex-dithered or
# m_len(inds)_nbins_iex-averaged (where m, inds, nbins, and iex
# are defined in the code below, and "dithered" uses scores dithered
# to become distinct, while "averaged" uses responses averaged together
# at the same score).
#
# Set parameters.
# minorticks is the number of minor ticks on the lower axis.
minorticks = 100
# majorticks is the number of major ticks on the lower axis.
majorticks = 10
# m is the number of members from the full population.
m = 50000
# n determines the number of observations for the subpopulation.
n = 2500
# Store processes for converting from pdf to jpeg in procs.
procs = []
# Consider 4 examples.
for iex in range(4):
for dither in [True, False]:
# nbins is the number of bins for the reliability diagrams.
for nbins in [10, 50]:
# nbins must divide n evenly.
assert n % nbins == 0
if iex == 0:
# Define the indices of the subset for the subpopulation.
inds = np.arange(0, m, m // n) + m // n // 2
inds1 = np.arange(0, m // 4, m // n // 4) + m // 2 - m // 8
inds2 = np.arange(0, m // 2, m // n // 2) + m // 2 - m // 4
inds3 = np.arange(0, m, m // n)
inds = np.concatenate((inds1, inds1 + 1, inds2, inds3))
# Indices must be sorted and unique.
inds = np.unique(inds)
inds = inds[0:(m // (m // len(inds)))]
# Construct scores.
sl = np.arange(0, 1, 4 / m) + 2 / m
s = np.square(sl)
s = np.concatenate([s] * 4)
if dither:
ss = s.shape
s *= np.ones(ss) + np.random.normal(size=ss) * 1e-8
# The scores must be in non-decreasing order.
s = np.sort(s)
# Construct perturbations to the scores for sampling rates.
d = .25
tl = -np.arange(-d, d, 2 * d / m) - d / m
t = d - 1.1 * np.square(np.square(tl)) / d**3
e = .7
ul = -np.arange(-e, e, 2 * e / m) - e / m
u = e - np.abs(ul)
ins = np.arange(m // 2 - m // 50, m // 2 + m // 50)
u[ins] = t[ins]
u2 = 2 * t - u
t += np.sin(np.arange((m))) * (u - t)
# Construct the exact sampling probabilities.
exact = s + t
exact[inds] = s[inds] + u[inds]
exact[inds + 1] = s[inds] + u2[inds]
# Construct weights.
weights = 4 - np.cos(9 * np.arange(m) / m)
if iex == 1:
# Define the indices of the subset for the subpopulation.
np.random.seed(987654321)
inds = np.sort(np.random.permutation((m))[:n])
# Construct scores.
s = np.arange(0, 1, 2 / m) + 1 / m
s = np.sqrt(s)
s = np.concatenate((s, s))
if dither:
ss = s.shape
s *= np.ones(ss) + np.random.normal(size=ss) * 1e-8
# The scores must be in non-decreasing order.
s = np.sort(s)
# Construct perturbations to the scores for sampling rates.
d = math.sqrt(1 / 2)
tl = np.arange(-d, d, 2 * d / m) - d / m
t = (1 + np.sin(np.arange((m)))) / 2
t *= np.square(tl) - d**2
u = np.square(tl) - d**2
u *= .75 * np.round(1 + np.sin(10 * np.arange((m)) / m))
u /= 2
# Construct the exact sampling probabilities.
exact = s + t
exact[inds] = s[inds] + u[inds]
# Construct weights.
weights = 4 - np.cos(9 * np.arange(m) / m)
if iex == 2:
# Define the indices of the subset for the subpopulation.
np.random.seed(987654321)
inds = np.arange(0, m ** (3 / 4), 1)
inds = np.unique(np.round(np.power(inds, 4 / 3)))
inds = inds.astype(int)
inds = inds[0:(50 * (len(inds) // 50))]
# Construct scores.
s = np.arange(0, 1, 10 / m) + 5 / m
s = np.concatenate([s] * 10)
if dither:
ss = s.shape
s *= np.ones(ss) + np.random.normal(size=ss) * 1e-8
# The scores must be in non-decreasing order.
s = np.sort(s)
# Construct perturbations to the scores for sampling rates.
tl = np.arange(0, 1, 1 / m) + 1 / (2 * m)
t = np.power(tl, 1 / 4) - tl
t *= (1 + np.sin(np.arange((m)))) / 2
u = np.power(tl, 1 / 4) - tl
u *= .5 * (1 + np.sin(
50 * np.power(np.arange(0, m**4, m**3), 1 / 4) / m))
# Construct the exact sampling probabilities.
exact = s + t
exact[inds] = s[inds] + u[inds]
# Construct weights.
weights = 4 - np.cos(9 * np.arange(m) / m)
if iex == 3:
# Define the indices of the subset for the subpopulation.
np.random.seed(987654321)
inds = np.sort(np.random.permutation((m))[:n])
# Construct scores.
s = np.arange(0, 1, 4 / m) + 2 / m
s = np.concatenate([s] * 4)
if dither:
ss = s.shape
s *= np.ones(ss) + np.random.normal(size=ss) * 1e-8
# The scores must be in non-decreasing order.
s = np.sort(s)
# Construct the exact sampling probabilities.
exact = np.sin(np.arange(m))
exact *= np.sin(50 * np.arange(-3 * m / 4, m / 4) / m)
exact = np.square(exact)
exact /= 5
exact[inds] = 0
# Construct weights.
weights = np.ones((m))
ind = 3 * m // 4 - 1
# Identify an index near the middle that belongs to the subpop.
# while its two adjacent indices do not.
while(
np.any(inds == (ind - 1))
or not np.any(inds == ind)
or np.any(inds == (ind + 1))):
ind += 1
weights[ind] = n / 50
weights[ind - 1] = m / 500
weights[ind + 1] = m / 500
# Alter the exact sampling probabilities for the 3 indices
# selected in the preceding "while" loop.
exact[ind] = 1
exact[ind - 1] = 1
exact[ind + 1] = 0
# Set a unique directory for each collection of experiments
# (creating the directory if necessary).
dir = 'weighted'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir = 'weighted/' + str(m) + '_' + str(len(inds))
dir = dir + '_' + str(nbins)
dir = dir + '_' + str(iex)
dir += '-'
if dither:
dir += 'dithered'
else:
dir += 'averaged'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir = dir + '/'
print(f'./{dir} is under construction....')
# Generate a sample of classifications into two classes,
# correct (class 1) and incorrect (class 0),
# avoiding numpy's random number generators
# that are based on random bits --
# they yield strange results for many seeds.
random.seed(987654321)
uniform = np.asarray([random.random() for _ in range(m)])
r = (uniform <= exact).astype(float)
# Generate five plots and a text file reporting metrics.
filename = dir + 'cumulative.pdf'
kuiper, kolmogorov_smirnov, lenscale = cumulative(
r, s, inds, majorticks, minorticks, True, filename,
weights=weights)
filename = dir + 'metrics.txt'
with open(filename, 'w') as f:
f.write('n:\n')
f.write(f'{len(inds)}\n')
f.write('number of unique scores in the subset:\n')
f.write(f'{len(np.unique(s[inds]))}\n')
f.write('lenscale:\n')
f.write(f'{lenscale}\n')
f.write('Kuiper:\n')
f.write(f'{kuiper:.4}\n')
f.write('Kolmogorov-Smirnov:\n')
f.write(f'{kolmogorov_smirnov:.4}\n')
f.write('Kuiper / lenscale:\n')
f.write(f'{(kuiper / lenscale):.4}\n')
f.write('Kolmogorov-Smirnov / lenscale:\n')
f.write(f'{(kolmogorov_smirnov / lenscale):.4}\n')
filename = dir + 'cumulative_exact.pdf'
_, _, _ = cumulative(
exact, s, inds, majorticks, minorticks, True, filename,
title='exact expectations', weights=weights)
filename = dir + 'equiscores.pdf'
equiscores(r, s, inds, nbins, filename, weights, top=1, left=0,
right=1)
filename = dir + 'equierrs.pdf'
equierrs(r, s, inds, nbins + 3, filename, weights, top=1,
left=0, right=1)
filepdf = dir + 'exact.pdf'
filejpg = dir + 'exact.jpg'
exactplot(exact, s, inds, filepdf, top=1, left=0, right=1)
args = ['convert', '-density', '1200', filepdf, filejpg]
procs.append(subprocess.Popen(args))
print('waiting for conversion from pdf to jpg to finish....')
for iproc, proc in enumerate(procs):
proc.wait()
print(f'{iproc + 1} of {len(procs)} conversions are done....')
|
cdeets-main
|
codes/subpop_weighted.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#Common imports
import sys
import os
import argparse
import random
import copy
import torch
import torch.utils.data as data_utils
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.decomposition import FastICA
from algorithms.base_auto_encoder import AE
from algorithms.poly_auto_encoder import AE_Poly
from algorithms.ioss_auto_encoder import AE_IOSS
from algorithms.image_auto_encoder import AE_Image
from utils.metrics import *
from utils.helper import *
# Input Parsing
parser = argparse.ArgumentParser()
parser.add_argument('--method_type', type=str, default='ae_poly',
help='ae; ae_poly; ae_image')
parser.add_argument('--latent_case', type=str, default='uniform',
help='laplace; uniform')
parser.add_argument('--data_dim', type=int, default= 200,
help='Dimension of the observations x')
parser.add_argument('--latent_dim', type=int, default= 6,
help='Dimension of the latent variables z')
parser.add_argument('--poly_degree', type=int, default= 2,
help='Degree of the polynomial in the data generating process')
parser.add_argument('--batch_size', type=int, default= 16,
help='Batch size for the data loaders')
parser.add_argument('--lr', type=float, default= 1e-3,
help='Learning rate')
parser.add_argument('--weight_decay', type=float, default= 5e-4,
help='Weight decay for the optimizer')
parser.add_argument('--num_seeds', type=int, default=5,
help='Number of random seeds to average results over')
parser.add_argument('--intervention_case', type= int, default= 0,
help='Set to 1 to also sample interventional data (0/1)')
parser.add_argument('--eval_ioss_transformation', type=int, default=0,
help='Evaluate the IOSS transformation from the base model representation')
parser.add_argument('--eval_intervene_transformation', type=int, default=0,
help='Evaluate the Intervention transformation from the base model representation')
parser.add_argument('--eval_dgp', type=int, default= 0,
help= 'Evaluate the function from z -> x and x -> z in the true DGP')
parser.add_argument('--wandb_log', type=int, default=0,
help='Enable logging to Weights & Biases (0/1)')
parser.add_argument('--cuda_device', type=int, default=-1,
help='Select the cuda device by id among the available devices')
args = parser.parse_args()
method_type= args.method_type
latent_case= args.latent_case
data_dim= args.data_dim
latent_dim= args.latent_dim
poly_degree= args.poly_degree
batch_size= args.batch_size
lr= args.lr
weight_decay= args.weight_decay
num_seeds= args.num_seeds
intervention_case= args.intervention_case
eval_dgp= args.eval_dgp
eval_ioss_transformation= args.eval_ioss_transformation
eval_intervene_transformation= args.eval_intervene_transformation
wandb_log= args.wandb_log
cuda_device= args.cuda_device
if 'balls' in latent_case:
save_dir= latent_case + '/'
else:
save_dir= 'polynomial' + '_latent_' + latent_case + '_poly_degree_' + str(poly_degree) + '_data_dim_' + str(data_dim) + '_latent_dim_' + str(latent_dim) + '/'
args.save_dir= save_dir
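# e.g. with the defaults above, save_dir resolves to
# 'polynomial_latent_uniform_poly_degree_2_data_dim_200_latent_dim_6/'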
#GPU
if cuda_device == -1:
device= "cpu"
else:
device= torch.device("cuda:" + str(cuda_device))
# both "cpu" and torch.device("cuda:...") are truthy; test the selection itself
if cuda_device != -1:
kwargs = {'num_workers': 0, 'pin_memory': False}
else:
kwargs= {}
res={}
for seed in range(num_seeds):
#Seed values
random.seed(seed*10)
np.random.seed(seed*10)
torch.manual_seed(seed*10)
# Load Dataset
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, observation_case=1, intervention_case= intervention_case, latent_case= latent_case, kwargs=kwargs)
#Load Algorithm
if method_type == 'ae':
method= AE(args, train_dataset, val_dataset, test_dataset, seed=seed)
elif method_type == 'ae_poly':
method= AE_Poly(args, train_dataset, val_dataset, test_dataset, seed=seed)
elif method_type == 'ae_image':
method= AE_Image(args, train_dataset, val_dataset, test_dataset, seed=seed, device= device)
# Evaluate the models learnt on true latent variables
if eval_dgp:
# X->Z prediction R2
x, z= get_predictions_check(train_dataset, test_dataset)
rmse, r2= get_indirect_prediction_error(x, z)
key= 'oracle_pred_rmse'
if key not in res.keys():
res[key]= []
res[key].append(rmse)
key= 'oracle_pred_r2'
if key not in res.keys():
res[key]= []
res[key].append(r2)
# Z->X prediction R2
x, z= get_predictions_check(train_dataset, test_dataset)
rmse, r2= get_indirect_prediction_error(z, x)
key= 'debug_pred_rmse'
if key not in res.keys():
res[key]= []
res[key].append(rmse)
key= 'debug_pred_r2'
if key not in res.keys():
res[key]= []
res[key].append(r2)
# Evaluate the base model
method.load_model()
# method.load_intermediate_model(epoch=10)
#Latent Prediction Error
rmse,r2= method.eval_identification()
key= 'latent_pred_rmse'
if key not in res.keys():
res[key]=[]
res[key].append(rmse)
key= 'latent_pred_r2'
if key not in res.keys():
res[key]=[]
res[key].append(r2)
# Evaluating MCC on the observational data with representations from Step 1
#Sample data from only the observational distribution
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, observation_case=1, intervention_case= 0, latent_case= latent_case, kwargs=kwargs)
#Obtain Predictions and Reconstruction Loss
logs= get_predictions(method.encoder, method.decoder, train_dataset, val_dataset, test_dataset, device=method.device, plot= False)
#Prediction RMSE
key= 'recon_rmse'
if key not in res.keys():
res[key]= []
res[key].append(logs['recon_loss']['val'])
print('RMSE Val: ', logs['recon_loss']['val'])
    #MCC: mean correlation coefficient between predicted and true latents
if 'balls' not in latent_case:
mcc= get_cross_correlation(copy.deepcopy(logs['pred_z']), copy.deepcopy(logs['true_z']))
key= 'mcc'
if key not in res.keys():
res[key]= []
for item in mcc:
res[key].append(item)
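    # Fit regressors mapping the learned latents to the true latents on
    # interventional data, then transform the observational representations
    # with them before computing MCC (a summary of intervene_metric's role)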
if eval_intervene_transformation:
#Sample only from interventional distribution
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, latent_case= latent_case, observation_case=0, intervention_case= 1, kwargs=kwargs)
#Obtain Predictions and Reconstruction Loss
logs= get_predictions(method.encoder, method.decoder, train_dataset, val_dataset, test_dataset, device=method.device, plot= False)
# Intervention Specific Metric
if 'balls' not in latent_case:
reg_models= intervene_metric(copy.deepcopy(logs['pred_z']), copy.deepcopy(logs['true_z']), model_train=1)
else:
reg_models= intervene_metric_image(copy.deepcopy(logs['pred_z']), copy.deepcopy(logs['true_z']), copy.deepcopy(logs['true_y']), model_train=1, model= 'mlp')
#Sample data from only the observational distribution
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, latent_case= latent_case, observation_case=1, intervention_case= 0, kwargs=kwargs)
#Obtain Predictions and Reconstruction Loss
logs= get_predictions(method.encoder, method.decoder, train_dataset, val_dataset, test_dataset, device=method.device, plot= False)
# Intervention Specific Metric
if 'balls' not in latent_case:
logs['pred_z']= intervene_metric(copy.deepcopy(logs['pred_z']), copy.deepcopy(logs['true_z']), model_train=0, list_models=reg_models)
else:
logs['pred_z'], logs['true_z']= intervene_metric_image(copy.deepcopy(logs['pred_z']), copy.deepcopy(logs['true_z']), copy.deepcopy(logs['true_y']), model_train=0, list_models= reg_models, model= 'mlp')
    # Evaluate the IOSS transformation learnt on top of the base model representations
if eval_ioss_transformation:
#Sample data from only the observational distribution
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, latent_case= latent_case, observation_case=1, intervention_case= 0, kwargs=kwargs)
#Obtain Predictions and Reconstruction Loss
logs= get_predictions(method.encoder, method.decoder, train_dataset, val_dataset, test_dataset, device=method.device, plot= False)
        #Sample dataloaders over the base representations and load the trained IOSS model
train_dataset, val_dataset, test_dataset= sample_finetune_data_loaders(logs['pred_z'], logs['true_z'], save_dir, batch_size, kwargs= kwargs)
ioss_method= AE_IOSS(args, train_dataset, val_dataset, test_dataset, seed=seed, device=device, base_algo= method_type)
ioss_method.load_model()
#Obtain Predictions and Reconstruction Loss
logs= get_predictions(ioss_method.encoder, ioss_method.decoder, ioss_method.train_dataset, ioss_method.val_dataset, ioss_method.test_dataset, device=ioss_method.device, plot= False)
#MCC
if eval_ioss_transformation or eval_intervene_transformation:
mcc= get_cross_correlation(logs['pred_z'], logs['true_z'])
print('MCC: ', mcc)
key= 'mcc_tune'
if key not in res.keys():
res[key]= []
for item in mcc:
res[key].append(item)
print('Final Results')
print(res.keys())
for key in res.keys():
res[key]= np.array(res[key])
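    # Report the mean and the standard error of the mean across seeds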
print('Metric: ', key, np.mean(res[key]), np.std(res[key])/np.sqrt(num_seeds))
|
CausalRepID-main
|
test.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#Common imports
import sys
import os
import argparse
import random
import copy
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.decomposition import FastICA
from algorithms.base_auto_encoder import AE
from algorithms.poly_auto_encoder import AE_Poly
from algorithms.ioss_auto_encoder import AE_IOSS
from algorithms.image_auto_encoder import AE_Image
from utils.metrics import *
from utils.helper import *
# Input Parsing
parser = argparse.ArgumentParser()
parser.add_argument('--method_type', type=str, default='ae_poly',
help= 'ae; ae_poly; ae_image')
parser.add_argument('--latent_case', type=str, default='uniform',
help='laplace; uniform')
parser.add_argument('--data_dim', type=int, default= 200,
                    help='Dimension of the observed data x')
parser.add_argument('--latent_dim', type=int, default= 10,
                    help='Dimension of the latent variable z')
parser.add_argument('--poly_degree', type=int, default= 2,
                    help='Degree of the polynomial decoder')
parser.add_argument('--batch_size', type=int, default= 16,
                    help='Batch size for training')
parser.add_argument('--lr', type=float, default= 1e-3,
                    help='Learning rate')
parser.add_argument('--weight_decay', type=float, default= 5e-4,
                    help='Weight decay for the optimizer')
parser.add_argument('--num_epochs', type=int, default= 200,
                    help='Number of training epochs')
parser.add_argument('--seed', type=int, default=0,
                    help='Random seed')
parser.add_argument('--intervention_case', type= int, default= 0,
                    help= 'Sample interventional data along with observational data (0/1)')
parser.add_argument('--train_base_model', type=int, default=1,
                    help='Train the base auto encoder')
parser.add_argument('--train_ioss_transformation', type=int, default=0,
                    help='Learn the IOSS transformation from the base model representations')
parser.add_argument('--wandb_log', type=int, default=0,
                    help='Log metrics to Weights & Biases (0/1)')
parser.add_argument('--cuda_device', type=int, default=-1,
                    help='Select the cuda device by id among the available devices')
args = parser.parse_args()
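# Example invocation (hypothetical flag values, shown for illustration only):
#   python train.py --method_type ae_poly --latent_case uniform --data_dim 200 \
#       --latent_dim 10 --poly_degree 2 --num_epochs 200 --train_base_model 1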
method_type= args.method_type
latent_case= args.latent_case
data_dim= args.data_dim
latent_dim= args.latent_dim
poly_degree= args.poly_degree
batch_size= args.batch_size
lr= args.lr
weight_decay= args.weight_decay
num_epochs= args.num_epochs
seed= args.seed
intervention_case= args.intervention_case
train_base_model= args.train_base_model
train_ioss_transformation= args.train_ioss_transformation
wandb_log= args.wandb_log
cuda_device= args.cuda_device
if 'balls' in latent_case:
save_dir= latent_case + '/'
else:
save_dir= 'polynomial' + '_latent_' + latent_case + '_poly_degree_' + str(poly_degree) + '_data_dim_' + str(data_dim) + '_latent_dim_' + str(latent_dim) + '/'
args.save_dir= save_dir
#GPU
if cuda_device == -1:
device= torch.device("cpu")
else:
device= torch.device("cuda:" + str(cuda_device))
# DataLoader options; the CPU path falls back to the torch defaults
if device.type == 'cuda':
    kwargs = {'num_workers': 0, 'pin_memory': False}
else:
    kwargs= {}
#Seed values
random.seed(seed*10)
np.random.seed(seed*10)
torch.manual_seed(seed*10)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed*10)
# Load Dataset
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, observation_case=1, intervention_case= intervention_case, latent_case= latent_case, seed= seed, kwargs=kwargs)
#Load Algorithm
if method_type == 'ae':
method= AE(args, train_dataset, val_dataset, test_dataset, seed=seed, device= device)
elif method_type == 'ae_poly':
method= AE_Poly(args, train_dataset, val_dataset, test_dataset, seed=seed, device= device)
elif method_type == 'ae_image':
method= AE_Image(args, train_dataset, val_dataset, test_dataset, seed=seed, device= device)
else:
print('Error: Incorrect method type')
sys.exit(-1)
# Training
if train_base_model:
method.train()
#Train with IOSS Loss
if train_ioss_transformation:
method.load_model()
#Sample data from only the observational distribution
train_dataset, val_dataset, test_dataset= sample_base_data_loaders(save_dir, batch_size, seed= seed, observation_case=1, intervention_case= 0, kwargs=kwargs)
#Obtain Predictions and Reconstruction Loss
res= get_predictions(method.encoder, method.decoder, train_dataset, val_dataset, test_dataset, device=method.device)
#Sample dataloaders for finetuning the representations
train_dataset, val_dataset, test_dataset= sample_finetune_data_loaders(res['pred_z'], res['true_z'], save_dir, batch_size, kwargs= kwargs)
ioss_method= AE_IOSS(args, train_dataset, val_dataset, test_dataset, seed=seed, device=device, base_algo= method_type)
ioss_method.train()
|
CausalRepID-main
|
train.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import math
import torch
import torch.utils.data as data_utils
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA
from utils.metrics import *
from models.encoder import Encoder
from models.poly_decoder import PolyDecoder
#Base Class
from algorithms.base_auto_encoder import AE
path= os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(path)
from utils.helper import ValidationHelper
import wandb
class AE_Poly(AE):
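    """Auto-encoder that pairs the Encoder with a PolyDecoder of degree
    args.poly_degree; the training and evaluation behaviour comes from
    the AE base class."""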
def __init__(self, args, train_dataset, val_dataset, test_dataset, seed=0, device= None):
super().__init__(args, train_dataset, val_dataset, test_dataset, seed, device)
self.encoder= Encoder(self.args.data_dim, self.args.latent_dim).to(self.device)
self.decoder= PolyDecoder(self.args.data_dim, self.args.latent_dim, self.args.poly_degree, self.device).to(self.device)
self.opt, self.scheduler= self.get_optimizer()
if self.args.intervention_case:
self.res_dir= 'results/ae-poly/intervention/' + str(self.args.save_dir) + 'seed_' + str(seed) + '/'
else:
self.res_dir= 'results/ae-poly/observation/' + str(self.args.save_dir) + 'seed_' + str(seed) + '/'
self.save_path= self.res_dir
if self.args.wandb_log:
wandb.init(project="polynomial-identification", reinit=True)
wandb.run.name= 'ae-poly/' + self.args.save_dir + 'seed_' + str(seed) + '/'
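# Minimal usage sketch (mirrors how train.py and test.py drive this class):
#   method= AE_Poly(args, train_dataset, val_dataset, test_dataset, seed=0, device=device)
#   method.train()       # fit the auto-encoder
#   method.load_model()  # reload a saved checkpoint for evaluation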
|
CausalRepID-main
|
algorithms/poly_auto_encoder.py
|