#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from timm.data.auto_augment import rand_augment_transform
import moco.loader
import moco.builder
model_names = sorted(
name
for name in models.__dict__
if name.islower()
and not name.startswith("__")
and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=32,
type=int,
metavar="N",
help="number of data loading workers (default: 32)",
)
parser.add_argument(
"--epochs",
default=100,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.03,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--momentum",
default=0.9,
type=float,
metavar="M",
help="momentum of SGD solver",
)
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--world-size",
default=-1,
type=int,
help="number of nodes for distributed training",
)
parser.add_argument(
"--rank", default=-1, type=int, help="node rank for distributed training"
)
parser.add_argument(
"--dist-url",
default="tcp://224.66.41.62:23456",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
parser.add_argument(
"--multiprocessing-distributed",
action="store_true",
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
# moco specific configs:
parser.add_argument(
"--moco-dim", default=128, type=int, help="feature dimension (default: 128)"
)
parser.add_argument(
"--moco-k",
default=65536,
type=int,
help="queue size; number of negative keys (default: 65536)",
)
parser.add_argument(
"--moco-m",
default=0.999,
type=float,
help="moco momentum of updating key encoder (default: 0.999)",
)
parser.add_argument(
"--moco-t",
default=0.2,
type=float,
help="softmax temperature (default: 0.2)",
)
# options for Asymmetry Siamese Representation Learning.
parser.add_argument(
"--enable-scalemix",
default=False,
action="store_true",
help="enable ScaleMix to generate new views of an image by mixing two "
"views of potentially different scales together via binary masking",
)
parser.add_argument(
"--enable-multicrop",
default=False,
action="store_true",
help="enable MultiCrop to take additional views (commonly in lower "
"resolution) from each image per iteration",
)
parser.add_argument(
"--enable-asymm-aug",
default=False,
action="store_true",
help="enable Asymmetrical Augmentation to form an asymmetric augmentation "
"recipes for source and target",
)
parser.add_argument(
"--enable-asym-bn",
default=False,
action="store_true",
help="enable Asymmetrical BN to employ SyncBN to normalizes batch stats "
"over all devices for target decoder",
)
parser.add_argument(
"--enable-mean-encoding",
default=False,
action="store_true",
help="enable Mean Encoding to perform i.i.d. sampling multiple times and "
"take the mean as target encoder output",
)
parser.add_argument(
"--tag",
default="",
type=str,
help="job tag for checkpoint name."
)
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(
main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)
)
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
# create model
print("=> creating model '{}'".format(args.arch))
model = moco.builder.MoCo(
models.__dict__[args.arch],
args.moco_dim,
args.moco_k,
args.moco_m,
args.moco_t,
args.enable_asym_bn,
)
print(model)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(
(args.workers + ngpus_per_node - 1) / ngpus_per_node
)
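# For example, a total batch size of 256 on a node with 8 GPUs gives
# 32 images and 4 data-loading workers per process.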
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu]
)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to
# all available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
# comment out the following line for debugging
raise NotImplementedError("Only DistributedDataParallel is supported.")
else:
# AllGather implementation (batch shuffle, queue update, etc.) in
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, "train")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
if args.enable_multicrop:
ratio_range = (0.14, 1.0)
else:
ratio_range = (0.2, 1.0)
augmentation = [
transforms.RandomResizedCrop(224, scale=ratio_range),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
"""
# --------------------------------------------------------------------------- #
# Asymmetric Augmentations #
# --------------------------------------------------------------------------- #
Asymmetric augmentation recipes pair a stronger augmentation for the source with
a weaker augmentation for the target. Stronger augmentation introduces higher
variance, which hurts the target but helps the source, and vice versa for
weaker augmentation.
# --------------------------------------------------------------------------- #
"""
augmentation_stronger = [
transforms.RandomResizedCrop(224, scale=ratio_range),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
rand_augment_transform(
"rand-m10-n2-mstd0.5", {"translate_const": 100},
),
transforms.ToTensor(),
normalize,
]
augmentation_weaker = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
"""
# --------------------------------------------------------------------------- #
# MultiCrop #
# --------------------------------------------------------------------------- #
Besides the two basic views needed for Siamese learning, MultiCrop takes
additional views from each image per iteration. To alleviate the added
computation cost, a common strategy is to have low-resolution crops
(e.g., 96×96) instead of standard-resolution crops (224×224) as added views.
As a side effect, inputting small crops can potentially increase the variance
for an encoder due to the size and crop-distribution changes.
# --------------------------------------------------------------------------- #
"""
augmentation_mini = [
transforms.RandomResizedCrop(96, scale=(0.05, 0.14)),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
train_dataset = datasets.ImageFolder(
traindir,
moco.loader.CropsTransform(
key_transform=transforms.Compose(augmentation_weaker)
if args.enable_asymm_aug
else transforms.Compose(augmentation),
query_mini_transform=transforms.Compose(augmentation_mini),
query_transform=transforms.Compose(augmentation_stronger)
if args.enable_asymm_aug
else transforms.Compose(augmentation),
enable_scalemix=args.enable_scalemix,
enable_multicrop=args.enable_multicrop,
enable_mean_encoding=args.enable_mean_encoding,
),
)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset
)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
if not args.multiprocessing_distributed or (
args.multiprocessing_distributed and args.rank % ngpus_per_node == 0
):
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
},
is_best=False,
filename="checkpoint_{}_{:04d}.pth.tar".format(args.tag, epoch),
)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, _) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
for j in range(len(images)):
images[j] = images[j].cuda(args.gpu, non_blocking=True)
if args.enable_mean_encoding:
q_mini_ind, k_ind = 1, -2
else:
q_mini_ind, k_ind = 1, -1
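# images comes from CropsTransform: [query] + optional mini crops + key view(s),
# so images[:1] is the query, images[1:k_ind] the mini crops (empty without
# MultiCrop), and images[k_ind:] the one or two key views.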
# compute outputs
outputs, targets = model(
im_q=images[:q_mini_ind],
im_q_mini=images[q_mini_ind:k_ind],
im_k=images[k_ind:],
)
loss = criterion(outputs[0], targets[0])
# Loss for mini multi-crops
if args.enable_multicrop:
loss += sum(
map(
lambda crop: criterion(crop[0], crop[1]),
zip(outputs[q_mini_ind:], targets[q_mini_ind:]),
)
) / len(outputs[q_mini_ind:])
# acc1/acc5 are (K+1)-way contrast classifier accuracy
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs[0], targets[0], topk=(1, 5))
losses.update(loss.item(), images[0].size(0))
top1.update(acc1[0], images[0].size(0))
top5.update(acc5[0], images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
lr *= 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
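# Cosine schedule: e.g., with the default lr=0.03 and epochs=100, the rate is
# 0.03 at epoch 0, 0.015 at epoch 50, and approaches 0 by the final epoch.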
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def accuracy(output, target, topk=(1,)):
"""
Computes the accuracy over the k top predictions for the specified values
of k
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = (
correct[:k].reshape(-1).float().sum(0, keepdim=True)
)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
# --- end of asym-siam-main/main_moco.py ---
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=32,
type=int,
metavar="N",
help="number of data loading workers (default: 32)",
)
parser.add_argument(
"--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=4096,
type=int,
metavar="N",
help="mini-batch size (default: 4096), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial (base) learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=0.0,
type=float,
metavar="W",
help="weight decay (default: 0.)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--world-size",
default=-1,
type=int,
help="number of nodes for distributed training",
)
parser.add_argument(
"--rank", default=-1, type=int, help="node rank for distributed training"
)
parser.add_argument(
"--dist-url",
default="tcp://224.66.41.62:23456",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
parser.add_argument(
"--multiprocessing-distributed",
action="store_true",
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
# additional configs:
parser.add_argument(
"--pretrained", default="", type=str, help="path to simsiam pretrained checkpoint"
)
parser.add_argument("--lars", action="store_true", help="Use LARS")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.distributed.barrier()
# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# freeze all layers but the last fc
for name, param in model.named_parameters():
if name not in ["fc.weight", "fc.bias"]:
param.requires_grad = False
# init the fc layer
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
# load from pre-trained, before DistributedDataParallel constructor
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
# rename moco pre-trained keys
state_dict = checkpoint["state_dict"]
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith("module.encoder_q") and not k.startswith(
"module.encoder_q.fc"
):
# remove prefix
state_dict[k[len("module.encoder_q.") :]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
args.start_epoch = 0
msg = model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
# infer learning rate before changing batch size
init_lr = args.lr * args.batch_size / 256
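# Linear scaling rule: e.g., the default --lr 0.1 with --batch-size 4096 gives
# init_lr = 0.1 * 4096 / 256 = 1.6.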
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu]
)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optimize only the linear classifier
parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
assert len(parameters) == 2 # fc.weight, fc.bias
optimizer = torch.optim.SGD(
parameters, init_lr, momentum=args.momentum, weight_decay=args.weight_decay
)
if args.lars:
print("=> use LARS optimizer.")
from apex.parallel.LARC import LARC
optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, "train")
valdir = os.path.join(args.data, "val")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose(
[
transforms.RandomResizedCrop(224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=256,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, init_lr, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (
args.multiprocessing_distributed and args.rank % ngpus_per_node == 0
):
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
if epoch == args.start_epoch:
sanity_check(model.state_dict(), args.pretrained)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
"""
Switch to eval mode:
Under the protocol of linear classification on frozen features/models,
it is not legitimate to change any part of the pre-trained model.
BatchNorm in train mode may revise running mean/std (even if it receives
no gradient), which are part of the model parameters too.
"""
model.eval()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def sanity_check(state_dict, pretrained_weights):
"""
Linear classifier should not change any weights other than the linear layer.
This sanity check asserts nothing wrong happens (e.g., BN stats updated).
"""
print("=> loading '{}' for sanity check".format(pretrained_weights))
checkpoint = torch.load(pretrained_weights, map_location="cpu")
state_dict_pre = checkpoint["state_dict"]
for k in list(state_dict.keys()):
# only ignore fc layer
if "fc.weight" in k or "fc.bias" in k:
continue
# name in pretrained model
k_pre = (
"module.encoder_q." + k[len("module.") :]
if k.startswith("module.")
else "module.encoder_q." + k
)
assert (
state_dict[k].cpu() == state_dict_pre[k_pre]
).all(), "{} is changed in linear classifier training.".format(k)
print("=> sanity check passed.")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, init_lr, epoch, args):
"""Decay the learning rate based on schedule"""
cur_lr = init_lr * 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
for param_group in optimizer.param_groups:
param_group["lr"] = cur_lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
# --- end of asym-siam-main/main_lincls.py ---
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# --- end of asym-siam-main/moco/__init__.py ---
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(
self,
base_encoder,
dim=128,
K=65536,
m=0.999,
T=0.07,
enable_asym_bn=False,
):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(num_classes=dim)
self.encoder_k = base_encoder(num_classes=dim)
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp),
nn.BatchNorm1d(dim_mlp),
nn.ReLU(),
nn.Linear(dim_mlp, dim_mlp),
nn.ReLU(),
self.encoder_q.fc,
)
self.encoder_k.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp),
nn.BatchNorm1d(dim_mlp),
nn.ReLU(),
nn.Linear(dim_mlp, dim_mlp),
nn.ReLU(),
self.encoder_k.fc,
)
"""
# --------------------------------------------------------------------------- #
# Sync BatchNorm #
# --------------------------------------------------------------------------- #
Intermediate Sync BatchNorm layers are a way to reduce intra-image variance in
the target encoder. Sync BatchNorm leads to a notable improvement when applied to
the target (referred to as 'AsymBN' in our paper) and to degradation when applied
to the source.
# --------------------------------------------------------------------------- #
"""
if enable_asym_bn:
process_group = create_syncbn_process_group(8)
self.encoder_k.fc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(
self.encoder_k.fc, process_group
)
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q, im_q_mini, im_k):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q_large = []
for im in im_q:
_q = self.encoder_q(im) # queries: NxC
_q = nn.functional.normalize(_q, dim=1)
q_large.append(_q)
q_mini = []
for im in im_q_mini:
_q_mini = self.encoder_q(im) # queries: NxC
_q_mini = nn.functional.normalize(_q_mini, dim=1)
q_mini.append(_q_mini)
"""
# --------------------------------------------------------------------------- #
# Mean Encoding #
# --------------------------------------------------------------------------- #
Mean Encoding is a direct approach to reduce the variance of a random variable
by performing i.i.d. sampling multiple times and taking the mean as the new
variable. Mean Encoding is simply generated by running the same encoder on
multiple augmented views of the same image.
# --------------------------------------------------------------------------- #
"""
crop_num = len(im_k)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
im_k = torch.cat(im_k, dim=0)
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
cur_size, embedding_length = k.shape
k = k.view(crop_num, cur_size // crop_num, embedding_length)
k = nn.functional.normalize(torch.mean(k, dim=0), dim=1)
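# The crop_num key views of each image (1 without Mean Encoding, 2 with it)
# are averaged and re-normalized, yielding a single lower-variance target per image.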
logits_list = []
labels_list = []
for q in q_large + q_mini:
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
logits_list.append(logits)
labels_list.append(labels)
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits_list, labels_list
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def create_syncbn_process_group(num_gpu_per_group):
if num_gpu_per_group == 0:
return None
world_size = torch.distributed.get_world_size()
assert world_size >= num_gpu_per_group
assert world_size % num_gpu_per_group == 0
group = None
for group_num in range(world_size // num_gpu_per_group):
group_ids = range(
group_num * num_gpu_per_group, (group_num + 1) * num_gpu_per_group
)
cur_group = torch.distributed.new_group(ranks=group_ids)
if torch.distributed.get_rank() // num_gpu_per_group == group_num:
group = cur_group
assert group is not None
return group
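# For example, with a world size of 32 and num_gpu_per_group=8, four process groups
# of 8 consecutive ranks are created and each rank returns the group containing it.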
# --- end of asym-siam-main/moco/builder.py ---
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
import numpy as np
"""
# --------------------------------------------------------------------------- #
# ScaleMix #
# --------------------------------------------------------------------------- #
ScaleMix generates new views of an image by mixing two views of potentially
different scales together via binary masking. The masking strategy follows
CutMix, where an entire region - denoted by a box with randomly sampled
coordinates - is cropped and pasted. Unlike CutMix, ScaleMix only operates on
views from the same image, and the output is a single view of standard size
(224x224). This single view can be regarded as an efficient approximation of
MultiCrop, without the need to process small crops separately.
# --------------------------------------------------------------------------- #
"""
def scalemix(view1, view2):
def random_bbox(lam, H, W):
cut_rat = np.sqrt(1.0 - lam)
cut_w = int(W * cut_rat)  # use builtin int; np.int was removed from recent NumPy
cut_h = int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
_, h, w = view1.shape
lam = np.random.uniform(low=0.0, high=1.0)
bbx1, bby1, bbx2, bby2 = random_bbox(lam, h, w)
view1[:, bbx1:bbx2, bby1:bby2] = view2[:, bbx1:bbx2, bby1:bby2]
return view1
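# Usage sketch (hypothetical names): given two augmented views v1, v2 of shape
# (3, 224, 224) from the same image, scalemix(v1, v2) pastes a randomly sized box
# from v2 into v1 in place and returns the mixed view.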
class CropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(
self,
key_transform,
query_mini_transform,
query_transform,
enable_scalemix=False,
enable_multicrop=False,
enable_mean_encoding=False,
):
self.key_transform = key_transform
self.query_mini_transform = query_mini_transform
self.query_transform = query_transform
self.enable_scalemix = enable_scalemix
self.enable_multicrop = enable_multicrop
self.enable_mean_encoding = enable_mean_encoding
def __call__(self, x):
crops = []
# Query crop
if self.enable_scalemix:
q = scalemix(self.query_transform(x), self.query_transform(x),)
else:
q = self.query_transform(x)
crops.append(q)
# Query mini crops
if self.enable_multicrop:
for i in range(6):
crops.append(self.query_mini_transform(x))
# Key crop
crops.append(self.key_transform(x))
if self.enable_mean_encoding:
crops.append(self.key_transform(x))
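# Resulting order: [query] + (6 mini crops if MultiCrop) + [key]
# + (a second key view if Mean Encoding), matching the slicing in main_moco.train.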
return crops
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
# --- end of asym-siam-main/moco/loader.py ---
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse, json, os
"""
During rendering, each CLEVR scene file is dumped to disk as a separate JSON
file; this is convenient for distributing rendering across multiple machines.
This script collects all CLEVR scene files stored in a directory and combines
them into a single JSON file. This script also adds the version number, date,
and license to the output file.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='output/scenes')
parser.add_argument('--output_file', default='output/CLEVR_misc_scenes.json')
parser.add_argument('--version', default='1.0')
parser.add_argument('--date', default='7/8/2017')
parser.add_argument('--license',
default='Creative Commons Attribution (CC-BY 4.0)')
def main(args):
input_files = os.listdir(args.input_dir)
scenes = []
split = None
for filename in os.listdir(args.input_dir):
if not filename.endswith('.json'):
continue
path = os.path.join(args.input_dir, filename)
with open(path, 'r') as f:
scene = json.load(f)
scenes.append(scene)
if split is not None:
msg = 'Input directory contains scenes from multiple splits'
assert scene['split'] == split, msg
else:
split = scene['split']
scenes.sort(key=lambda s: s['image_index'])
for s in scenes:
print(s['image_filename'])
output = {
'info': {
'date': args.date,
'version': args.version,
'split': split,
'license': args.license,
},
'scenes': scenes
}
with open(args.output_file, 'w') as f:
json.dump(output, f)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
# --- end of clevr-dataset-gen-main/image_generation/collect_scenes.py ---
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import math, sys, random, argparse, json, os, tempfile
from datetime import datetime as dt
from collections import Counter
"""
Renders random scenes using Blender, each with a random number of objects;
each object has a random size, position, color, and shape. Objects will be
nonintersecting but may partially occlude each other. Output images will be
written to disk as PNGs, and we will also write a JSON file for each image with
ground-truth scene information.
This file expects to be run from Blender like this:
blender --background --python render_images.py -- [arguments to this script]
"""
INSIDE_BLENDER = True
try:
import bpy, bpy_extras
from mathutils import Vector
except ImportError as e:
INSIDE_BLENDER = False
if INSIDE_BLENDER:
try:
import utils
except ImportError as e:
print("\nERROR")
print("Running render_images.py from Blender and cannot import utils.py.")
print("You may need to add a .pth file to the site-packages of Blender's")
print("bundled python with a command like this:\n")
print("echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth")
print("\nWhere $BLENDER is the directory where Blender is installed, and")
print("$VERSION is your Blender version (such as 2.78).")
sys.exit(1)
parser = argparse.ArgumentParser()
# Input options
parser.add_argument('--base_scene_blendfile', default='data/base_scene.blend',
help="Base blender file on which all scenes are based; includes " +
"ground plane, lights, and camera.")
parser.add_argument('--properties_json', default='data/properties.json',
help="JSON file defining objects, materials, sizes, and colors. " +
"The \"colors\" field maps from CLEVR color names to RGB values; " +
"The \"sizes\" field maps from CLEVR size names to scalars used to " +
"rescale object models; the \"materials\" and \"shapes\" fields map " +
"from CLEVR material and shape names to .blend files in the " +
"--object_material_dir and --shape_dir directories respectively.")
parser.add_argument('--shape_dir', default='data/shapes',
help="Directory where .blend files for object models are stored")
parser.add_argument('--material_dir', default='data/materials',
help="Directory where .blend files for materials are stored")
parser.add_argument('--shape_color_combos_json', default=None,
help="Optional path to a JSON file mapping shape names to a list of " +
"allowed color names for that shape. This allows rendering images " +
"for CLEVR-CoGenT.")
# Settings for objects
parser.add_argument('--min_objects', default=3, type=int,
help="The minimum number of objects to place in each scene")
parser.add_argument('--max_objects', default=10, type=int,
help="The maximum number of objects to place in each scene")
parser.add_argument('--min_dist', default=0.25, type=float,
help="The minimum allowed distance between object centers")
parser.add_argument('--margin', default=0.4, type=float,
help="Along all cardinal directions (left, right, front, back), all " +
"objects will be at least this distance apart. This makes resolving " +
"spatial relationships slightly less ambiguous.")
parser.add_argument('--min_pixels_per_object', default=200, type=int,
help="All objects will have at least this many visible pixels in the " +
"final rendered images; this ensures that no objects are fully " +
"occluded by other objects.")
parser.add_argument('--max_retries', default=50, type=int,
help="The number of times to try placing an object before giving up and " +
"re-placing all objects in the scene.")
# Output settings
parser.add_argument('--start_idx', default=0, type=int,
help="The index at which to start for numbering rendered images. Setting " +
"this to non-zero values allows you to distribute rendering across " +
"multiple machines and recombine the results later.")
parser.add_argument('--num_images', default=5, type=int,
help="The number of images to render")
parser.add_argument('--filename_prefix', default='CLEVR',
help="This prefix will be prepended to the rendered images and JSON scenes")
parser.add_argument('--split', default='new',
help="Name of the split for which we are rendering. This will be added to " +
"the names of rendered images, and will also be stored in the JSON " +
"scene structure for each image.")
parser.add_argument('--output_image_dir', default='../output/images/',
help="The directory where output images will be stored. It will be " +
"created if it does not exist.")
parser.add_argument('--output_scene_dir', default='../output/scenes/',
help="The directory where output JSON scene structures will be stored. " +
"It will be created if it does not exist.")
parser.add_argument('--output_scene_file', default='../output/CLEVR_scenes.json',
help="Path to write a single JSON file containing all scene information")
parser.add_argument('--output_blend_dir', default='output/blendfiles',
help="The directory where blender scene files will be stored, if the " +
"user requested that these files be saved using the " +
"--save_blendfiles flag; in this case it will be created if it does " +
"not already exist.")
parser.add_argument('--save_blendfiles', type=int, default=0,
help="Setting --save_blendfiles 1 will cause the blender scene file for " +
"each generated image to be stored in the directory specified by " +
"the --output_blend_dir flag. These files are not saved by default " +
"because they take up ~5-10MB each.")
parser.add_argument('--version', default='1.0',
help="String to store in the \"version\" field of the generated JSON file")
parser.add_argument('--license',
default="Creative Commons Attribution (CC-BY 4.0)",
help="String to store in the \"license\" field of the generated JSON file")
parser.add_argument('--date', default=dt.today().strftime("%m/%d/%Y"),
help="String to store in the \"date\" field of the generated JSON file; " +
"defaults to today's date")
# Rendering options
parser.add_argument('--use_gpu', default=0, type=int,
help="Setting --use_gpu 1 enables GPU-accelerated rendering using CUDA. " +
"You must have an NVIDIA GPU with the CUDA toolkit installed for " +
"to work.")
parser.add_argument('--width', default=320, type=int,
help="The width (in pixels) for the rendered images")
parser.add_argument('--height', default=240, type=int,
help="The height (in pixels) for the rendered images")
parser.add_argument('--key_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the key light position.")
parser.add_argument('--fill_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the fill light position.")
parser.add_argument('--back_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the back light position.")
parser.add_argument('--camera_jitter', default=0.5, type=float,
help="The magnitude of random jitter to add to the camera position")
parser.add_argument('--render_num_samples', default=512, type=int,
help="The number of samples to use when rendering. Larger values will " +
"result in nicer images but will cause rendering to take longer.")
parser.add_argument('--render_min_bounces', default=8, type=int,
help="The minimum number of bounces to use for rendering.")
parser.add_argument('--render_max_bounces', default=8, type=int,
help="The maximum number of bounces to use for rendering.")
parser.add_argument('--render_tile_size', default=256, type=int,
help="The tile size to use for rendering. This should not affect the " +
"quality of the rendered image but may affect the speed; CPU-based " +
"rendering may achieve better performance using smaller tile sizes " +
"while larger tile sizes may be optimal for GPU-based rendering.")
def main(args):
num_digits = 6
prefix = '%s_%s_' % (args.filename_prefix, args.split)
img_template = '%s%%0%dd.png' % (prefix, num_digits)
scene_template = '%s%%0%dd.json' % (prefix, num_digits)
blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
img_template = os.path.join(args.output_image_dir, img_template)
scene_template = os.path.join(args.output_scene_dir, scene_template)
blend_template = os.path.join(args.output_blend_dir, blend_template)
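# For example, with the default --filename_prefix CLEVR and --split new, the first
# rendered image is written as CLEVR_new_000000.png in args.output_image_dir.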
if not os.path.isdir(args.output_image_dir):
os.makedirs(args.output_image_dir)
if not os.path.isdir(args.output_scene_dir):
os.makedirs(args.output_scene_dir)
if args.save_blendfiles == 1 and not os.path.isdir(args.output_blend_dir):
os.makedirs(args.output_blend_dir)
all_scene_paths = []
for i in range(args.num_images):
img_path = img_template % (i + args.start_idx)
scene_path = scene_template % (i + args.start_idx)
all_scene_paths.append(scene_path)
blend_path = None
if args.save_blendfiles == 1:
blend_path = blend_template % (i + args.start_idx)
num_objects = random.randint(args.min_objects, args.max_objects)
render_scene(args,
num_objects=num_objects,
output_index=(i + args.start_idx),
output_split=args.split,
output_image=img_path,
output_scene=scene_path,
output_blendfile=blend_path,
)
# After rendering all images, combine the JSON files for each scene into a
# single JSON file.
all_scenes = []
for scene_path in all_scene_paths:
with open(scene_path, 'r') as f:
all_scenes.append(json.load(f))
output = {
'info': {
'date': args.date,
'version': args.version,
'split': args.split,
'license': args.license,
},
'scenes': all_scenes
}
with open(args.output_scene_file, 'w') as f:
json.dump(output, f)
def render_scene(args,
num_objects=5,
output_index=0,
output_split='none',
output_image='render.png',
output_scene='render_json',
output_blendfile=None,
):
# Load the main blendfile
bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
# Load materials
utils.load_materials(args.material_dir)
# Set render arguments so we can get pixel coordinates later.
# We use functionality specific to the CYCLES renderer so BLENDER_RENDER
# cannot be used.
render_args = bpy.context.scene.render
render_args.engine = "CYCLES"
render_args.filepath = output_image
render_args.resolution_x = args.width
render_args.resolution_y = args.height
render_args.resolution_percentage = 100
render_args.tile_x = args.render_tile_size
render_args.tile_y = args.render_tile_size
if args.use_gpu == 1:
# Blender changed the API for enabling CUDA at some point
if bpy.app.version < (2, 78, 0):
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
else:
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'CUDA'
# Some CYCLES-specific stuff
bpy.data.worlds['World'].cycles.sample_as_light = True
bpy.context.scene.cycles.blur_glossy = 2.0
bpy.context.scene.cycles.samples = args.render_num_samples
bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
if args.use_gpu == 1:
bpy.context.scene.cycles.device = 'GPU'
# This will give ground-truth information about the scene and its objects
scene_struct = {
'split': output_split,
'image_index': output_index,
'image_filename': os.path.basename(output_image),
'objects': [],
'directions': {},
}
# Put a plane on the ground so we can compute cardinal directions
bpy.ops.mesh.primitive_plane_add(radius=5)
plane = bpy.context.object
def rand(L):
return 2.0 * L * (random.random() - 0.5)
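# rand(L) draws a uniform jitter in [-L, L), used below for camera and lamp positions.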
# Add random jitter to camera position
if args.camera_jitter > 0:
for i in range(3):
bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)
# Figure out the left, up, and behind directions along the plane and record
# them in the scene structure
camera = bpy.data.objects['Camera']
plane_normal = plane.data.vertices[0].normal
cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
plane_up = cam_up.project(plane_normal).normalized()
# Delete the plane; we only used it for normals anyway. The base scene file
# contains the actual ground plane.
utils.delete_object(plane)
# Save all six axis-aligned directions in the scene struct
scene_struct['directions']['behind'] = tuple(plane_behind)
scene_struct['directions']['front'] = tuple(-plane_behind)
scene_struct['directions']['left'] = tuple(plane_left)
scene_struct['directions']['right'] = tuple(-plane_left)
scene_struct['directions']['above'] = tuple(plane_up)
scene_struct['directions']['below'] = tuple(-plane_up)
# Add random jitter to lamp positions
if args.key_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
if args.back_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
if args.fill_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)
# Now make some random objects
objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera)
# Render the scene and dump the scene data structure
scene_struct['objects'] = objects
scene_struct['relationships'] = compute_all_relationships(scene_struct)
while True:
try:
bpy.ops.render.render(write_still=True)
break
except Exception as e:
print(e)
with open(output_scene, 'w') as f:
json.dump(scene_struct, f, indent=2)
if output_blendfile is not None:
bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
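def _plane_direction_example():
  # Illustrative sketch (not part of the original script): the 'behind'/'left'
  # directions saved above are camera axes with their component along the
  # ground-plane normal removed ('behind', 'left') or kept ('above') and then
  # renormalized. Shown here with plain Python instead of mathutils: projecting
  # a hypothetical camera axis (0.0, 0.6, -0.8) onto the plane with normal
  # (0, 0, 1) keeps the in-plane part (0.0, 0.6, 0.0), which normalizes to
  # (0.0, 1.0, 0.0).
  v = (0.0, 0.6, -0.8)
  n = (0.0, 0.0, 1.0)
  dot = sum(a * b for a, b in zip(v, n))
  in_plane = tuple(a - dot * b for a, b in zip(v, n))
  norm = math.sqrt(sum(a * a for a in in_plane))
  unit = tuple(a / norm for a in in_plane)
  assert unit == (0.0, 1.0, 0.0)
  return unit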
def add_random_objects(scene_struct, num_objects, args, camera):
"""
Add random objects to the current blender scene
"""
# Load the property file
with open(args.properties_json, 'r') as f:
properties = json.load(f)
color_name_to_rgba = {}
for name, rgb in properties['colors'].items():
rgba = [float(c) / 255.0 for c in rgb] + [1.0]
color_name_to_rgba[name] = rgba
material_mapping = [(v, k) for k, v in properties['materials'].items()]
object_mapping = [(v, k) for k, v in properties['shapes'].items()]
size_mapping = list(properties['sizes'].items())
shape_color_combos = None
if args.shape_color_combos_json is not None:
with open(args.shape_color_combos_json, 'r') as f:
shape_color_combos = list(json.load(f).items())
positions = []
objects = []
blender_objects = []
for i in range(num_objects):
# Choose a random size
size_name, r = random.choice(size_mapping)
# Try to place the object, ensuring that we don't intersect any existing
# objects and that we are more than the desired margin away from all existing
# objects along all cardinal directions.
num_tries = 0
while True:
# If we try and fail to place an object too many times, then delete all
# the objects in the scene and start over.
num_tries += 1
if num_tries > args.max_retries:
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
x = random.uniform(-3, 3)
y = random.uniform(-3, 3)
# Check to make sure the new object is further than min_dist from all
# other objects, and further than margin along the four cardinal directions
dists_good = True
margins_good = True
for (xx, yy, rr) in positions:
dx, dy = x - xx, y - yy
dist = math.sqrt(dx * dx + dy * dy)
if dist - r - rr < args.min_dist:
dists_good = False
break
for direction_name in ['left', 'right', 'front', 'behind']:
direction_vec = scene_struct['directions'][direction_name]
assert direction_vec[2] == 0
margin = dx * direction_vec[0] + dy * direction_vec[1]
if 0 < margin < args.margin:
print(margin, args.margin, direction_name)
print('BROKEN MARGIN!')
margins_good = False
break
if not margins_good:
break
if dists_good and margins_good:
break
# Choose random color and shape
if shape_color_combos is None:
obj_name, obj_name_out = random.choice(object_mapping)
color_name, rgba = random.choice(list(color_name_to_rgba.items()))
else:
obj_name_out, color_choices = random.choice(shape_color_combos)
color_name = random.choice(color_choices)
obj_name = [k for k, v in object_mapping if v == obj_name_out][0]
rgba = color_name_to_rgba[color_name]
# For cube, adjust the size a bit
if obj_name == 'Cube':
r /= math.sqrt(2)
# Choose random orientation for the object.
theta = 360.0 * random.random()
# Actually add the object to the scene
utils.add_object(args.shape_dir, obj_name, r, (x, y), theta=theta)
obj = bpy.context.object
blender_objects.append(obj)
positions.append((x, y, r))
# Attach a random material
mat_name, mat_name_out = random.choice(material_mapping)
utils.add_material(mat_name, Color=rgba)
# Record data about the object in the scene data structure
pixel_coords = utils.get_camera_coords(camera, obj.location)
objects.append({
'shape': obj_name_out,
'size': size_name,
'material': mat_name_out,
'3d_coords': tuple(obj.location),
'rotation': theta,
'pixel_coords': pixel_coords,
'color': color_name,
})
# Check that all objects are at least partially visible in the rendered image
all_visible = check_visibility(blender_objects, args.min_pixels_per_object)
if not all_visible:
# If any of the objects are fully occluded then start over; delete all
# objects from the scene and place them all again.
print('Some objects are occluded; replacing objects')
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
return objects, blender_objects
def compute_all_relationships(scene_struct, eps=0.2):
"""
Computes relationships between all pairs of objects in the scene.
Returns a dictionary mapping string relationship names to lists of lists of
integers, where output[rel][i] gives a list of object indices that have the
relationship rel with object i. For example if j is in output['left'][i] then
object j is left of object i.
"""
all_relationships = {}
for name, direction_vec in scene_struct['directions'].items():
if name == 'above' or name == 'below': continue
all_relationships[name] = []
for i, obj1 in enumerate(scene_struct['objects']):
coords1 = obj1['3d_coords']
related = set()
for j, obj2 in enumerate(scene_struct['objects']):
if obj1 == obj2: continue
coords2 = obj2['3d_coords']
diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
if dot > eps:
related.add(j)
all_relationships[name].append(sorted(list(related)))
return all_relationships
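def _relationships_example():
  # Illustrative sketch (not part of the original script): run
  # compute_all_relationships on a hypothetical two-object scene whose 'left'
  # direction is the -x axis. Object 1 sits at smaller x than object 0, so it
  # appears in output['left'][0], i.e. "object 1 is left of object 0".
  toy_scene = {
      'directions': {
          'left': (-1.0, 0.0, 0.0), 'right': (1.0, 0.0, 0.0),
          'front': (0.0, -1.0, 0.0), 'behind': (0.0, 1.0, 0.0),
          'above': (0.0, 0.0, 1.0), 'below': (0.0, 0.0, -1.0),
      },
      'objects': [
          {'3d_coords': (1.0, 0.0, 0.35)},
          {'3d_coords': (-1.0, 0.0, 0.35)},
      ],
  }
  rels = compute_all_relationships(toy_scene)
  assert rels['left'][0] == [1] and rels['right'][1] == [0]
  return rels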
def check_visibility(blender_objects, min_pixels_per_object):
"""
Check whether all objects in the scene have some minimum number of visible
pixels; to accomplish this we assign random (but distinct) colors to all
objects, and render using no lighting or shading or antialiasing; this
ensures that each object is just a solid uniform color. We can then count
the number of pixels of each color in the output image to check the visibility
of each object.
Returns True if all objects are visible and False otherwise.
"""
f, path = tempfile.mkstemp(suffix='.png')
object_colors = render_shadeless(blender_objects, path=path)
img = bpy.data.images.load(path)
p = list(img.pixels)
color_count = Counter((p[i], p[i+1], p[i+2], p[i+3])
for i in range(0, len(p), 4))
os.remove(path)
if len(color_count) != len(blender_objects) + 1:
return False
for _, count in color_count.most_common():
if count < min_pixels_per_object:
return False
return True
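def _visibility_count_example():
  # Illustrative sketch (not part of the original script): the visibility check
  # above simply counts pixels per flat color. For a hypothetical 4-pixel RGBA
  # buffer containing three background pixels and one object pixel, Counter
  # yields counts {background: 3, object: 1}, so the object only passes when
  # min_pixels_per_object <= 1.
  toy_pixels = [0, 0, 0, 1,  0, 0, 0, 1,  0, 0, 0, 1,  1, 0, 0, 1]
  color_count = Counter((toy_pixels[i], toy_pixels[i + 1],
                         toy_pixels[i + 2], toy_pixels[i + 3])
                        for i in range(0, len(toy_pixels), 4))
  assert color_count[(0, 0, 0, 1)] == 3 and color_count[(1, 0, 0, 1)] == 1
  return color_count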
def render_shadeless(blender_objects, path='flat.png'):
"""
Render a version of the scene with shading disabled and unique materials
assigned to all objects, and return a set of all colors that should be in the
rendered image. The image itself is written to path. This is used to ensure
that all objects will be visible in the final rendered scene.
"""
render_args = bpy.context.scene.render
# Cache the render args we are about to clobber
old_filepath = render_args.filepath
old_engine = render_args.engine
old_use_antialiasing = render_args.use_antialiasing
# Override some render settings to have flat shading
render_args.filepath = path
render_args.engine = 'BLENDER_RENDER'
render_args.use_antialiasing = False
# Move the lights and ground to layer 2 so they don't render
utils.set_layer(bpy.data.objects['Lamp_Key'], 2)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)
utils.set_layer(bpy.data.objects['Lamp_Back'], 2)
utils.set_layer(bpy.data.objects['Ground'], 2)
# Add random shadeless materials to all objects
object_colors = set()
old_materials = []
for i, obj in enumerate(blender_objects):
old_materials.append(obj.data.materials[0])
bpy.ops.material.new()
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % i
while True:
r, g, b = [random.random() for _ in range(3)]
if (r, g, b) not in object_colors: break
object_colors.add((r, g, b))
mat.diffuse_color = [r, g, b]
mat.use_shadeless = True
obj.data.materials[0] = mat
# Render the scene
bpy.ops.render.render(write_still=True)
# Undo the above; first restore the materials to objects
for mat, obj in zip(old_materials, blender_objects):
obj.data.materials[0] = mat
# Move the lights and ground back to layer 0
utils.set_layer(bpy.data.objects['Lamp_Key'], 0)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)
utils.set_layer(bpy.data.objects['Lamp_Back'], 0)
utils.set_layer(bpy.data.objects['Ground'], 0)
# Set the render settings back to what they were
render_args.filepath = old_filepath
render_args.engine = old_engine
render_args.use_antialiasing = old_use_antialiasing
return object_colors
if __name__ == '__main__':
if INSIDE_BLENDER:
# Run normally
argv = utils.extract_args()
args = parser.parse_args(argv)
main(args)
elif '--help' in sys.argv or '-h' in sys.argv:
parser.print_help()
else:
print('This script is intended to be called from blender like this:')
print()
print('blender --background --python render_images.py -- [args]')
print()
print('You can also run as a standalone python script to view all')
print('arguments like this:')
print()
print('python render_images.py --help')
|
clevr-dataset-gen-main
|
image_generation/render_images.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import sys, random, os
import bpy, bpy_extras
"""
Some utility functions for interacting with Blender
"""
def extract_args(input_argv=None):
"""
Pull out command-line arguments after "--". Blender ignores command-line flags
after --, so this lets us forward command line arguments from the blender
invocation to our own script.
"""
if input_argv is None:
input_argv = sys.argv
output_argv = []
if '--' in input_argv:
idx = input_argv.index('--')
output_argv = input_argv[(idx + 1):]
return output_argv
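def _extract_args_example():
  # Illustrative sketch (not part of the original utils.py): everything after
  # the first "--" in a hypothetical Blender invocation is forwarded to our own
  # argument parser.
  demo_argv = ['blender', '--background', '--python', 'render_images.py',
               '--', '--num_images', '10', '--use_gpu', '1']
  forwarded = extract_args(demo_argv)
  assert forwarded == ['--num_images', '10', '--use_gpu', '1']
  return forwarded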
def parse_args(parser, argv=None):
return parser.parse_args(extract_args(argv))
# I wonder if there's a better way to do this?
def delete_object(obj):
""" Delete a specified blender object """
for o in bpy.data.objects:
o.select = False
obj.select = True
bpy.ops.object.delete()
def get_camera_coords(cam, pos):
"""
For a specified point, get both the 3D coordinates and 2D pixel-space
coordinates of the point from the perspective of the camera.
Inputs:
- cam: Camera object
- pos: Vector giving 3D world-space position
Returns a tuple of:
- (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
in the range [-1, 1]
"""
scene = bpy.context.scene
x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
scale = scene.render.resolution_percentage / 100.0
w = int(scale * scene.render.resolution_x)
h = int(scale * scene.render.resolution_y)
px = int(round(x * w))
py = int(round(h - y * h))
return (px, py, z)
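def _camera_coords_example():
  # Illustrative sketch (not part of the original utils.py): the pixel mapping
  # used above, without Blender. world_to_camera_view returns normalized (x, y)
  # in [0, 1] with y measured from the bottom of the frame, so a point at
  # (0.5, 0.5) in a hypothetical 320x240 render maps to pixel (160, 120).
  x, y = 0.5, 0.5
  w, h = 320, 240
  px = int(round(x * w))
  py = int(round(h - y * h))
  assert (px, py) == (160, 120)
  return (px, py)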
def set_layer(obj, layer_idx):
""" Move an object to a particular layer """
# Set the target layer to True first because an object must always be on
# at least one layer.
obj.layers[layer_idx] = True
for i in range(len(obj.layers)):
obj.layers[i] = (i == layer_idx)
def add_object(object_dir, name, scale, loc, theta=0):
"""
Load an object from a file. We assume that in the directory object_dir, there
is a file named "$name.blend" which contains a single object named "$name"
that has unit size and is centered at the origin.
- scale: scalar giving the size that the object should be in the scene
- loc: tuple (x, y) giving the coordinates on the ground plane where the
object should be placed.
"""
# First figure out how many of this object are already in the scene so we can
# give the new object a unique name
count = 0
for obj in bpy.data.objects:
if obj.name.startswith(name):
count += 1
filename = os.path.join(object_dir, '%s.blend' % name, 'Object', name)
bpy.ops.wm.append(filename=filename)
# Give it a new name to avoid conflicts
new_name = '%s_%d' % (name, count)
bpy.data.objects[name].name = new_name
# Set the new object as active, then rotate, scale, and translate it
x, y = loc
bpy.context.scene.objects.active = bpy.data.objects[new_name]
bpy.context.object.rotation_euler[2] = theta
bpy.ops.transform.resize(value=(scale, scale, scale))
bpy.ops.transform.translate(value=(x, y, scale))
def load_materials(material_dir):
"""
Load materials from a directory. We assume that the directory contains .blend
files with one material each. The file X.blend has a single NodeTree item named
X; this NodeTree item must have a "Color" input that accepts an RGBA value.
"""
for fn in os.listdir(material_dir):
if not fn.endswith('.blend'): continue
name = os.path.splitext(fn)[0]
filepath = os.path.join(material_dir, fn, 'NodeTree', name)
bpy.ops.wm.append(filename=filepath)
def add_material(name, **properties):
"""
Create a new material and assign it to the active object. "name" should be the
name of a material that has been previously loaded using load_materials.
"""
# Figure out how many materials are already in the scene
mat_count = len(bpy.data.materials)
# Create a new material; it is not attached to anything and
# it will be called "Material"
bpy.ops.material.new()
# Get a reference to the material we just created and rename it;
# then the next time we make a new material it will still be called
# "Material" and we will still be able to look it up by name
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % mat_count
# Attach the new material to the active object
# Make sure it doesn't already have materials
obj = bpy.context.active_object
assert len(obj.data.materials) == 0
obj.data.materials.append(mat)
# Find the output node of the new material
output_node = None
for n in mat.node_tree.nodes:
if n.name == 'Material Output':
output_node = n
break
# Add a new GroupNode to the node tree of the active material,
# and copy the node tree from the preloaded node group to the
# new group node. This copying seems to happen by-value, so
# we can create multiple materials of the same type without them
# clobbering each other
group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
group_node.node_tree = bpy.data.node_groups[name]
# Find and set the "Color" input of the new group node
for inp in group_node.inputs:
if inp.name in properties:
inp.default_value = properties[inp.name]
# Wire the output of the new group node to the input of
# the MaterialOutput node
mat.node_tree.links.new(
group_node.outputs['Shader'],
output_node.inputs['Surface'],
)
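# Hypothetical usage sketch (not part of the original utils.py): after
# load_materials('data/materials') has appended a node group named "Rubber",
# calling
#   add_material('Rubber', Color=[0.2, 0.5, 0.7, 1.0])
# on the active object creates a fresh material, copies the "Rubber" node group
# into it, sets its "Color" input to the given RGBA value, and wires its shader
# output into the Material Output node.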
|
clevr-dataset-gen-main
|
image_generation/utils.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json, os, math
from collections import defaultdict
"""
Utilities for working with function program representations of questions.
Some of the metadata about which question node types are available, etc., is
stored in a JSON metadata file.
"""
# Handlers for answering questions. Each handler receives the scene structure
# that was output from Blender, the node, and a list of values that were output
# from each of the node's inputs; the handler should return the computed output
# value from this node.
def scene_handler(scene_struct, inputs, side_inputs):
# Just return all objects in the scene
return list(range(len(scene_struct['objects'])))
def make_filter_handler(attribute):
def filter_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
value = side_inputs[0]
output = []
for idx in inputs[0]:
atr = scene_struct['objects'][idx][attribute]
if value == atr or value in atr:
output.append(idx)
return output
return filter_handler
def unique_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
if len(inputs[0]) != 1:
return '__INVALID__'
return inputs[0][0]
def vg_relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
output = set()
for rel in scene_struct['relationships']:
if rel['predicate'] == side_inputs[0] and rel['subject_idx'] == inputs[0]:
output.add(rel['object_idx'])
return sorted(list(output))
def relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
relation = side_inputs[0]
return scene_struct['relationships'][relation][inputs[0]]
def union_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) | set(inputs[1])))
def intersect_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) & set(inputs[1])))
def count_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
return len(inputs[0])
def make_same_attr_handler(attribute):
def same_attr_handler(scene_struct, inputs, side_inputs):
cache_key = '_same_%s' % attribute
if cache_key not in scene_struct:
cache = {}
for i, obj1 in enumerate(scene_struct['objects']):
same = []
for j, obj2 in enumerate(scene_struct['objects']):
if i != j and obj1[attribute] == obj2[attribute]:
same.append(j)
cache[i] = same
scene_struct[cache_key] = cache
cache = scene_struct[cache_key]
assert len(inputs) == 1
assert len(side_inputs) == 0
return cache[inputs[0]]
return same_attr_handler
def make_query_handler(attribute):
def query_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
idx = inputs[0]
obj = scene_struct['objects'][idx]
assert attribute in obj
val = obj[attribute]
if type(val) == list and len(val) != 1:
return '__INVALID__'
elif type(val) == list and len(val) == 1:
return val[0]
else:
return val
return query_handler
def exist_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
return len(inputs[0]) > 0
def equal_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] == inputs[1]
def less_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] < inputs[1]
def greater_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] > inputs[1]
# Register all of the answering handlers here.
# TODO maybe this would be cleaner with a function decorator that takes
# care of registration? Not sure. Also what if we want to reuse the same engine
# for different sets of node types?
execute_handlers = {
'scene': scene_handler,
'filter_color': make_filter_handler('color'),
'filter_shape': make_filter_handler('shape'),
'filter_material': make_filter_handler('material'),
'filter_size': make_filter_handler('size'),
'filter_objectcategory': make_filter_handler('objectcategory'),
'unique': unique_handler,
'relate': relate_handler,
'union': union_handler,
'intersect': intersect_handler,
'count': count_handler,
'query_color': make_query_handler('color'),
'query_shape': make_query_handler('shape'),
'query_material': make_query_handler('material'),
'query_size': make_query_handler('size'),
'exist': exist_handler,
'equal_color': equal_handler,
'equal_shape': equal_handler,
'equal_integer': equal_handler,
'equal_material': equal_handler,
'equal_size': equal_handler,
'equal_object': equal_handler,
'less_than': less_than_handler,
'greater_than': greater_than_handler,
'same_color': make_same_attr_handler('color'),
'same_shape': make_same_attr_handler('shape'),
'same_size': make_same_attr_handler('size'),
'same_material': make_same_attr_handler('material'),
}
def answer_question(question, metadata, scene_struct, all_outputs=False,
cache_outputs=True):
"""
Use structured scene information to answer a structured question. Most of the
heavy lifting is done by the execute handlers defined above.
We cache node outputs in the node itself; this gives a nontrivial speedup
when we want to answer many questions that share nodes on the same scene
(such as during question-generation DFS). This will NOT work if the same
nodes are executed on different scenes.
"""
all_input_types, all_output_types = [], []
node_outputs = []
for node in question['nodes']:
if cache_outputs and '_output' in node:
node_output = node['_output']
else:
node_type = node['type']
msg = 'Could not find handler for "%s"' % node_type
assert node_type in execute_handlers, msg
handler = execute_handlers[node_type]
node_inputs = [node_outputs[idx] for idx in node['inputs']]
side_inputs = node.get('side_inputs', [])
node_output = handler(scene_struct, node_inputs, side_inputs)
if cache_outputs:
node['_output'] = node_output
node_outputs.append(node_output)
if node_output == '__INVALID__':
break
if all_outputs:
return node_outputs
else:
return node_outputs[-1]
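def _answer_question_example():
  # Illustrative sketch (not part of the original engine): execute a tiny
  # hand-written program (scene -> filter_color[red] -> count) on a
  # hypothetical two-object scene. Each node's 'inputs' are indices of earlier
  # nodes whose cached outputs it consumes.
  toy_scene = {
      'objects': [
          {'color': 'red', 'shape': 'cube', 'material': 'rubber', 'size': 'large'},
          {'color': 'blue', 'shape': 'sphere', 'material': 'metal', 'size': 'small'},
      ],
  }
  toy_question = {
      'nodes': [
          {'type': 'scene', 'inputs': []},
          {'type': 'filter_color', 'inputs': [0], 'side_inputs': ['red']},
          {'type': 'count', 'inputs': [1]},
      ],
  }
  answer = answer_question(toy_question, metadata=None, scene_struct=toy_scene)
  assert answer == 1
  return answer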
def insert_scene_node(nodes, idx):
# First make a shallow-ish copy of the input
new_nodes = []
for node in nodes:
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
new_nodes.append(new_node)
# Replace the specified index with a scene node
new_nodes[idx] = {'type': 'scene', 'inputs': []}
# Search backwards from the last node to see which nodes are actually used
output_used = [False] * len(new_nodes)
idxs_to_check = [len(new_nodes) - 1]
while idxs_to_check:
cur_idx = idxs_to_check.pop()
output_used[cur_idx] = True
idxs_to_check.extend(new_nodes[cur_idx]['inputs'])
# Iterate through nodes, keeping only those whose output is used;
# at the same time build up a mapping from old idxs to new idxs
old_idx_to_new_idx = {}
new_nodes_trimmed = []
for old_idx, node in enumerate(new_nodes):
if output_used[old_idx]:
new_idx = len(new_nodes_trimmed)
new_nodes_trimmed.append(node)
old_idx_to_new_idx[old_idx] = new_idx
# Finally go through the list of trimmed nodes and change the inputs
for node in new_nodes_trimmed:
new_inputs = []
for old_idx in node['inputs']:
new_inputs.append(old_idx_to_new_idx[old_idx])
node['inputs'] = new_inputs
return new_nodes_trimmed
def is_degenerate(question, metadata, scene_struct, answer=None, verbose=False):
"""
A question is degenerate if replacing any of its relate nodes with a scene
node results in a question with the same answer.
"""
if answer is None:
answer = answer_question(question, metadata, scene_struct)
for idx, node in enumerate(question['nodes']):
if node['type'] == 'relate':
new_question = {
'nodes': insert_scene_node(question['nodes'], idx)
}
new_answer = answer_question(new_question, metadata, scene_struct)
if verbose:
print('here is truncated question:')
for i, n in enumerate(new_question['nodes']):
name = n['type']
if 'side_inputs' in n:
name = '%s[%s]' % (name, n['side_inputs'][0])
print(i, name, n['_output'])
print('new answer is: ', new_answer)
if new_answer == answer:
return True
return False
|
clevr-dataset-gen-main
|
question_generation/question_engine.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import argparse, json, os, itertools, random, shutil
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This, together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique), allows
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
help="JSON file containing ground-truth scene information for all images " +
"from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
help="JSON file defining synonyms for parameter values")
parser.add_argument('--template_dir', default='CLEVR_1.0_templates',
help="Directory containing JSON templates for questions")
# Output
parser.add_argument('--output_questions_file',
default='../output/CLEVR_questions.json',
help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
help="The image at which to start generating questions; this allows " +
"question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
help="The number of images for which to generate questions. Setting to 0 " +
"generates questions for all scenes in the input file starting from " +
"--scene_start_idx")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
help="The number of different templates that should be instantiated " +
"on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
help="The number of times each template should be instantiated on an image")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
help="How often to reset template and answer counts. Higher values will " +
"result in flatter distributions over templates and answers, but " +
"will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
help="If given then run inside cProfile")
# args = parser.parse_args()
def precompute_filter_options(scene_struct, metadata):
# Keys are tuples (size, color, shape, material) (where some may be None)
# and values are lists of object idxs that match the filter criterion
attribute_map = {}
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['size', 'color', 'material', 'shape']
else:
assert False, 'Unrecognized dataset'
# Precompute masks
masks = []
for i in range(2 ** len(attr_keys)):
mask = []
for j in range(len(attr_keys)):
mask.append((i // (2 ** j)) % 2)
masks.append(mask)
for object_idx, obj in enumerate(scene_struct['objects']):
if metadata['dataset'] == 'CLEVR-v1.0':
keys = [tuple(obj[k] for k in attr_keys)]
for mask in masks:
for key in keys:
masked_key = []
for a, b in zip(key, mask):
if b == 1:
masked_key.append(a)
else:
masked_key.append(None)
masked_key = tuple(masked_key)
if masked_key not in attribute_map:
attribute_map[masked_key] = set()
attribute_map[masked_key].add(object_idx)
scene_struct['_filter_options'] = attribute_map
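def _filter_masks_example():
  # Illustrative sketch (not part of the original script): the bit masks built
  # above enumerate every subset of the attribute keys, so with the four CLEVR
  # keys each object contributes 2**4 = 16 entries to the attribute map, from
  # the fully unspecified key (None, None, None, None) up to the fully
  # specified (size, color, material, shape) tuple.
  attr_keys = ['size', 'color', 'material', 'shape']
  masks = []
  for i in range(2 ** len(attr_keys)):
    masks.append([(i // (2 ** j)) % 2 for j in range(len(attr_keys))])
  assert len(masks) == 16
  assert masks[0] == [0, 0, 0, 0] and masks[-1] == [1, 1, 1, 1]
  return masks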
def find_filter_options(object_idxs, scene_struct, metadata):
# Keys are tuples (size, color, shape, material) (where some may be None)
# and values are lists of object idxs that match the filter criterion
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
attribute_map = {}
object_idxs = set(object_idxs)
for k, vs in scene_struct['_filter_options'].items():
attribute_map[k] = sorted(list(object_idxs & vs))
return attribute_map
def add_empty_filter_options(attribute_map, metadata, num_to_add):
  # Add some filtering criteria that do NOT correspond to any objects
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['Size', 'Color', 'Material', 'Shape']
else:
assert False, 'Unrecognized dataset'
attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
if '_filter_options' in metadata:
attr_vals = metadata['_filter_options']
target_size = len(attribute_map) + num_to_add
while len(attribute_map) < target_size:
    k = tuple(random.choice(v) for v in attr_vals)  # tuple (hashable key), not a generator
if k not in attribute_map:
attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
unique=False, include_zero=False, trivial_frac=0.1):
options = {}
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
# TODO: Right now this is only looking for nontrivial combinations; in some
# cases I may want to add trivial combinations, either where the intersection
# is empty or where the intersection is equal to the filtering output.
trivial_options = {}
for relationship in scene_struct['relationships']:
related = set(scene_struct['relationships'][relationship][object_idx])
for filters, filtered in scene_struct['_filter_options'].items():
intersection = related & filtered
trivial = (intersection == filtered)
if unique and len(intersection) != 1: continue
if not include_zero and len(intersection) == 0: continue
if trivial:
trivial_options[(relationship, filters)] = sorted(list(intersection))
else:
options[(relationship, filters)] = sorted(list(intersection))
N, f = len(options), trivial_frac
num_trivial = int(round(N * f / (1 - f)))
trivial_options = list(trivial_options.items())
random.shuffle(trivial_options)
for k, v in trivial_options[:num_trivial]:
options[k] = v
return options
def node_shallow_copy(node):
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
return new_node
def other_heuristic(text, param_vals):
"""
Post-processing heuristic to handle the word "other"
"""
if ' other ' not in text and ' another ' not in text:
return text
target_keys = {
'<Z>', '<C>', '<M>', '<S>',
'<Z2>', '<C2>', '<M2>', '<S2>',
}
if param_vals.keys() != target_keys:
return text
key_pairs = [
('<Z>', '<Z2>'),
('<C>', '<C2>'),
('<M>', '<M2>'),
('<S>', '<S2>'),
]
remove_other = False
for k1, k2 in key_pairs:
v1 = param_vals.get(k1, None)
v2 = param_vals.get(k2, None)
if v1 != '' and v2 != '' and v1 != v2:
print('other has got to go! %s = %s but %s = %s'
% (k1, v1, k2, v2))
remove_other = True
break
if remove_other:
if ' other ' in text:
text = text.replace(' other ', ' ')
if ' another ' in text:
text = text.replace(' another ', ' a ')
return text
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
synonyms, max_instances=None, verbose=False):
param_name_to_type = {p['name']: p['type'] for p in template['params']}
initial_state = {
'nodes': [node_shallow_copy(template['nodes'][0])],
'vals': {},
'input_map': {0: 0},
'next_template_node': 1,
}
states = [initial_state]
final_states = []
while states:
state = states.pop()
# Check to make sure the current state is valid
q = {'nodes': state['nodes']}
outputs = qeng.answer_question(q, metadata, scene_struct, all_outputs=True)
answer = outputs[-1]
if answer == '__INVALID__': continue
# Check to make sure constraints are satisfied for the current state
skip_state = False
for constraint in template['constraints']:
if constraint['type'] == 'NEQ':
p1, p2 = constraint['params']
v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
if v1 is not None and v2 is not None and v1 != v2:
if verbose:
print('skipping due to NEQ constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'NULL':
p = constraint['params'][0]
p_type = param_name_to_type[p]
v = state['vals'].get(p)
if v is not None:
skip = False
if p_type == 'Shape' and v != 'thing': skip = True
if p_type != 'Shape' and v != '': skip = True
if skip:
if verbose:
print('skipping due to NULL constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'OUT_NEQ':
i, j = constraint['params']
i = state['input_map'].get(i, None)
j = state['input_map'].get(j, None)
if i is not None and j is not None and outputs[i] == outputs[j]:
if verbose:
print('skipping due to OUT_NEQ constraint')
print(outputs[i])
print(outputs[j])
skip_state = True
break
else:
assert False, 'Unrecognized constraint type "%s"' % constraint['type']
if skip_state:
continue
# We have already checked to make sure the answer is valid, so if we have
# processed all the nodes in the template then the current state is a valid
# question, so add it if it passes our rejection sampling tests.
if state['next_template_node'] == len(template['nodes']):
# Use our rejection sampling heuristics to decide whether we should
# keep this template instantiation
cur_answer_count = answer_counts[answer]
answer_counts_sorted = sorted(answer_counts.values())
median_count = answer_counts_sorted[len(answer_counts_sorted) // 2]
median_count = max(median_count, 5)
if cur_answer_count > 1.1 * answer_counts_sorted[-2]:
if verbose: print('skipping due to second count')
continue
if cur_answer_count > 5.0 * median_count:
if verbose: print('skipping due to median')
continue
# If the template contains a raw relate node then we need to check for
# degeneracy at the end
has_relate = any(n['type'] == 'relate' for n in template['nodes'])
if has_relate:
degen = qeng.is_degenerate(q, metadata, scene_struct, answer=answer,
verbose=verbose)
if degen:
continue
answer_counts[answer] += 1
state['answer'] = answer
final_states.append(state)
if max_instances is not None and len(final_states) == max_instances:
break
continue
# Otherwise fetch the next node from the template
# Make a shallow copy so cached _outputs don't leak ... this is very nasty
next_node = template['nodes'][state['next_template_node']]
next_node = node_shallow_copy(next_node)
special_nodes = {
'filter_unique', 'filter_count', 'filter_exist', 'filter',
'relate_filter', 'relate_filter_unique', 'relate_filter_count',
'relate_filter_exist',
}
if next_node['type'] in special_nodes:
if next_node['type'].startswith('relate_filter'):
unique = (next_node['type'] == 'relate_filter_unique')
include_zero = (next_node['type'] == 'relate_filter_count'
or next_node['type'] == 'relate_filter_exist')
filter_options = find_relate_filter_options(answer, scene_struct, metadata,
unique=unique, include_zero=include_zero)
else:
filter_options = find_filter_options(answer, scene_struct, metadata)
if next_node['type'] == 'filter':
# Remove null filter
filter_options.pop((None, None, None, None), None)
if next_node['type'] == 'filter_unique':
# Get rid of all filter options that don't result in a single object
filter_options = {k: v for k, v in filter_options.items()
if len(v) == 1}
else:
# Add some filter options that do NOT correspond to the scene
if next_node['type'] == 'filter_exist':
# For filter_exist we want an equal number that do and don't
num_to_add = len(filter_options)
elif next_node['type'] == 'filter_count' or next_node['type'] == 'filter':
# For filter_count add nulls equal to the number of singletons
num_to_add = sum(1 for k, v in filter_options.items() if len(v) == 1)
add_empty_filter_options(filter_options, metadata, num_to_add)
filter_option_keys = list(filter_options.keys())
random.shuffle(filter_option_keys)
for k in filter_option_keys:
new_nodes = []
cur_next_vals = {k: v for k, v in state['vals'].items()}
next_input = state['input_map'][next_node['inputs'][0]]
filter_side_inputs = next_node['side_inputs']
if next_node['type'].startswith('relate'):
param_name = next_node['side_inputs'][0] # First one should be relate
filter_side_inputs = next_node['side_inputs'][1:]
param_type = param_name_to_type[param_name]
assert param_type == 'Relation'
param_val = k[0]
k = k[1]
new_nodes.append({
'type': 'relate',
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
for param_name, param_val in zip(filter_side_inputs, k):
param_type = param_name_to_type[param_name]
filter_type = 'filter_%s' % param_type.lower()
if param_val is not None:
new_nodes.append({
'type': filter_type,
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
elif param_val is None:
if metadata['dataset'] == 'CLEVR-v1.0' and param_type == 'Shape':
param_val = 'thing'
else:
param_val = ''
cur_next_vals[param_name] = param_val
input_map = {k: v for k, v in state['input_map'].items()}
extra_type = None
if next_node['type'].endswith('unique'):
extra_type = 'unique'
if next_node['type'].endswith('count'):
extra_type = 'count'
if next_node['type'].endswith('exist'):
extra_type = 'exist'
if extra_type is not None:
new_nodes.append({
'type': extra_type,
'inputs': [input_map[next_node['inputs'][0]] + len(new_nodes)],
})
input_map[state['next_template_node']] = len(state['nodes']) + len(new_nodes) - 1
states.append({
'nodes': state['nodes'] + new_nodes,
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
elif 'side_inputs' in next_node:
# If the next node has template parameters, expand them out
# TODO: Generalize this to work for nodes with more than one side input
assert len(next_node['side_inputs']) == 1, 'NOT IMPLEMENTED'
# Use metadata to figure out domain of valid values for this parameter.
# Iterate over the values in a random order; then it is safe to bail
# from the DFS as soon as we find the desired number of valid template
# instantiations.
param_name = next_node['side_inputs'][0]
param_type = param_name_to_type[param_name]
param_vals = metadata['types'][param_type][:]
random.shuffle(param_vals)
for val in param_vals:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
cur_next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
'side_inputs': [val],
}
cur_next_vals = {k: v for k, v in state['vals'].items()}
cur_next_vals[param_name] = val
states.append({
'nodes': state['nodes'] + [cur_next_node],
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
else:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
}
states.append({
'nodes': state['nodes'] + [next_node],
'vals': state['vals'],
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
# Actually instantiate the template with the solutions we've found
text_questions, structured_questions, answers = [], [], []
for state in final_states:
structured_questions.append(state['nodes'])
answers.append(state['answer'])
text = random.choice(template['text'])
for name, val in state['vals'].items():
if val in synonyms:
val = random.choice(synonyms[val])
text = text.replace(name, val)
text = ' '.join(text.split())
text = replace_optionals(text)
text = ' '.join(text.split())
text = other_heuristic(text, state['vals'])
text_questions.append(text)
return text_questions, structured_questions, answers
def replace_optionals(s):
"""
Each substring of s that is surrounded in square brackets is treated as
optional and is removed with probability 0.5. For example the string
"A [aa] B [bb]"
could become any of
"A aa B bb"
"A B bb"
"A aa B "
"A B "
with probability 1/4.
"""
pat = re.compile(r'\[([^\[]*)\]')
while True:
match = re.search(pat, s)
if not match:
break
i0 = match.start()
i1 = match.end()
if random.random() > 0.5:
s = s[:i0] + match.groups()[0] + s[i1:]
else:
s = s[:i0] + s[i1:]
return s
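def _replace_optionals_example():
  # Illustrative sketch (not part of the original script): each bracketed chunk
  # of the hypothetical string below is independently kept (with the brackets
  # stripped) or dropped, so after whitespace normalization the result is one
  # of the four variants listed in the docstring above.
  out = ' '.join(replace_optionals('A [aa] B [bb]').split())
  assert out in ('A aa B bb', 'A aa B', 'A B bb', 'A B')
  return out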
def main(args):
with open(args.metadata_file, 'r') as f:
metadata = json.load(f)
dataset = metadata['dataset']
if dataset != 'CLEVR-v1.0':
raise ValueError('Unrecognized dataset "%s"' % dataset)
functions_by_name = {}
for f in metadata['functions']:
functions_by_name[f['name']] = f
metadata['_functions_by_name'] = functions_by_name
# Load templates from disk
# Key is (filename, file_idx)
num_loaded_templates = 0
templates = {}
for fn in os.listdir(args.template_dir):
if not fn.endswith('.json'): continue
with open(os.path.join(args.template_dir, fn), 'r') as f:
base = os.path.splitext(fn)[0]
for i, template in enumerate(json.load(f)):
num_loaded_templates += 1
key = (fn, i)
templates[key] = template
print('Read %d templates from disk' % num_loaded_templates)
def reset_counts():
# Maps a template (filename, index) to the number of questions we have
# so far using that template
template_counts = {}
# Maps a template (filename, index) to a dict mapping the answer to the
# number of questions so far of that template type with that answer
template_answer_counts = {}
node_type_to_dtype = {n['name']: n['output'] for n in metadata['functions']}
for key, template in templates.items():
template_counts[key[:2]] = 0
final_node_type = template['nodes'][-1]['type']
final_dtype = node_type_to_dtype[final_node_type]
answers = metadata['types'][final_dtype]
if final_dtype == 'Bool':
answers = [True, False]
if final_dtype == 'Integer':
if metadata['dataset'] == 'CLEVR-v1.0':
answers = list(range(0, 11))
template_answer_counts[key[:2]] = {}
for a in answers:
template_answer_counts[key[:2]][a] = 0
return template_counts, template_answer_counts
template_counts, template_answer_counts = reset_counts()
# Read file containing input scenes
all_scenes = []
with open(args.input_scene_file, 'r') as f:
scene_data = json.load(f)
all_scenes = scene_data['scenes']
scene_info = scene_data['info']
begin = args.scene_start_idx
if args.num_scenes > 0:
end = args.scene_start_idx + args.num_scenes
all_scenes = all_scenes[begin:end]
else:
all_scenes = all_scenes[begin:]
# Read synonyms file
with open(args.synonyms_json, 'r') as f:
synonyms = json.load(f)
questions = []
scene_count = 0
for i, scene in enumerate(all_scenes):
scene_fn = scene['image_filename']
scene_struct = scene
print('starting image %s (%d / %d)'
% (scene_fn, i + 1, len(all_scenes)))
if scene_count % args.reset_counts_every == 0:
print('resetting counts')
template_counts, template_answer_counts = reset_counts()
scene_count += 1
# Order templates by the number of questions we have so far for those
# templates. This is a simple heuristic to give a flat distribution over
# templates.
templates_items = list(templates.items())
templates_items = sorted(templates_items,
key=lambda x: template_counts[x[0][:2]])
num_instantiated = 0
for (fn, idx), template in templates_items:
if args.verbose:
print('trying template ', fn, idx)
if args.time_dfs and args.verbose:
tic = time.time()
ts, qs, ans = instantiate_templates_dfs(
scene_struct,
template,
metadata,
template_answer_counts[(fn, idx)],
synonyms,
max_instances=args.instances_per_template,
verbose=False)
if args.time_dfs and args.verbose:
toc = time.time()
print('that took ', toc - tic)
image_index = int(os.path.splitext(scene_fn)[0].split('_')[-1])
for t, q, a in zip(ts, qs, ans):
questions.append({
'split': scene_info['split'],
'image_filename': scene_fn,
'image_index': image_index,
'image': os.path.splitext(scene_fn)[0],
'question': t,
'program': q,
'answer': a,
'template_filename': fn,
'question_family_index': idx,
'question_index': len(questions),
})
if len(ts) > 0:
if args.verbose:
print('got one!')
num_instantiated += 1
template_counts[(fn, idx)] += 1
elif args.verbose:
print('did not get any =(')
if num_instantiated >= args.templates_per_image:
break
# Change "side_inputs" to "value_inputs" in all functions of all functional
# programs. My original name for these was "side_inputs" but I decided to
# change the name to "value_inputs" for the public CLEVR release. I should
# probably go through all question generation code and templates and rename,
# but that could be tricky and take a while, so instead I'll just do it here.
# To further complicate things, originally functions without value inputs did
# not have a "side_inputs" field at all, and I'm pretty sure this fact is used
# in some of the code above; however in the public CLEVR release all functions
# have a "value_inputs" field, and it's an empty list for functions that take
# no value inputs. Again this should probably be refactored, but the quick and
# dirty solution is to keep the code above as-is, but here make "value_inputs"
# an empty list for those functions that do not have "side_inputs". Gross.
for q in questions:
for f in q['program']:
if 'side_inputs' in f:
f['value_inputs'] = f['side_inputs']
del f['side_inputs']
else:
f['value_inputs'] = []
with open(args.output_questions_file, 'w') as f:
print('Writing output to %s' % args.output_questions_file)
json.dump({
'info': scene_info,
'questions': questions,
}, f)
if __name__ == '__main__':
args = parser.parse_args()
if args.profile:
import cProfile
cProfile.run('main(args)')
else:
main(args)
|
clevr-dataset-gen-main
|
question_generation/generate_questions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="c3dm", # Replace with your own username
version="1.0.0",
author="Facebook AI Research",
author_email="romansh@fb.com",
description="""Code for the paper: Canonical 3D Deformer Maps: \
Unifying parametric and non-parametric methods for \
dense weakly-supervised category reconstruction\
""",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/c3dm",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Environment :: GPU :: NVIDIA CUDA :: 10.1",
"Intended Audience :: Science/Research",
"Topic :: Multimedia :: Graphics :: 3D Modeling",
],
python_requires='>=3.6',
install_requires=[
"torch==1.5.1",
"pytorch3d",
"pyyaml>=5.3.1",
"numpy>=1.17",
"pillow>=1.7.2",
"trimesh>=3.7.3",
"matplotlib",
"visdom>=0.1.8.9",
"plotly>=4.8.1",
],
)
|
c3dm-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import torch
from torch import nn as nn
import torch.nn.functional as Fu
import numpy as np
from tools.utils import NumpySeedFix, auto_init_args
from tools.vis_utils import get_visdom_connection, \
denorm_image_trivial, \
show_projections, \
visdom_plot_pointclouds
from tools.functions import masked_kp_mean, avg_l2_dist, \
safe_sqrt, \
argmin_translation, argmin_scale, \
avg_l2_huber, \
find_camera_T
from tools.so3 import so3_exponential_map, rand_rot
from dataset.dataset_configs import STICKS
from PIL import Image
class C3DPO(torch.nn.Module):
def __init__( self, n_keypoints = 17,
shape_basis_size = 10,
mult_shape_by_class_mask = False,
squared_reprojection_loss = False,
n_fully_connected = 1024,
n_layers = 6,
keypoint_rescale = float(1),
keypoint_norm_type = 'to_mean',
projection_type = 'orthographic',
z_augment = True,
z_augment_rot_angle = float(np.pi),
z_equivariance = False,
z_equivariance_rot_angle = float(np.pi)/4, # < 0 means same as z_augment_rot_angle
compose_z_equivariant_rot = True, # TODO: remove this soon!
camera_translation = False,
camera_xy_translation = True,
argmin_translation = False,
argmin_translation_test = False,
argmin_translation_min_depth = 3.,
argmin_to_augmented = False,
camera_scale = False,
argmin_scale = False,
argmin_scale_test = False,
loss_normalization = 'kp_total_count',
independent_phi_for_aug = False,
shape_pred_wd = 1.,
connectivity_setup = 'NONE',
custom_param_groups = False,
use_huber = False,
huber_scaling = 0.1,
alpha_bias = True,
canonicalization = {
'use': False,
'n_layers': 6,
'n_rand_samples': 4,
'rot_angle': float(np.pi),
'n_fully_connected': 1024,
},
linear_instead_of_conv = False,
perspective_depth_threshold = 0.1,
depth_offset = 0.,
replace_keypoints_with_input = False,
root_joint = 0,
loss_weights = { 'l_reprojection': 1.,
'l_canonicalization': 1. },
log_vars = [ \
'objective',
'dist_reprojection',
'l_reprojection',
'l_canonicalization' ],
**kwargs ):
super(C3DPO, self).__init__()
# autoassign constructor params to self
auto_init_args(self)
# factorization net
self.phi = nn.Sequential( \
*make_trunk( dim_in=self.n_keypoints * 3 , # 2 dim loc, 1 dim visibility
n_fully_connected=self.n_fully_connected,
n_layers=self.n_layers ) )
if linear_instead_of_conv:
layer_init_fn = linear_layer
else:
layer_init_fn = conv1x1
# shape coefficient predictor
self.alpha_layer = layer_init_fn( self.n_fully_connected,
self.shape_basis_size,
init='normal0.01',
cnv_args = {'bias': self.alpha_bias,
'kernel_size': 1 } )
# 3D shape predictor
self.shape_layer = layer_init_fn( self.shape_basis_size,
3*n_keypoints,
init='normal0.01' )
# rotation predictor (predicts log-rotation)
self.rot_layer = layer_init_fn(self.n_fully_connected,3,init='normal0.01')
if self.camera_translation:
# camera translation
self.translation_layer = layer_init_fn(self.n_fully_connected,3,init='normal0.01')
if self.camera_scale:
# camera scale (non-negative predictions)
self.scale_layer = nn.Sequential( \
layer_init_fn(self.n_fully_connected,1,init='normal0.01'),
nn.Softplus() )
if self.canonicalization['use']:
# canonicalization net:
self.psi = nn.Sequential( \
*make_trunk( dim_in=self.n_keypoints*3 ,
n_fully_connected=self.canonicalization['n_fully_connected'],
n_layers=self.canonicalization['n_layers'] ) )
self.alpha_layer_psi = conv1x1( \
self.n_fully_connected,
self.shape_basis_size,
init='normal0.01')
# def _get_param_groups(self,lr,wd=0.):
# # make sure to set correct weight decay for the shape predictor
# shape_param_names = [ 'shape_pred_layer.weight', \
# 'shape_pred_layer.bias' ]
# prm_shape = []
# prm_remain = []
# for name,prm in self.named_parameters():
# if not prm.requires_grad: continue
# if name in shape_param_names:
# prm_list = prm_shape
# else:
# prm_list = prm_remain
# prm_list.append(prm)
# p_groups = [ { 'params':prm_remain,'lr':float(lr), \
# 'weight_decay': wd },
# { 'params':prm_shape, 'lr':float(lr), \
# 'weight_decay': float(wd*self.shape_pred_wd) } ]
# return p_groups
def _get_param_groups(self,lr,wd=0.):
assert False
# make sure to set correct weight decay for the shape predictor
shape_param_names = [ 'shape_pred_layer.weight', \
'shape_pred_layer.bias' ]
prm_shape = []
prm_remain = []
for name,prm in self.named_parameters():
if not prm.requires_grad: continue
if name in shape_param_names:
prm_list = prm_shape
else:
prm_list = prm_remain
prm_list.append(prm)
p_groups = [ { 'params':prm_remain,'lr':float(lr), \
'weight_decay': wd },
{ 'params':prm_shape, 'lr':float(lr), \
'weight_decay': float(wd*self.shape_pred_wd) } ]
return p_groups
def forward( self, kp_loc=None, kp_vis=None, \
class_mask=None, K=None, dense_basis=None, \
phi_out = None, dense_basis_mask=None,
shape_coeff_in = None, **kwargs ):
# dictionary with outputs of the fw pass
preds = {}
# input sizes ...
ba,kp_dim,n_kp = kp_loc.shape
dtype = kp_loc.type()
assert kp_dim==2, 'bad input keypoint dim'
assert n_kp==self.n_keypoints, 'bad # of keypoints!'
if self.projection_type=='perspective':
kp_loc_cal = self.calibrate_keypoints(kp_loc, K)
else:
kp_loc_cal = kp_loc
# save for later visualisations ...
kp_loc_norm, kp_mean, kp_scale = \
self.normalize_keypoints( \
kp_loc_cal, kp_vis, rescale=self.keypoint_rescale )
preds['kp_loc_norm'] = kp_loc_norm
preds['kp_mean'], preds['kp_scale'] = kp_mean, kp_scale
# run the shape predictor
if phi_out is not None: # bypass the predictor and use input
preds['phi'] = phi_out
else:
preds['phi'] = self.run_phi(kp_loc_norm, kp_vis, \
class_mask=class_mask, \
shape_coeff_in=shape_coeff_in)
if self.canonicalization['use']:
preds['l_canonicalization' ], preds['psi'] = \
self.canonicalization_loss( preds['phi'], \
class_mask=class_mask )
# 3D->2D project shape to camera
kp_reprojected, depth = self.camera_projection( \
preds['phi']['shape_camera_coord'])
preds['kp_reprojected'] = kp_reprojected
if dense_basis is not None:
preds['phi_dense'] = self.run_phi_dense(dense_basis, preds['phi'])
kp_reprojected_dense, depth_dense = self.camera_projection( \
preds['phi_dense']['shape_camera_coord_dense'])
preds['kp_reprojected_dense'] = kp_reprojected_dense
preds['depth_dense'] = depth_dense
# compute the repro loss for backpropagation
if self.loss_normalization=='kp_count_per_image':
preds['l_reprojection'] = avg_l2_dist( \
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
squared=self.squared_reprojection_loss )
# print(float(preds['l_reprojection']))
elif self.loss_normalization=='kp_total_count':
kp_reprojected_flatten = \
kp_reprojected.permute(1,2,0).contiguous().view(1,2,self.n_keypoints*ba)
kp_loc_norm_flatten = \
kp_loc_norm.permute(1,2,0).contiguous().view(1,2,self.n_keypoints*ba)
kp_vis_flatten = \
kp_vis.permute(1,0).contiguous().view(1,self.n_keypoints*ba)
if self.use_huber:
preds['l_reprojection'] = avg_l2_huber( \
kp_reprojected_flatten,
kp_loc_norm_flatten,
mask=kp_vis_flatten,
scaling=self.huber_scaling )
else:
assert False
preds['l_reprojection'] = avg_l2_dist( \
kp_reprojected_flatten,
kp_loc_norm_flatten,
mask=kp_vis_flatten,
squared=self.squared_reprojection_loss )
else:
raise ValueError('undefined loss normalization %s' % self.loss_normalization)
if self.squared_reprojection_loss:
assert False
# compute the average reprojection distance
# = easier to interpret than the squared repro loss
preds['dist_reprojection'] = avg_l2_dist( \
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
squared=False )
# unnormalize the shape projections
kp_reprojected_image = \
self.unnormalize_keypoints(kp_reprojected, kp_mean, \
rescale=self.keypoint_rescale, kp_scale=kp_scale)
if dense_basis is not None:
kp_reprojected_image_dense = \
self.unnormalize_keypoints( \
preds['kp_reprojected_dense'], kp_mean, \
rescale=self.keypoint_rescale, kp_scale=kp_scale)
preds['kp_reprojected_image_dense'] = kp_reprojected_image_dense
# projections in the image coordinate frame
if self.replace_keypoints_with_input and not self.training:
# use the input points
kp_reprojected_image = (1-kp_vis[:,None,:]) * kp_reprojected_image + \
kp_vis[:,None,:] * kp_loc_cal
preds['kp_reprojected_image'] = kp_reprojected_image
# projected 3D shape in the image space
# = unprojection of kp_reprojected_image
shape_image_coord, depth_image_coord = \
self.camera_unprojection( \
kp_reprojected_image, depth, \
rescale=self.keypoint_rescale, \
kp_scale=kp_scale )
if dense_basis is not None:
shape_image_coord_dense, depth_image_coord_dense = \
self.camera_unprojection( \
kp_reprojected_image_dense, depth_dense, \
rescale=self.keypoint_rescale, \
kp_scale=kp_scale )
if self.projection_type=='perspective':
preds['kp_reprojected_image_cal'] = kp_reprojected_image
preds['shape_image_coord_cal'] = shape_image_coord
preds['shape_image_coord'] = \
self.uncalibrate_keypoints(shape_image_coord, K)
preds['kp_reprojected_image'], _ = \
self.camera_projection(preds['shape_image_coord'])
if dense_basis is not None:
preds['shape_image_coord_cal_dense'] = shape_image_coord_dense
preds['shape_image_coord_dense'] = \
self.uncalibrate_keypoints(shape_image_coord_dense, K)
preds['kp_reprojected_image_dense'], _ = \
self.camera_projection(preds['shape_image_coord_dense'])
# if True:
# preds['shape_image_coord_dense'].register_hook(\
# lambda grad: print(grad.abs().view(-1).topk(10)[0][-1]))
# preds['kp_reprojected_image_dense'].register_hook(\
# lambda grad: print(grad.abs().view(-1).topk(10)[0][-1]))
preds['depth_image_coord_dense'] = depth_image_coord_dense
elif self.projection_type=='orthographic':
preds['shape_image_coord'] = shape_image_coord
preds['depth_image_coord'] = depth_image_coord
if dense_basis is not None:
preds['shape_image_coord_dense'] = shape_image_coord_dense
preds['depth_image_coord_dense'] = depth_image_coord_dense
else:
raise ValueError()
# get the final loss
preds['objective'] = self.get_objective(preds)
assert np.isfinite(preds['objective'].sum().data.cpu().numpy()), "nans!"
return preds
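# Projection helpers. Two camera models are supported throughout this class:
#   perspective:   (x, y) = (X / Z, Y / Z), with Z clamped from below by
#                  perspective_depth_threshold before the division
#   orthographic:  (x, y) = (X, Y), with the depth Z returned separately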
def camera_projection(self, shape):
out = {}
depth = shape[:,2:3]
if self.projection_type=='perspective':
if self.perspective_depth_threshold > 0:
depth = torch.clamp(depth, self.perspective_depth_threshold)
projections = shape[:,0:2] / depth
elif self.projection_type=='orthographic':
projections = shape[:,0:2]
else:
raise ValueError('no such projection type %s' % \
self.projection_type )
return projections, depth
def camera_unprojection(self,kp_loc,depth,kp_scale=None,rescale=float(1)):
corr_scale = 1./rescale if kp_scale is None else kp_scale / rescale
if kp_scale is not None:
depth = depth * corr_scale[:,None,None]
else:
depth = depth * corr_scale
if self.projection_type=='perspective':
shape = torch.cat((kp_loc * depth, depth), dim=1)
elif self.projection_type=='orthographic':
shape = torch.cat((kp_loc, depth), dim=1)
else:
raise ValueError('no such projection type %s' % self.projection_type)
return shape, depth
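# (Un)calibration helpers: calibrate_keypoints removes the camera intrinsics
# from 2D points, x_cal = (x - p) / f with principal point p = K[:, 0:2, 2]
# and focal lengths f = (K[0, 0], K[1, 1]); uncalibrate_keypoints maps
# calibrated 3D points back by multiplying with K.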
def calibrate_keypoints(self, kp_loc, K):
# undo the projection matrix
assert K is not None
orig_shape = kp_loc.shape
kp_loc = kp_loc.view(orig_shape[0],2,-1) - K[:,0:2,2:3]
focal = torch.stack((K[:,0,0], K[:,1,1]), dim=1)
kp_loc = kp_loc / focal[:,:,None]
kp_loc = kp_loc.view(orig_shape)
return kp_loc
def uncalibrate_keypoints(self, kp_loc, K):
assert K is not None
ba = kp_loc.shape[0]
kp_loc = torch.bmm(K, kp_loc.view(ba,3,-1) ).view(kp_loc.shape)
return kp_loc
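# Keypoint normalisation: subtract either the root joint ('to_root') or the
# visibility-weighted mean ('to_mean') and multiply by keypoint_rescale;
# unnormalize_keypoints inverts this (divide by the rescale factor and add
# the mean back).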
def normalize_keypoints( self,
kp_loc,
kp_vis,
rescale=1.,
kp_mean=None ):
if self.keypoint_norm_type=='to_root':
if kp_mean is None:
# center around the root joint
kp_mean = kp_loc[:,:,self.root_joint]
kp_loc_norm = kp_loc - kp_mean[:,:,None]
kp_scale = None
elif self.keypoint_norm_type=='to_mean':
if kp_mean is None:
# calc the mean of visible points
kp_mean = masked_kp_mean( kp_loc, kp_vis )
# remove the mean from the keypoint locations
kp_loc_norm = kp_loc - kp_mean[:,:,None]
kp_scale = None
else:
raise ValueError( 'no such kp norm %s' % \
self.keypoint_norm_type )
# rescale
kp_loc_norm = kp_loc_norm * rescale
return kp_loc_norm, kp_mean, kp_scale
def unnormalize_keypoints( self,
kp_loc_norm,
kp_mean,
rescale=1.,
kp_scale=None,
K=None ):
kp_loc = kp_loc_norm * (1. / rescale)
if kp_scale is not None:
kp_loc = kp_loc * kp_scale[:,None,None]
kp_loc = (kp_loc.view(kp_loc.shape[0],2,-1)
+ kp_mean[:, :, None]).view(kp_loc.shape)
return kp_loc
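# run_phi: the 2D -> (shape, camera) factorisation network. Rough pipeline:
#   1. optionally rotate the input 2D keypoints in-plane during training
#      (z_augment / z_equivariance)
#   2. mask by visibility, flatten, and feed to the trunk self.phi
#   3. predict shape coefficients (alpha_layer), a log-rotation mapped to R
#      by so3_exponential_map, an optional translation T and scale s
#   4. form the camera-frame shape Y = s*R*X + T, undo the random in-plane
#      rotation, and optionally re-estimate T and s in closed form (argmin_*)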
def run_phi(
self,
kp_loc,
kp_vis,
class_mask=None,
shape_coeff_in=None,
):
preds = {}
# batch size
ba = kp_loc.shape[0]
dtype = kp_loc.type()
eps = 1e-4
kp_loc_orig = kp_loc.clone()
if self.z_augment and self.training:
R_rand = rand_rot( ba,
dtype=dtype,
max_rot_angle=float(self.z_augment_rot_angle),
axes=(0,0,1) )
kp_loc_in = torch.bmm(R_rand[:,0:2,0:2],kp_loc)
else:
R_rand = torch.eye(3).type(dtype)[None].repeat( (ba,1,1) )
kp_loc_in = kp_loc_orig
if self.z_equivariance and self.training:
if self.z_equivariance_rot_angle < 0.:
zeq_angle = self.z_augment_rot_angle
else:
zeq_angle = self.z_equivariance_rot_angle
# random xy rot
R_rand_eq = rand_rot( ba,
dtype=dtype,
max_rot_angle=float(zeq_angle),
axes=(0,0,1) )
kp_loc_in = torch.cat( \
( kp_loc_in, \
torch.bmm(R_rand_eq[:,0:2,0:2],
kp_loc_in if self.compose_z_equivariant_rot else kp_loc_orig) \
), dim=0 )
kp_vis_in = kp_vis.repeat( (2,1) )
else:
kp_vis_in = kp_vis
# mask kp_loc by kp_visibility
kp_loc_masked = kp_loc_in * kp_vis_in[:,None,:]
# vectorize
kp_loc_flatten = kp_loc_masked.view(-1, 2*self.n_keypoints)
# concatenate visibilities and kp locations
l1_input = torch.cat( (kp_loc_flatten,kp_vis_in) , dim=1 )
# pass to network
if self.independent_phi_for_aug and l1_input.shape[0]==2*ba:
feats = torch.cat([ self.phi(l1_[:,:,None,None]) for \
l1_ in l1_input.split(ba, dim=0) ], dim=0)
else:
feats = self.phi( l1_input[:,:,None,None] )
# here the network runs once on concatenated input ... maybe split it?
# coefficients into the linear basis
shape_coeff = self.alpha_layer(feats)[:,:,0,0]
if self.z_equivariance and self.training:
# use the shape coeff from the second set of preds
shape_coeff = shape_coeff[ba:]
# take the feats from the first set
feats = feats[:ba]
if shape_coeff_in is not None:
preds['shape_coeff_orig'] = shape_coeff
shape_coeff = shape_coeff_in
# shape prediction is just a linear layer implemented as a conv
shape_canonical = self.shape_layer( \
shape_coeff[:,:,None,None])[:,:,0,0]
shape_canonical = shape_canonical.view(ba,3,self.n_keypoints)
if self.keypoint_norm_type=='to_root':
# make sure we fix the root at 0
root_j = shape_canonical[:,:,self.root_joint]
shape_canonical = shape_canonical - root_j[:,:,None]
# predict camera params
# ... log rotation (exponential representation)
R_log = self.rot_layer(feats)[:,:,0,0]
# convert from the 3D to 3x3 rot matrix
R = so3_exponential_map(R_log)
# T vector of the camera
if self.camera_translation:
T = self.translation_layer(feats)[:,:,0,0]
if self.camera_xy_translation: # kill the last z-dim
T = T * torch.tensor([1.,1.,0.]).type(dtype)[None,:]
else:
T = R_log.new_zeros(ba, 3)
# offset the translation vector of the camera
if self.depth_offset > 0.:
T[:,2] = T[:,2] + self.depth_offset
# scale of the camera
if self.camera_scale:
scale = self.scale_layer(feats)[:,0,0,0]
else:
scale = R_log.new_ones(ba)
# rotated+scaled shape into the camera ( Y = sRX + T )
shape_camera_coord = self.apply_similarity_t(shape_canonical,R,T,scale)
# undo equivariant transformation
if (self.z_equivariance or self.z_augment) and self.training:
R_rand_inv = R_rand.transpose(2,1)
R = torch.bmm(R_rand_inv,R)
T = torch.bmm(R_rand_inv,T[:,:,None])[:,:,0]
shape_camera_coord = torch.bmm(R_rand_inv,shape_camera_coord)
# estimate translation
if self.argmin_translation or \
(self.argmin_translation_test and not self.training) :
if self.projection_type=='orthographic':
projection, _ = self.camera_projection(shape_camera_coord)
if self.argmin_to_augmented:
assert False
T_amin = argmin_translation( projection, kp_loc_in[:ba], v=kp_vis )
else:
T_amin = argmin_translation( projection, kp_loc_orig, v=kp_vis )
T_amin = Fu.pad(T_amin,(0,1),'constant',float(0))
shape_camera_coord = shape_camera_coord + T_amin[:,:,None]
T = T + T_amin
elif self.projection_type=='perspective':
K_ = torch.eye(3).type_as(kp_loc)[None].repeat(ba,1,1)
T = find_camera_T(\
K_, shape_camera_coord, kp_loc_orig, v=kp_vis)
if self.argmin_translation_min_depth > 0.:
T = torch.cat( \
( T[:,0:2], \
torch.clamp(T[:,2:3], self.argmin_translation_min_depth)),
dim = 1 )
shape_camera_coord = shape_camera_coord + T[:,:,None]
else:
raise ValueError(self.projection_type)
# estimate scale
if self.argmin_scale or \
(self.argmin_scale_test and not self.training) :
assert self.projection_type=='orthographic'
# assert False
projection, _ = self.camera_projection(shape_camera_coord)
scale_correct = argmin_scale(projection, kp_loc_orig, v=kp_vis)
scale = scale_correct * scale
shape_camera_coord = scale_correct[:,None,None] * shape_camera_coord
T = scale_correct[:,None] * T
if class_mask is not None and self.mult_shape_by_class_mask:
shape_camera_coord = shape_camera_coord * class_mask[:,None,:]
shape_canonical = shape_canonical * class_mask[:,None,:]
preds['R_log'] = R_log
preds['R'] = R
preds['scale'] = scale
preds['T'] = T
preds['shape_camera_coord'] = shape_camera_coord
preds['shape_coeff'] = shape_coeff
preds['shape_canonical'] = shape_canonical
return preds
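# run_phi_dense: dense counterpart of run_phi. dense_basis has shape
# (B, 3*(K+1), H, W) where K is the number of shape coefficients; the first
# block of 3 channels holds the per-pixel mean shape and each following block
# one basis vector. The canonical shape is the inner product of [1, alpha]
# with the basis, mapped to camera coordinates with the (R, T, scale)
# predicted by run_phi.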
def run_phi_dense(self, dense_basis, phi_out):
R, T, scale, shape_coeff = [phi_out[k] for k in ['R', 'T', 'scale', 'shape_coeff']]
preds = {}
ba, dim, he, wi = dense_basis.shape
shape_basis_size = dim // 3
dense_basis_ = dense_basis.view(ba, shape_basis_size, 3*he*wi)
shape_coeff_1 = Fu.pad(shape_coeff, (1,0), value=1.) # mean shape goes first
if False:
dense_basis_decomp = dense_basis_.permute(0, 2, 1).contiguous()
dense_basis_decomp = dense_basis_decomp.view(ba, 3, -1)
# only rotate the basis
dense_basis_decomp_t = \
self.apply_similarity_t(dense_basis_decomp,R,T*0.,scale*0.+1.)
dense_basis_decomp_t = \
dense_basis_decomp_t.view(ba,3,he,wi, shape_basis_size)
dense_basis_decomp_rot = dense_basis_decomp_t.permute(0,4,1,2,3)
preds['dense_basis_rot'] = dense_basis_decomp_rot
shape_canonical_dense = torch.bmm(shape_coeff_1[:,None,:],
dense_basis_).view(ba, 3, -1)
shape_camera_coord_dense = self.apply_similarity_t(shape_canonical_dense,R,T,scale)
preds['shape_canonical_dense'] = shape_canonical_dense.view(ba, 3, he, wi)
preds['shape_camera_coord_dense'] = shape_camera_coord_dense.view(ba, 3, he, wi)
return preds
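# apply_similarity_t computes Y = R (s * X) + T for a batch of point clouds:
# S is (B, 3, N), R is (B, 3, 3), T is (B, 3) and s is (B,).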
def apply_similarity_t( self, S, R, T, s ):
return torch.bmm( R, s[:,None,None] * S ) + T[:,:,None]
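# Canonicalization consistency loss: the canonical shape X is rotated by
# random rotations R_rand and passed through run_psi, which should map every
# rotated copy back to the same canonical shape, so the loss compares
# psi(R_rand X) against X.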
def canonicalization_loss( self, phi_out, class_mask=None ):
shape_canonical = phi_out['shape_canonical']
dtype = shape_canonical.type()
ba = shape_canonical.shape[0]
n_sample = self.canonicalization['n_rand_samples']
# rotate the canonical point cloud
# generate random rotation around all axes
R_rand = rand_rot( ba * n_sample,
dtype=dtype,
max_rot_angle=self.canonicalization['rot_angle'],
axes=(1,1,1) )
unrotated = shape_canonical.repeat(n_sample, 1, 1)
rotated = torch.bmm( R_rand, unrotated )
psi_out = self.run_psi( rotated ) # psi3( Rrand X )
a , b = psi_out['shape_canonical'] , unrotated
if self.use_huber:
l_canonicalization = avg_l2_huber(a, b, \
scaling=self.huber_scaling,
mask=class_mask.repeat(n_sample,1) if class_mask is not None else None)
else:
l_canonicalization = avg_l2_dist(a, b, \
squared=self.squared_reprojection_loss,
mask=class_mask.repeat(n_sample,1) if class_mask is not None else None)
# reshape the outputs in the output list
psi_out = { k : v.view( \
self.canonicalization['n_rand_samples'] , \
ba, *v.shape[1:] ) for k,v in psi_out.items() }
return l_canonicalization, psi_out
def run_psi( self, shape_canonical ):
preds = {}
# batch size
ba = shape_canonical.shape[0]
assert shape_canonical.shape[1]==3, '3d inputs only please'
# reshape and pass to the network ...
l1_input = shape_canonical.view(ba,3*self.n_keypoints)
# pass to network
feats = self.psi( l1_input[:,:,None,None] )
# coefficients into the linear basis
shape_coeff = self.alpha_layer_psi(feats)[:,:,0,0]
preds['shape_coeff'] = shape_coeff
# use the shape_pred_layer from 2d predictor
shape_pred = self.shape_layer( \
shape_coeff[:,:,None,None])[:,:,0,0]
shape_pred = shape_pred.view(ba,3,self.n_keypoints)
preds['shape_canonical'] = shape_pred
return preds
def get_objective(self,preds):
losses_weighted = [ preds[k] * float(w) for k,w in \
self.loss_weights.items() \
if k in preds ]
if not hasattr(self,'_loss_weights_printed') or \
not self._loss_weights_printed:
print('-------\nloss_weights:')
for k,w in self.loss_weights.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
self._loss_weights_printed = True
loss = torch.stack(losses_weighted).sum()
return loss
def get_alpha_mean_complement(self):
delta = self.shape_layer.weight.view(3, -1, self.shape_basis_size)
alpha_bias = self.alpha_layer.bias.data
mu_add = (delta * alpha_bias[None,None,:]).sum(2)
return mu_add
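# Mean-shape reparametrisation: if the alpha layer has a bias b, the effective
# mean shape is mu_eff = mu + sum_k delta_k * b_k (get_alpha_mean_complement).
# reparametrize_mean_shape folds this correction into shape_layer.bias and
# zeroes the alpha bias, so get_mean_shape keeps returning the same mu_eff.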
def reparametrize_mean_shape(self):
if self.alpha_layer.bias is None:
print('no alpha bias => skipping reparametrization')
return
else:
print('reparametrizing nrsfm model mean')
mu = self.shape_layer.bias.data.view(3, self.n_keypoints)
mu_add = self.get_alpha_mean_complement()
mu_new = mu + mu_add
self.shape_layer.bias.data = mu_new.view(-1)
self.alpha_layer.bias.data.fill_(0.)
self.reparametrized = True
def get_mean_shape(self):
mu = self.shape_layer.bias.data.view(3, self.n_keypoints)
mu_orig = mu.clone()
if self.alpha_layer.bias is not None:
mu_add = self.get_alpha_mean_complement()
mu = mu + mu_add
if hasattr(self, 'reparametrized') and self.reparametrized:
assert (mu - mu_orig).abs().max() <= 1e-6
return mu
def visualize( self, visdom_env, trainmode, \
preds, stats, clear_env=False ):
viz = get_visdom_connection(server=stats.visdom_server,\
port=stats.visdom_port )
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
if clear_env: # clear visualisations
print(" ... clearing visdom environment")
viz.close(env=visdom_env,win=None)
print('vis into env:\n %s' % visdom_env)
it = stats.it[trainmode]
epoch = stats.epoch
idx_image = 0
title="e%d_it%d_im%d"%(stats.epoch,stats.it[trainmode],idx_image)
# get the connectivity pattern
sticks = STICKS[self.connectivity_setup] if \
self.connectivity_setup in STICKS else None
var_kp = { 'orthographic': 'kp_reprojected_image',
'perspective': 'kp_reprojected_image_uncal'}[self.projection_type]
# show reprojections
p = np.stack( \
[ preds[k][idx_image].detach().cpu().numpy() \
for k in (var_kp, 'kp_loc') ] )
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
show_projections( viz, visdom_env, p, v=v,
title=title, cmap__='gist_ncar',
markersize=50, sticks=sticks,
stickwidth=1, plot_point_order=True,
image_path=preds['image_path'][idx_image],
win='projections' )
# show 3d reconstruction
if True:
var3d = { 'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal'}[self.projection_type]
pcl = {'pred': preds[var3d][idx_image].detach().cpu().numpy().copy()}
if 'kp_loc_3d' in preds:
pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
if self.projection_type=='perspective':
# for perspective projections, we don't know the scale
# so we estimate it here ...
scale = argmin_scale( torch.from_numpy(pcl['pred'][None]),
torch.from_numpy(pcl['gt'][None]) )
pcl['pred'] = pcl['pred'] * float(scale)
elif self.projection_type=='orthographic':
pcl['pred'] = pcl['pred'] - pcl['pred'].mean(1)
visdom_plot_pointclouds(viz, pcl, visdom_env, title, \
plot_legend=False, markersize=20, \
sticks=sticks, win='3d' )
#TODO: Make these layers nicer + move somewhere else ...
def make_trunk(
n_fully_connected=None,
dim_in=None,
n_layers=None,
use_bn=True,
l2_norm=False,
):
layer1 = ConvBNLayer( dim_in,
n_fully_connected,
use_bn=use_bn,
l2_norm=l2_norm )
layers = [layer1]
for l in range(n_layers):
layers.append(
ResLayer(n_fully_connected, int(n_fully_connected/4),
use_bn=use_bn, l2_norm=l2_norm)
)
# print('made a trunk net:')
# print(layers)
return layers
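# Minimal usage sketch of make_trunk (the sizes below are illustrative, not
# taken from a config in this file): the returned list is wrapped in an
# nn.Sequential and applied to (B, C, 1, 1) tensors, i.e. an MLP implemented
# with 1x1 convolutions:
#   layers = make_trunk(dim_in=3 * n_keypoints, n_fully_connected=512, n_layers=3)
#   trunk = torch.nn.Sequential(*layers)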
def conv1x1(in_planes, out_planes, init='no', cnv_args={'bias':True,'kernel_size':1},std=0.01):
"""1x1 convolution"""
cnv = nn.Conv2d(in_planes, out_planes, **cnv_args)
# init weights ...
if init=='no':
pass
elif init=='normal0.01':
# print("warning: N(0.0.01) conv weight init (different from previous exps)")
# print('init std = %1.2e' % std)
cnv.weight.data.normal_(0.,std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
else:
assert False
return cnv
class ConvBNLayer(nn.Module):
def __init__(self, inplanes, planes, use_bn=True, stride=1, l2_norm=False):
super(ConvBNLayer, self).__init__()
# do a reasonable init
cnv_args = {'kernel_size':1, 'stride':stride, 'bias':True}
self.conv1 = conv1x1(inplanes, planes, init='normal0.01', cnv_args=cnv_args)
self.use_bn = use_bn
self.l2_norm = l2_norm
if use_bn: self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
out = self.conv1(x)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn1(out)
out = self.relu(out)
return out
class ResLayer(nn.Module):
def __init__(self, inplanes, planes, expansion=4, use_bn=True, l2_norm=False):
super(ResLayer, self).__init__()
self.expansion=expansion
self.conv1 = conv1x1(inplanes, planes,init='normal0.01')
if use_bn: self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv1x1(planes, planes, init='normal0.01' )
if use_bn: self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion, init='normal0.01')
if use_bn: self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.skip = inplanes==(planes*self.expansion)
self.use_bn = use_bn
self.l2_norm = l2_norm
# print( "reslayer skip = %d" % self.skip )
def forward(self, x):
residual = x
out = self.conv1(x)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn3(out)
if self.skip: out += residual
out = self.relu(out)
return out
|
c3dm-main
|
c3dm/c3dpo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import inspect
import copy
import os
import yaml
import ast
import numpy as np
from tools.attr_dict import nested_attr_dict
from tools.utils import auto_init_args
def convert_to_stringval(cfg_,squeeze=None,stringify_vals=False):
out = {}
convert_to_stringval_rec( [('ROOT',cfg_)], out,
squeeze=squeeze,stringify_vals=stringify_vals)
return out
def convert_to_stringval_rec( flds, output, squeeze=None, stringify_vals=False):
for k,v in flds[-1][1].items():
if isinstance(v,dict):
flds_cp = copy.deepcopy(flds)
flds_cp.append( (k,v) )
convert_to_stringval_rec( flds_cp, output,
squeeze=squeeze, stringify_vals=stringify_vals)
else:
valname = [] ; valname_full = []
for f in flds[1:]:
valname_full.append(squeeze_string(f[0],squeeze))
valname_full.append(squeeze_string(k,squeeze))
valname_full = ".".join(valname_full)
if stringify_vals:
output[valname_full] = str(v)
else:
output[valname_full] = v
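# Example of the flattening performed above (values are kept as-is unless
# stringify_vals=True):
#   convert_to_stringval({'solver': {'lr': 0.1, 'momentum': 0.9}})
#     -> {'solver.lr': 0.1, 'solver.momentum': 0.9}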
def squeeze_key_string(f,squeeze_inter,squeeze_tail):
keys = f.split('.')
tail = keys[-1]
inter = keys[0:-1]
nkeys = len(keys)
if nkeys > 1:
take_from_each = int(np.floor(float(squeeze_inter-nkeys)/float(nkeys-1)))
take_from_each = max(take_from_each,1)
for keyi in range(nkeys-1):
s = inter[keyi]
s = s[0:min(take_from_each,len(s))]
inter[keyi] = s
tail = squeeze_string(tail,squeeze_tail)
inter.append(tail)
out = ".".join( inter )
return out
def squeeze_string(f,squeeze):
if squeeze is None or squeeze > len(f): return f
idx = np.round(np.linspace(0,len(f)-1,squeeze))
idx = idx.astype(int).tolist()
f_short = [ f[i] for i in idx ]
f_short = str("").join(f_short)
return f_short
def get_default_args(C):
# returns dict of keyword args of a callable C
sig = inspect.signature(C)
kwargs = {}
for pname,defval in dict(sig.parameters).items():
if defval.default==inspect.Parameter.empty:
print('skipping %s' % pname)
continue
else:
kwargs[pname] = copy.deepcopy(defval.default)
return kwargs
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def arg_as_list(s):
v = ast.literal_eval(s)
if type(v) is not list:
raise argparse.ArgumentTypeError("Argument \"%s\" is not a list" % (s))
return v
def get_arg_parser(cfg_constructor):
dargs = (get_default_args(cfg_constructor)
if inspect.isclass(cfg_constructor)
else cfg_constructor)
dargs_full_name = convert_to_stringval(dargs,stringify_vals=False)
parser = argparse.ArgumentParser(
description='Auto-initialized argument parser'
)
for darg, val in dargs_full_name.items():
tp = type(val) if val is not None else str
if tp==bool:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=str2bool,
)
elif tp == list:
parser.add_argument(
'--%s' % darg,
type=arg_as_list,
default=val,
help=darg)
else:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=tp,
)
return parser
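# Illustrative sketch of how these helpers combine ('Model' stands in for any
# callable with keyword-argument defaults):
#   cfg = get_default_args(Model)        # nested dict of defaults
#   parser = get_arg_parser(Model)       # one CLI flag per 'a.b.c' option
#   args = vars(parser.parse_args())
#   set_config(cfg, args)                # write the CLI values back into cfg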
def set_config_from_config(cfg,cfg_set):
# cfg_set ... dict with nested options
cfg_dot_separated = convert_to_stringval(cfg_set,stringify_vals=False)
set_config(cfg,cfg_dot_separated)
def set_config_rec(cfg,tgt_key,val,check_only=False):
if len(tgt_key) > 1:
k = tgt_key.pop(0)
if k not in cfg:
#raise ValueError('no such config key %s' % k )
cfg[k] = {}
set_config_rec(cfg[k],tgt_key,val,check_only=check_only)
else:
if check_only:
assert cfg[tgt_key[0]]==val
else:
cfg[tgt_key[0]] = val
def set_config(cfg,cfg_set):
# cfg_set ... dict with .-separated options
for cfg_key,cfg_val in cfg_set.items():
# print('setting %s = %s' % (cfg_key,str(cfg_val)) )
cfg_key_split = [ k for k in cfg_key.split('.') if len(k) > 0 ]
set_config_rec(cfg,copy.deepcopy(cfg_key_split),cfg_val)
set_config_rec(cfg,cfg_key_split,cfg_val,check_only=True)
def set_config_from_file(cfg,cfg_filename):
# set config from yaml file
with open(cfg_filename, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.Loader)  # explicit Loader is required by PyYAML >= 6
set_config_from_config(cfg,yaml_cfg)
def dump_config(cfg):
cfg_filename = os.path.join(cfg.exp_dir,'expconfig.yaml')
with open(cfg_filename, 'w') as yaml_file:
yaml.dump(cfg, yaml_file, default_flow_style=False)
|
c3dm-main
|
c3dm/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as Fu
import torchvision
from torchvision import models
from visdom import Visdom
import numpy as np
from tools.utils import auto_init_args
import torchvision
import collections
class HyperColumNet(nn.Module):
def __init__( self,
trunk_arch='resnet50',
n_upsample=2,
hc_layers=[1,2,3,4],
hcdim=512,
pose_confidence=True,
depth_offset=0.,
smooth=False,
encode_input_keypoints = False,
kp_encoding_sig=1.,
dimout=1,
dimout_glob = 0,
dimout_glob_alpha = 0,
n_keypoints=12,
architecture='hypercolumns',
dilate_start=2,
glob_inst_norm=False,
final_std=0.01,
final_bias=-1.,
glob_activation=True,
pretrained=True ):
super().__init__()
auto_init_args(self)
trunk = getattr(torchvision.models,trunk_arch)(pretrained=pretrained)
# nfc = trunk.fc.in_features
self.layer0 = torch.nn.Sequential( trunk.conv1,
trunk.bn1,
trunk.relu,
trunk.maxpool )
if self.architecture=='hypercolumns':
for l in [1, 2, 3, 4]:
lname = 'layer%d'%l
setattr(self, lname, getattr(trunk,lname))
for hcl in hc_layers:
lname = 'hc_layer%d'%hcl
indim = getattr(trunk,'layer%d'%hcl)[-1].conv1.in_channels
# if ((self.dimout_glob + self.dimout_glob_alpha) > 0 \
# and hcl==hc_layers[-1]):
# if not self.smooth:
# glob_layers = [ torch.nn.Conv2d(indim, indim,1,bias=True,padding=0),
# torch.nn.ReLU(),
# nn.Conv2d(indim, self.dimout_glob+self.dimout_glob_alpha, \
# 1, bias=True, padding=0) ]
# if self.glob_activation:
# glob_layers.insert(1, \
# torch.nn.InstanceNorm2d(indim) if self.glob_inst_norm \
# else torch.nn.BatchNorm2d(indim))
# else:
# glob_layers = [ nn.Conv2d(indim, self.dimout_glob+self.dimout_glob_alpha, \
# 1, bias=True, padding=0) ]
# self.final_glob = torch.nn.Sequential(*glob_layers )
if self.encode_input_keypoints:
indim += self.n_keypoints
if not self.smooth:
layer_ = torch.nn.Sequential( \
torch.nn.Conv2d(indim, hcdim, 3, bias=True, padding=1),
torch.nn.BatchNorm2d(hcdim),
torch.nn.ReLU(),
torch.nn.Conv2d(hcdim, hcdim, 3, bias=True, padding=1),
)
else:
layer_ = torch.nn.Sequential( \
torch.nn.Conv2d(indim, hcdim, 3, bias=True, padding=1),
)
setattr(self, lname, layer_)
if not self.smooth:
up_layers = [ torch.nn.Conv2d(hcdim,hcdim,3,bias=True,padding=1),
torch.nn.BatchNorm2d(hcdim),
torch.nn.ReLU(),
nn.Conv2d(hcdim, dimout, 3, bias=True, padding=1) ]
else:
up_layers = [ nn.Conv2d(hcdim, dimout, 3, bias=True, padding=1) ]
llayer = up_layers[-1]
llayer.weight.data = \
llayer.weight.data.normal_(0., self.final_std)
if self.final_bias > -1.:
llayer.bias.data = \
llayer.bias.data.fill_(self.final_bias)
print('hcnet: final bias = %1.2e, final std=%1.2e' % \
(llayer.bias.data.mean(),
llayer.weight.data.std())
)
self.final = torch.nn.Sequential(*up_layers)
elif self.architecture=='dilated':
if self.dimout_glob > 0:
raise NotImplementedError('not done yet')
# for l in [1, 2, 3, 4]:
# lname = 'layer%d'%l
# setattr(self, lname, getattr(trunk,lname))
if self.encode_input_keypoints:
c1 = self.layer0[0]
wsz = list(c1.weight.data.shape)
wsz[1] = self.n_keypoints
c1_add = c1.weight.data.new_zeros( wsz ).normal_(0.,0.0001)
c1.weight.data = torch.cat( (c1.weight.data, c1_add), dim=1 )
c1.in_channels += self.n_keypoints
layers = [self.layer0]
li = 0
for l in [1,2,3,4]:
lname = 'layer%d'%l
m = getattr(trunk,lname)
if l >= self.dilate_start:
for mm in m.modules():
if type(mm) == torch.nn.Conv2d:
mm.stride = (1,1)
if mm.kernel_size==(3,3):
dil = (li+2)**2
mm.dilation = ( dil, dil )
mm.padding = ( dil, dil )
li += 1
layers.append(m)
# setattr(self, lname, m)
for m in layers[-1][-1].modules():
if hasattr(m, 'out_channels'):
lastdim = m.out_channels
if True: # deconv for final layer (2x higher resol)
layers.append( torch.nn.ConvTranspose2d( \
lastdim, dimout, kernel_size=3, \
stride=2, output_padding=1, padding=1, bias=True) )
else: # classic conv
layers.append( torch.nn.Conv2d( \
lastdim, dimout, kernel_size=3, \
stride=1, padding=1, bias=True) )
layers[-1].weight.data = \
layers[-1].weight.data.normal_(0., self.final_std)
self.trunk = torch.nn.Sequential(*layers )
self.mean = torch.FloatTensor([0.485, 0.456, 0.406])
self.std = torch.FloatTensor([0.229, 0.224, 0.225])
def get_last_layer_numchannels(self):
return getattr(self,'layer4')[-1].conv1.in_channels
def norm_image(self, x):
mean = self.mean[None,:,None,None].type_as(x)
std = self.std[None,:,None,None].type_as(x)
return (x - mean) / std
def gkernel( self, sz, rel_scale, mu, sig ):
g = torch.linspace( 0.5, sz-0.5, sz ).type_as(mu)
g = ( (-(g[None,None,:] - mu[:,:,None]*rel_scale)**2) / \
(sig * rel_scale) ).exp()
return g
def make_kp_encoding(self, kp_loc_vis, im_size, grid_size):
rel_scale = [g/i for g,i in zip(grid_size, im_size)]
g_x = self.gkernel( grid_size[1], rel_scale[1], kp_loc_vis[:,0,:],
self.kp_encoding_sig )
g_y = self.gkernel( grid_size[0], rel_scale[0], kp_loc_vis[:,1,:],
self.kp_encoding_sig )
g = g_y[:,:,:,None] * g_x[:,:,None,:]
g *= kp_loc_vis[:,2,:,None, None]
return g
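# make_kp_encoding renders each visible keypoint as a separable Gaussian-shaped
# heatmap on the feature grid: gkernel produces the per-axis profiles, their
# outer product g_y * g_x gives a (B, K, H, W) map, and multiplying by the
# visibility channel zeroes the maps of invisible keypoints.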
def run_hc(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
if skip_norm_image:
x = self.layer0(images)
else:
x = self.layer0(self.norm_image(images))
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
x4_avg = x4.mean((2,3), keepdim=True) # TODO: keepdim=False
if only_glob:
return None, x4_avg  # no dense output is computed when only the global feature is requested
# if (self.dimout_glob + self.dimout_glob_alpha) > 0:
# out_glob = self.final_glob(x4_avg)
# if only_glob:
# return out_glob
# else:
# assert not only_glob
xs = [x1, x2, x3, x4]
if self.encode_input_keypoints:
# append kp_encoding to all xs
kp_encoding = self.make_kp_encoding( \
kp_loc_vis, images.shape[2:], x.shape[2:] )
for i in range(len(xs)):
kp_up_ = Fu.interpolate( kp_encoding, size=xs[i].shape[2:],
mode='bilinear' )
xs[i] = torch.cat( (xs[i], kp_up_), dim=1 )
hc = 0.
upsize = None
for hcl in self.hc_layers:
if upsize==None:
upsize = xs[hcl-1].shape[2:]
lname = 'hc_layer%d'%hcl
f = getattr(self, lname)(xs[hcl-1])
fup = Fu.interpolate(f,size=upsize,mode='bilinear')
hc = hc + fup * (1./len(self.hc_layers))
out = self.final(hc)
return out, x4_avg
# if (self.dimout_glob+self.dimout_glob_alpha) > 0:
# return out, out_glob
# else:
# return out, None
def run_dil(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
assert not only_glob, 'not yet implemented'
if skip_norm_image:
l1in = images
else:
l1in = self.norm_image(images)
if self.encode_input_keypoints:
kp_encoding = self.make_kp_encoding( \
kp_loc_vis, images.shape[2:], images.shape[2:] )
l1in = torch.cat( (l1in, kp_encoding), dim=1 )
return self.trunk(l1in)
def forward(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
if self.architecture=='dilated':
out = self.run_dil(images, kp_loc_vis=kp_loc_vis, only_glob=only_glob, skip_norm_image=skip_norm_image)
elif self.architecture=='hypercolumns':
out = self.run_hc(images, kp_loc_vis=kp_loc_vis, only_glob=only_glob, skip_norm_image=skip_norm_image)
else:
raise ValueError()
return out
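# Minimal calling sketch for the default 'hypercolumns' architecture (tensor
# shapes are illustrative): images is (B, 3, H, W), kp_loc_vis stacks the 2D
# keypoint locations and visibilities into (B, 3, K), and the forward pass
# returns the dense (B, dimout, H', W') map plus the pooled ResNet feature:
#   net = HyperColumNet(dimout=3)
#   kp_loc_vis = torch.cat((kp_loc, kp_vis[:, None, :]), dim=1)
#   dense_out, glob_feat = net(images, kp_loc_vis=kp_loc_vis)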
# taken from FCRN_pytorch on github
class FasterUpProj(nn.Module):
# Faster UpProj decoder using pixel shuffle
class faster_upconv(nn.Module):
def __init__(self, in_channel):
super(FasterUpProj.faster_upconv, self).__init__()
self.conv1_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=3)),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv2_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=(2, 3))),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv3_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=(3, 2))),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv4_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=2)),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.ps = nn.PixelShuffle(2)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# print('Upmodule x size = ', x.size())
x1 = self.conv1_(nn.functional.pad(x, (1, 1, 1, 1)))
x2 = self.conv2_(nn.functional.pad(x, (1, 1, 0, 1)))
x3 = self.conv3_(nn.functional.pad(x, (0, 1, 1, 1)))
x4 = self.conv4_(nn.functional.pad(x, (0, 1, 0, 1)))
# print(x1.size(), x2.size(), x3.size(), x4.size())
x = torch.cat((x1, x2, x3, x4), dim=1)
x = self.ps(x)
return x
class FasterUpProjModule(nn.Module):
def __init__(self, in_channels, smooth=False):
super(FasterUpProj.FasterUpProjModule, self).__init__()
out_channels = in_channels // 2
self.smooth = smooth
self.upper_branch = nn.Sequential(collections.OrderedDict([
('faster_upconv', FasterUpProj.faster_upconv(in_channels)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)),
('batchnorm', nn.BatchNorm2d(out_channels)),
]))
if self.smooth:
self.bottom_branch = None
else:
self.bottom_branch = FasterUpProj.faster_upconv(in_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.upper_branch(x)
if self.smooth:
x2 = Fu.interpolate(x[:,:x1.shape[1],:,:],size=x1.shape[2:],mode='bilinear')
else:
x2 = self.bottom_branch(x)
x = x1 + x2
x = self.relu(x)
return x
def __init__(self, in_channel, n_layers=2, smooth=False, dimout=2):
super(FasterUpProj, self).__init__()
layers = []
for l in range(n_layers):
indim = in_channel // int(2**l)
layers.append(self.FasterUpProjModule(indim,smooth=smooth))
last = nn.Conv2d(indim//2, dimout, 3, padding=1)
layers.append( last )
self.trunk = nn.Sequential(*layers)
def forward(self,x):
return self.trunk(x)
# def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
# """3x3 convolution with padding"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
# padding=dilation, groups=groups, bias=False, dilation=dilation)
# def upconv(in_planes, out_planes, stride=2, groups=1, dilation=1):
# """up convolution"""
# kernel_size = 2*(stride-1)+1
# pad = int((kernel_size-1)/2)
# return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride=stride, \
# padding=pad, output_padding=pad, groups=groups)
# def conv1x1(in_planes, out_planes, stride=1):
# """1x1 convolution"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
# class UpBottleneck(nn.Module):
# expansion = 4
# def __init__(self, inplanes, planes, stride=1, upfactor=2, groups=1,
# base_width=64, dilation=1, norm_layer=None):
# super(UpBottleneck, self).__init__()
# if norm_layer is None:
# norm_layer = nn.BatchNorm2d
# width = int(planes * (base_width / 64.)) * groups
# # Both self.conv2 and self.downsample layers downsample the input when stride != 1
# self.conv1 = conv1x1(inplanes, width)
# self.bn1 = norm_layer(width)
# self.conv2 = upconv(width, width, upfactor, groups)
# self.bn2 = norm_layer(width)
# self.conv3 = conv1x1(width, planes * self.expansion)
# self.bn3 = norm_layer(planes * self.expansion)
# self.relu = nn.ReLU(inplace=True)
# self.scale = scale
# self.stride = stride
# def forward(self, x):
# identity = x
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
# out = self.conv3(out)
# out = self.bn3(out)
# out += identity
# identity = Fu.interpolate(x,size=out.shape[2:],mode='bilinear')
# out = self.relu(out)
# return out
|
c3dm-main
|
c3dm/hypercolumnet.py
|
c3dm-main
|
c3dm/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
from functools import lru_cache
import math
import os
import yaml
import numpy as np
import torch
import torch.nn.functional as Fu
from pytorch3d.renderer import cameras
from pytorch3d.transforms import so3
from visdom import Visdom
import c3dpo
from hypercolumnet import HyperColumNet
from config import get_default_args
from tools import model_io
from tools import so3 as so3int # TODO: move random 2d rot elsewhere; use 6d from pt3d
from tools import vis_utils
import tools.eval_functions as eval_func
import tools.functions as func
from tools.loss_models import AppearanceLoss, GaussianLayer
from tools import utils
from tools.tensor_accumulator import TensorAccumulator
def conv1x1(in_planes, out_planes, init='no', cnv_args={
'bias': True,
'kernel_size': 1,
}, std=0.01):
"""1x1 convolution"""
cnv = torch.nn.Conv2d(in_planes, out_planes, **cnv_args)
# init weights ...
if init == 'no':
pass
elif init == 'normal0.01':
cnv.weight.data.normal_(0., std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
else:
assert False
return cnv
# Module that predicts shape and texture parameters, along with rotation
class GlobalHead(torch.nn.Module):
def __init__(
self,
input_channels,
alpha_geom_size=0,
alpha_tex_size=0,
camera_code_size=0,
add_shared_layer=True,
glob_inst_norm=False,
):
super(GlobalHead, self).__init__()
if not(alpha_tex_size > 0 or alpha_geom_size >= 0 or camera_code_size > 0):
return
make_fc_layer = lambda dimout: conv1x1(input_channels, dimout, init='normal0.01')
# conv with dimout 0 does not work; use this instead
make_degenerate = lambda feat: feat.new_empty(feat.size()[0], 0, 1, 1)
# shared layer by all global stuff
self.shared_layer = None
if add_shared_layer:
self.shared_layer = torch.nn.Sequential(
make_fc_layer(input_channels),
torch.nn.InstanceNorm2d(input_channels)
if glob_inst_norm
else torch.nn.BatchNorm2d(input_channels),
torch.nn.ReLU(),
)
self.alpha_geom_layer = (
make_fc_layer(alpha_geom_size)
if alpha_geom_size > 0
else make_degenerate if alpha_geom_size == 0 else None
)
self.alpha_tex_layer = make_fc_layer(alpha_tex_size) if alpha_tex_size > 0 else None
self.rot_layer = make_fc_layer(camera_code_size) if camera_code_size else None
def forward(self, feat):
if self.shared_layer is not None:
feat = self.shared_layer(feat)
return tuple([
(head(feat)[:,:,0,0] if head is not None else None)
for head in (self.alpha_geom_layer, self.alpha_tex_layer, self.rot_layer)
])
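# GlobalHead maps the pooled trunk feature (B, C, 1, 1) to up to three global
# codes: geometry coefficients alpha_geom, a texture descriptor alpha_tex and
# a 6-d camera rotation code; heads that were not requested return None (or an
# empty tensor when alpha_geom_size == 0).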
class Model(torch.nn.Module):
def __init__( self,
TRUNK = get_default_args(HyperColumNet),
APPEARANCE_LOSS = get_default_args(AppearanceLoss),
nrsfm_exp_path = '',
huber_scaling_basis = 0.01,
huber_scaling_repro = 0.01,
photo_min_k = 6,
photo_reenact = False,
repro_loss_min_ray_length = 0.0,
app_mask_image = False,
detach_app = True,
uv_model_use_bn = True,
uv_model_l2_norm = False,
sampled_sil_n_samples = 1000,
sampled_sph_chamfer = 0,
spherical_embedding_radius = 1.,
c3dpo_flipped=True,
reparametrize_nrsfm_mean = True,
scale_aug_range = 0.2,
t_aug_range = 0.02,
rot_aug_range = 3.14/12.,
custom_basis_size = -1,
n_images_for_app_model = -1,
min_depth = 0.,
argmin_translation_min_depth = 0.,
argmin_translation_ray_projection = True,
ray_reprojection = False,
dilate_basis_loss = 0.,
EMBED_DB = get_default_args(TensorAccumulator),
embed_db_eval = False,
app_model_mask_gt = False,
loss_weights = {
'loss_basis': 1.,
'loss_alpha': 0.,
'loss_rotation': 0.,
'loss_repro': 0.0,
'loss_vgg': 0.0,
'loss_sph_emb_to_cam': 0.0,
'loss_sph_sample_mask': 0.0,
'loss_vgg_app': 0.0,
'loss_l1_app': 0.0,
'loss_ssim_app': 0.0,
'loss_repro_2d': 0.0,
'loss_repro_ray': 0.0,
},
log_vars=[ 'objective',
'loss_basis',
'loss_alpha',
'loss_rotation',
'loss_repro',
'loss_repro_2d',
'loss_repro_ray',
'loss_vgg',
'loss_sph_emb_to_cam',
'loss_sph_sample_mask',
'loss_vgg_app',
'loss_l1_app',
'loss_ssim_app',
'sig_avg',
# depth error metrics
'pclerr_dist',
],
**kwargs ):
super(Model, self).__init__()
# autoassign constructor params to self
utils.auto_init_args(self)
assert (not uv_model_use_bn) and uv_model_l2_norm, 'Do not use BN UV network!'
self._load_and_fix_nrsfm()
self.alpha_bias = None
self.basis_size = custom_basis_size if custom_basis_size >= 0 else self.nrsfm_model.shape_basis_size
if self.basis_size == self.nrsfm_model.shape_basis_size:
# will be able to compute basis matching loss
basis = torch.cat((
self.nrsfm_model.shape_layer.bias.data.view(3, -1, 1),
self.nrsfm_model.shape_layer.weight.data.view(3, -1, self.basis_size),
), dim=2)
self.nrsfm_model_basis = basis.permute(2,0,1).detach().cuda(0)
self.alpha_bias = self.nrsfm_model.alpha_layer.bias[None,:,None,None,None].cuda(0)
TRUNK['dimout'] = 3
self.trunk = HyperColumNet(**TRUNK)
self._make_glob_layers()
if self.trunk.dimout_glob > 0:
self._make_texture_model()
self._make_geom_deformation_model()
# appearance loss
self.appearance_loss = AppearanceLoss(**APPEARANCE_LOSS)
# init the embed database
EMBED_DB['db_dim'] = TRUNK['dimout']
self.embed_db = TensorAccumulator(**EMBED_DB)
def _load_and_fix_nrsfm(self):
self.nrsfm_model = load_nrsfm_model(self.nrsfm_exp_path)
self.nrsfm_model.z_augment = False
self.nrsfm_model.z_equivariance = False
self.nrsfm_model.canonicalization.use = False
self.nrsfm_model.perspective_depth_threshold = \
max(self.nrsfm_model.perspective_depth_threshold, self.min_depth)
self.nrsfm_model_kp_rescale = float(self.nrsfm_model.keypoint_rescale)
if self.reparametrize_nrsfm_mean:
self.nrsfm_model.reparametrize_mean_shape()
self.nrsfm_mean_radius = self._get_nrsfm_mean_radius()
for prm in self.nrsfm_model.parameters():
prm.requires_grad = False
self.nrsfm_model_basis = None
self.projection_type = self.nrsfm_model.projection_type
assert self.nrsfm_model.keypoint_rescale == 1.0 or self.projection_type == 'orthographic'
def _make_glob_layers(self):
indim = self.trunk.get_last_layer_numchannels()
# TODO: move the relevant config params from trunk
dimout_alpha_tex = self.trunk.dimout_glob
dimout_alpha_geom = self.basis_size
self.global_head = GlobalHead(
indim,
dimout_alpha_geom,
dimout_alpha_tex,
6,
glob_inst_norm=self.trunk.glob_inst_norm,
)
def _make_texture_model(self):
# make MLP mapping basis vectors + app encoding to colors
app_dim = 3 + self.trunk.dimout_glob
app_layers = c3dpo.make_trunk(
dim_in=app_dim,
n_fully_connected=512,
n_layers=3,
use_bn=self.uv_model_use_bn,
l2_norm=self.uv_model_l2_norm,
)
app_layers.append(torch.nn.Conv2d(512, 3, 1))
self.app_model = torch.nn.Sequential(*app_layers)
def _make_geom_deformation_model(self):
delta_layers = c3dpo.make_trunk(
dim_in=3,
n_fully_connected=512,
n_layers=3,
use_bn=self.uv_model_use_bn,
l2_norm=self.uv_model_l2_norm,
)
dim_out = (self.basis_size+1)*3
delta_layers.append( torch.nn.Conv2d(512, dim_out, 1) )
if self.trunk.final_std != 0.01:
ldelta = delta_layers[-1]
ldelta.weight.data = \
ldelta.weight.data.normal_(0., self.trunk.final_std)
ldelta.bias.data = \
ldelta.bias.data.fill_(self.trunk.final_bias)
print('deltanet: final bias = %1.2e, final std=%1.2e' % \
(ldelta.bias.data.mean(),
ldelta.weight.data.std())
)
# delta vectors predicted from the mean vectors
self.delta_model = torch.nn.Sequential(*delta_layers)
def _get_nrsfm_mean_radius(self):
mu = self.nrsfm_model.get_mean_shape().cuda().detach()
mumu = mu.mean(dim=1, keepdim=True)
return ((mu - mumu) ** 2).mean() ** 0.5
@lru_cache()
def _get_image_grid(self, image_size, grid_size):
imgrid = func.image_meshgrid( ((0, image_size[0]), (0, image_size[1])),
grid_size )
imgrid = imgrid[[1,0]] # convert from yx to xy
return imgrid
def _get_distance_from_grid(self, predicted_coord, image_size,
masks=None, K=None, ray_reprojection=True):
ba = predicted_coord.shape[0]
imgrid = self._get_image_grid(image_size, predicted_coord.size()[2:])
imgrid = imgrid.type_as(predicted_coord)[None].repeat(ba,1,1,1)
if masks is not None:
masks = masks.view(ba, -1)
if ray_reprojection:
#assert self.projection_type=='perspective'
imgrid_proj = func.calc_ray_projection(
predicted_coord.view(ba,3,-1),
imgrid.view(ba,2,-1),
K = K,
min_depth=self.min_depth,
min_r_len=self.repro_loss_min_ray_length,
)
err = func.avg_l2_huber(
imgrid_proj,
predicted_coord.view(ba,3,-1),
scaling=self.huber_scaling_repro,
mask=masks
)
else:
shape_reprojected_image, _ = self.nrsfm_model.camera_projection(
func.clamp_depth(predicted_coord, self.min_depth)
)
if self.projection_type=='perspective':
imgrid = self.nrsfm_model.calibrate_keypoints(imgrid, K)
err = func.avg_l2_huber(
shape_reprojected_image.view(ba,2,-1),
imgrid.view(ba,2,-1),
scaling=self.huber_scaling_repro,
mask=masks,
)
return err
def _get_mean_basis_embed(self, embed):
ba, _, he, wi = embed.shape
embed_re = embed.view(ba, self.basis_size+1, 3, he, wi)
embed_mean = embed_re[:, 0, :, :, :]
# add the bias from the alpha layer!
if self.alpha_bias is not None:
embed_mean_add = (embed_re[:,1:,:,:,:] * self.alpha_bias).sum(1)
embed_mean = embed_mean + embed_mean_add
return embed_mean
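# embed_full produced by _get_deltas_and_concat is laid out as
# (B, 3*(basis_size+1), H, W): the first 3 channels are the per-pixel mean
# shape and each following block of 3 channels is one basis vector. The mean
# returned here also absorbs the NR-SFM alpha-layer bias, mirroring
# get_alpha_mean_complement in c3dpo.py.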
def _get_deltas_and_concat(self, embed):
return self.delta_model(embed)
def _gather_supervised_embeddings(self, embed, kp_loc, image_size):
# uses grid sampler now (grid of size KP x 1)
# outputs B x C x KP
ba = embed.shape[0]
image_size_tensor = torch.tensor(image_size).type_as(embed).flip(0)
grid_ = 2. * kp_loc / image_size_tensor[None,:,None] - 1.
grid_ = grid_.permute(0,2,1).view(ba, -1, 1, 2)
supervised_embed = Fu.grid_sample(embed, grid_, align_corners=False)[:,:,:,0]
return supervised_embed
def _get_basis_loss(self, kp_loc, kp_vis, embed, alpha, image_size):
assert self.nrsfm_model_basis is not None, "NRSFM basis not compatible."
ba = kp_loc.shape[0]
if self.dilate_basis_loss > 0.:
ga = GaussianLayer(sigma=self.dilate_basis_loss, separated=True).cuda()
embed = ga(embed)
kp_embed_view = self._gather_supervised_embeddings(
embed, kp_loc, image_size
)
gt_basis = self.nrsfm_model_basis.reshape(
-1, self.nrsfm_model.n_keypoints
)[None].repeat(ba,1,1).detach()
return func.avg_l2_huber( gt_basis, kp_embed_view,
scaling=self.huber_scaling_basis,
mask=kp_vis[:,None,:],
reduce_dims=[],
)
def _get_rotation_loss(self, est_rotation, nrsfm_rotation):
rel_rotation = torch.eye(3, 3).expand_as(est_rotation)
return 1.0 - torch.mean(
so3.so3_relative_angle(est_rotation, nrsfm_rotation, cos_angle=True)
)
def _adjust_nrsfm_model_kp_scale(self, orig_image_size, image_size):
if self.projection_type=='perspective':
# dont change ...
pass
elif self.projection_type=='orthographic':
rel_scale = 0.5 * sum( \
float(orig_image_size.mean(0)[i]) / image_size[i] \
for i in (0,1) )
self.nrsfm_model.keypoint_rescale = \
self.nrsfm_model_kp_rescale * rel_scale
else:
raise ValueError(self.projection_type)
def _similarity_aug(self, images, kp_loc, kp_vis, masks=None, depths=None):
"""
augment images, depths, masks and kp_loc using random
similarity transformation
"""
ba, _, he, wi = images.shape
# random scale
r_scl = images.new_zeros(ba,).uniform_(1., 1.+self.scale_aug_range)
r_rot = so3int.random_2d_rotation(ba, images.type(), self.rot_aug_range)
# random translation
imdiag = float(np.sqrt(he * wi))
r_t = images.new_zeros(ba,2).uniform_( \
-imdiag*self.t_aug_range, imdiag*self.t_aug_range)
# orig image grid
grid_ = self._get_image_grid(images.shape[2:], images.shape[2:])
grid_flat = grid_.type_as(images).repeat(ba,1,1,1).view(ba,2,-1)
# 1st transform the keypoints
kp_loc = torch.bmm(r_rot, kp_loc)
kp_loc = kp_loc * r_scl[:,None,None]
kp_loc = kp_loc - r_t[:,:,None]
# adjust the visibilities
ok = (kp_loc[:,0,:] >= 0.) * (kp_loc[:,1,:] >= 0.) * \
(kp_loc[:,0,:] < wi) * (kp_loc[:,1,:] < he)
kp_vis = kp_vis * ok.float()
kp_loc[kp_vis[:, None, :].expand_as(kp_loc) < 0.5] = 0.0
# then the image but with inverse trans
grid_t = torch.bmm(r_rot.permute(0,2,1), grid_flat)
grid_t = grid_t / r_scl[:,None,None]
grid_t = grid_t + r_t[:,:,None]
grid_t = grid_t / torch.FloatTensor([wi,he])[None,:,None].type_as(grid_t) # norm to 0, 1
grid_t = grid_t * 2. - 1. # norm to -1, 1
grid_t = grid_t.view(ba,2,he,wi).permute(0,2,3,1).contiguous()
# sample the images, depth, masks
images = Fu.grid_sample(images, grid_t, mode='bilinear', align_corners=False)
if depths is not None:
depths = Fu.grid_sample(depths, grid_t, mode='nearest', align_corners=False)
if masks is not None:
masks = Fu.grid_sample(masks, grid_t, mode='nearest', align_corners=False)
return images, kp_loc, kp_vis, masks, depths
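# Note on _similarity_aug: the keypoints are transformed with the forward
# similarity (rotate, scale, translate) while the sampling grid uses the
# inverse transform, so grid_sample warps images / depths / masks consistently
# with the keypoints; keypoints pushed outside the image are marked invisible.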
def run_on_embed_db(self, preds, texture_desc, K, masks=None, image_size=None):
embed = self.embed_db.get_db()
embed = embed[None,:,:,None].repeat(preds['phi']['T'].size()[0], 1, 1, 1)
# we have to downscale the embeds to make everything well-behaved
embed_full = self._get_deltas_and_concat(embed)
phi_out = self._get_shapes_and_projections(embed_full, None, preds['phi'], K)
out = dict(
embed_db_mean=embed_full,
embed_db_shape_canonical=phi_out['shape_canonical_dense'],
embed_db_shape_camera_coord=phi_out['shape_camera_coord_dense'],
)
if texture_desc is not None:
app = self._run_app_model(embed_full, texture_desc, embed, skip_sph_assert=True)
out['embed_db_app'] = app
return out
def _merge_masked_tensors(self, pcl, masks):
c = pcl.size()[1]
pcl = pcl.transpose(0, 1).reshape(1, c, -1)
if masks is not None:
pcl = pcl[..., :, masks.reshape(-1) > 0.5]
return pcl
def _assert_spherical_embed(self, embed):
norms = (embed**2).sum(1).sqrt()
# we assert that the norms are constant (std <= 0.01)
# (in case we want to have different radius of the sphere)
assert (
embed.shape[1]==3
and float(norms.std()) <= 1e-2
), 'This can only run on spherical embeds!'
def _get_sph_embed_towards_camera_loss(self, embed, masks, R, eps=1e-8):
ba = embed.size()[0]
embed = embed.reshape(ba, 3, -1)
masks = masks.reshape(ba, 1, -1)
avg_emb = Fu.normalize((embed * masks).sum(dim=2) / (masks.sum(dim=2) + eps), dim=-1)
# Rotated by R, it should be ideally (0, 0, 1)
# swap - with + for the non-flipped C3DPO
sign = -1.0 if self.c3dpo_flipped else +1.0
loss = 1. + sign * torch.matmul(R, avg_emb[..., None])[:, 2].mean()
return loss
def _calc_depth_pcl_errs(self, pred, gt, masks=None):
# reshape the predicted depth to gt size (and rescale the values too)
pred_up = Fu.interpolate(pred, gt.shape[2:], mode='bilinear')
errs = eval_func.eval_depth_scale_inv(
pred_up.detach(), gt.detach(), masks=masks
)
return {'pclerr_dist': errs.mean()}
def _get_canonical_shape(self, dense_basis, alpha, masks, target_std=2.0):
ba, di, he, wi = dense_basis.size()
basis = dense_basis.reshape(ba, -1, 3*he*wi)
canon = basis[:, :1, :] + torch.bmm(alpha[:, None, :], basis[:, 1:, :])
return canon.reshape(ba, 3, he, wi)
def _argmin_translation(self, shape_camera_coord, shape_proj, shape_vis, K=None):
if self.projection_type=='orthographic':
projection, _ = self.nrsfm_model.camera_projection(shape_camera_coord)
T_amin = func.argmin_translation(projection, shape_proj, v=shape_vis)
T = Fu.pad(T_amin, (0,1), 'constant', float(0))
elif self.projection_type=='perspective':
ba = shape_camera_coord.size()[0]
if K is None:
K = torch.eye(3).type_as(shape_proj)[None].expand(ba, 3, 3)
if self.argmin_translation_ray_projection:
T = func.find_camera_T(
K, shape_camera_coord, shape_proj, v=shape_vis
)
else:
T = func.minimise_2d_residual_over_T(
K, shape_camera_coord, shape_proj, v=shape_vis
)
else:
raise ValueError(self.projection_type)
return T
def _argmin_camera(self, shape_canonical, masks, grid_normalised, phi):
ba = shape_canonical.size()[0]
centre = torch.sum(
shape_canonical.reshape(ba, 3, -1) * masks.reshape(ba, 1, -1),
dim=(0,2,),
keepdim=True,
) / masks.sum()
shape_centered = shape_canonical.reshape(ba, 3, -1) - centre
assert 'R' in phi, "Rotation should be given for argmin_T"
shape_camera_rotated = torch.bmm(phi['R'], shape_centered)
T = self._argmin_translation(
shape_camera_rotated,
grid_normalised.expand(shape_camera_rotated[:,:2,:].size()),
masks.reshape(ba, -1),
K=None, # ! points already calibrated
)
min_depth = self.argmin_translation_min_depth
if min_depth > 0.:
T = torch.cat((T[:,0:2], torch.clamp(T[:,2:3], min_depth)), dim=1)
T = T - torch.matmul(phi['R'], centre)[:, :, 0]
return T
def _get_shapes_and_projections(
self, dense_basis, masks, global_desc, K, image_repro_gt=None, alpha=None
):
masks = (
masks if masks is not None
else dense_basis.new_ones(dense_basis[:, :1, ...].size())
)
assert len(masks.size()) == 4
ba = dense_basis.size()[0]
kp_mean = global_desc['kp_mean']
phi = copy.copy(global_desc)
rescale = self.nrsfm_model.keypoint_rescale
if alpha is not None:
phi['shape_coeff'] = alpha
if self.projection_type=='perspective':
focal = torch.stack((K[:, 0, 0], K[:, 1, 1]), dim=1)
p0 = K[:, :2, 2]
camera = cameras.SfMPerspectiveCameras(
R=phi['R'].permute(0, 2, 1),
focal_length=focal, principal_point=p0,
device=dense_basis.device,
)
else:
camera = cameras.SfMOrthographicCameras(
R=phi['R'].permute(0, 2, 1),
device=dense_basis.device,
)
shape_canonical = self._get_canonical_shape(
dense_basis, phi['shape_coeff'], masks
)
if 'T' not in phi:
# the grid has to be calibrated (=pre-multiplied by K^{-1}) first!
grid_im_coord = Fu.pad(
image_repro_gt.reshape(1, 2, -1).permute(0,2,1), (0, 1), value=1.0
).repeat(ba, 1, 1)
grid_im_coord = camera.unproject_points(
grid_im_coord, world_coordinates=False
)[:,:,:2].permute(0,2,1)
grid_normalised = (grid_im_coord - kp_mean[:,:,None]) * rescale
phi['T'] = self._argmin_camera(
shape_canonical, masks, grid_normalised, phi
)
camera.T = phi['T']
shape_canonical_pt3d = shape_canonical.reshape(ba, 3, -1).permute(0, 2, 1)
shape_camera_coord = camera.get_world_to_view_transform().transform_points(
shape_canonical_pt3d
)
shape_image_coord_cal_dense = shape_camera_coord
depth_dense = shape_camera_coord[:,:,2:]
shape_proj_image = camera.transform_points(shape_canonical_pt3d)
shape_reprojected_image = shape_proj_image[:, :, :2]
# correct for the kp normalisation
if self.projection_type == 'perspective':
shape_image_coord_cal_dense = shape_image_coord_cal_dense + Fu.pad(
kp_mean[:,None] * shape_camera_coord[:,:,2:], (0, 1), value=0.0
)
shape_reprojected_image = shape_reprojected_image + (kp_mean * focal)[:, None]
else:
assert self.projection_type == 'orthographic'
shape_image_coord_cal_dense = (
shape_image_coord_cal_dense / rescale +
Fu.pad(kp_mean[:,None], (0, 1), value=0.0)
)
shape_reprojected_image = (
shape_reprojected_image / rescale + kp_mean[:, None]
)
return dict(
phi=phi,
shape_canonical_dense=shape_canonical,
shape_camera_coord_dense=shape_camera_coord.permute(0, 2, 1).reshape_as(shape_canonical),
depth_dense=depth_dense.reshape_as(shape_canonical[:, :1]),
shape_reprojected_image=shape_reprojected_image.permute(0, 2, 1).reshape_as(shape_canonical[:, :2]),
shape_image_coord_cal_dense=shape_image_coord_cal_dense.permute(0, 2, 1).reshape_as(shape_canonical),
)
def _get_best_scale(self, preds, image_size):
if self.projection_type=='orthographic':
shape_camera_coord = preds['shape_image_coord_cal_dense']
ba = shape_camera_coord.shape[0]
imgrid = self._get_image_grid(image_size, shape_camera_coord.size()[2:])
imgrid = imgrid.type_as(shape_camera_coord)[None].repeat(ba,1,1,1)
projection, depth = self.nrsfm_model.camera_projection(shape_camera_coord)
s, T = func.argmin_translation_scale(projection, imgrid, v=preds['embed_masks'])
shape_best = torch.cat((
s[:, None, None, None] * shape_camera_coord[:, :2] + T[:, :, None, None],
s[:, None, None, None] * shape_camera_coord[:, 2:]
), dim=1)
elif self.projection_type=='perspective':
# no scale opt here, won't help
shape_best = preds['shape_image_coord_cal_dense']
else:
raise ValueError(self.projection_type)
return shape_best
def _get_sampled_sph_loss(self, preds, K, image_size):
masks = preds['embed_masks']
ba = masks.shape[0]
embed_sphere = torch.randn(
size=(ba, 3, self.sampled_sil_n_samples*10, 1),
dtype=masks.dtype, device=masks.device)
embed_sphere = Fu.normalize(
embed_sphere, dim=1) * self.spherical_embedding_radius
# adjust the mean!
embed_full = self._get_deltas_and_concat(embed_sphere)
dense_phi = self._get_shapes_and_projections(embed_full, masks, preds, K)
image_coords = dense_phi['shape_reprojected_image']
shape = dense_phi['shape_image_coord_cal_dense']
image_size_tensor = torch.FloatTensor(
[s for s in image_size]).type_as(embed_sphere).flip(0)
grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
grid_prm = grid.permute(0, 2, 3, 1)
# get all scales until the smallest side is <= 4
samples = []
scl = -1
while min(masks.shape[2:]) > 4:
scl += 1
if scl > 0:
masks = (Fu.interpolate(
masks, scale_factor=0.5, mode='bilinear') > 0.).float()
samples.append(Fu.grid_sample(masks, grid_prm, align_corners=False).view(-1))
samples = torch.cat(samples, dim=0)
loss = (1 - samples).mean()
return {
'loss_sph_sample_mask': loss,
'sph_sample_projs': grid,
'sph_sample_3d': shape,
}
def _get_photometric_losses(
self,
images,
image_coords,
basis_embed,
embed_canonical=None,
n_min=5,
masks=None,
texture_desc=None,
):
ba = images.shape[0]
n_min = min(ba-1, n_min)
assert ba > 1, 'batch_size > 1 for photo losses!'
assert not (self.photo_reenact and texture_desc is None)
image_size = list(images.shape[2:])
image_size_render = list(basis_embed.shape[2:])
image_size_tensor = torch.FloatTensor(image_size).type_as(basis_embed).flip(0)
grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
grid = grid.permute(0, 2, 3, 1)
# image warping loss
if self.photo_reenact:
images_reenact = self._run_app_model(
basis_embed, texture_desc[0:1].repeat(ba, 1), embed_canonical
)
else:
images_reenact = images
images_reproject = Fu.grid_sample(images_reenact, grid, align_corners=False)
# resample ref image to images_resample resolution
images_ref = Fu.interpolate(images[:1], size=images_reproject.shape[2:])
images_ref = images_ref.expand_as(images_reproject)
loss_vgg, _, _ = self.appearance_loss(images_reproject, images_ref)
loss_vgg = loss_vgg[:, 0]
# transplant the rendered image by tokp pooling
assert (~torch.isnan(loss_vgg)).all(), "Some photometric loss values are NaN."
if masks is not None:
# weight the losses by seg masks
loss_vgg = masks[:1, 0] * loss_vgg
loss_topk, idx_render = torch.topk(loss_vgg[1:], n_min-1, dim=0, largest=False)
# make sure we include the target view
loss_vgg = (loss_topk.sum(0) + loss_vgg[0]) / n_min
idx_render = idx_render[:,None].expand(-1, 3, -1, -1)
im_render = {
'loss_vgg': (
torch.gather(images_reproject, 0, idx_render).sum(0) + images_reproject[0]
) / n_min
}
out = {}
out['loss_vgg'] = loss_vgg.mean()
out['images_reproject'] = images_reproject.detach()
out['images_gt'] = images_ref.detach()
out['image_ref_render'] = im_render
out['images'] = Fu.interpolate(images, size=images_reproject.shape[2:]).detach()
out['images_reenact'] = Fu.interpolate(images_reenact, size=images_reproject.shape[2:]).detach()
return out
def _mask_gt_image(self, image, mask):
avgcol = (image * mask).sum((2, 3)) / mask.sum((2, 3)).clamp(1)
image_m = image * mask + (1-mask) * avgcol[:, :, None, None]
# blur and mix
ga = GaussianLayer(sigma=5., separated=True).cuda()
image_mf = ga(image_m)
image_m = mask * image_m + (1-mask) * image_mf
return image_m
def _run_app_model(self, embed, texture_desc, embed_canonical, skip_sph_assert=False):
# run the appearance model taking as input per-pixel uv-like
# embeddings `embed` and the global appearance descriptor
# `texture_desc`
n_im_use = self.n_images_for_app_model if \
self.n_images_for_app_model > 0 else embed_canonical.size()[0]
texture_desc = texture_desc[:n_im_use]
embed_for_app = embed_canonical[:n_im_use]
if not skip_sph_assert:
self._assert_spherical_embed(embed_for_app)
if self.detach_app:
embed_for_app = embed_for_app.detach()
embed_app = torch.cat((
texture_desc[:,:,None,None].expand(-1,-1,*list(embed.shape[2:])),
embed_for_app,
), dim=1)
app = self.app_model(embed_app)
return app[:, :3] + 0.5
def _get_app_model_losses(
self,
images,
preds_app,
masks=None,
sigma=None,
):
# for now this is the same
images_pred = preds_app
ba = images_pred.shape[0]
image_size = list(images.shape[2:])
image_size_render = list(images_pred.shape[2:])
if masks is not None:
# weight the losses by seg masks
masks = Fu.interpolate(masks[:ba], size=image_size_render, mode='nearest')
# resample ref image to images_resample resolution
images_gt = Fu.interpolate(images[:ba], size=image_size_render)
# mask the images and do NN interp
if self.app_model_mask_gt:
images_gt = self._mask_gt_image(images_gt, masks)
loss_vgg, loss_rgb, _ = \
self.appearance_loss(
images_pred,
images_gt,
sig=sigma,
mask=masks if self.app_mask_image else None
)
if masks is not None:
# weight the losses by seg masks
loss_vgg, loss_rgb = \
[ (masks * l).sum() / torch.clamp(masks.sum(), 1e-1) \
for l in (loss_vgg, loss_rgb,) ]
else:
loss_vgg, loss_rgb = \
[ l.mean() \
for l in (loss_vgg, loss_rgb,) ]
out = {}
out['loss_vgg'] = loss_vgg
out['loss_l1'] = loss_rgb
out['loss_ssim'] = (loss_rgb * 0.0).detach() # not used
out['images_pred'] = images_pred
out['images_pred_clamp'] = torch.clamp(images_pred,0.,1.)
out['images_gt'] = images_gt
out['images'] = images_gt
return out
def forward(
self,
kp_loc=None,
kp_vis=None,
kp_conf=None,
images=None,
epoch_now=None,
orig_image_size=None,
masks=None,
depths=None,
K=None,
**kwargs
):
ba = images.shape[0] # batch size
image_size = images.size()[2:]
# adjust nrsfm model scale
self._adjust_nrsfm_model_kp_scale(orig_image_size, image_size)
preds = {}
preds['nrsfm_mean_shape'] = self.nrsfm_model.get_mean_shape()
if self.training and (
self.scale_aug_range > 0. or
self.t_aug_range > 0. or
self.rot_aug_range > 0.
):
images, kp_loc, kp_vis, masks, depths = \
self._similarity_aug(images, kp_loc, kp_vis,
masks=masks, depths=depths)
preds.update(
{ 'images_aug': images, 'kp_loc_aug': kp_loc,
'depths_aug': depths, 'masks_aug': masks }
)
embed, glob_features = self.trunk(
images, kp_loc_vis = torch.cat((kp_loc, kp_vis[:,None,:]), dim=1)
)
embed = Fu.normalize(embed, dim=1) * self.spherical_embedding_radius
embed_full = self._get_deltas_and_concat(embed)
#embed_masks = (Fu.interpolate(masks, embed.shape[2:], mode='bilinear') > 0.49).float()
embed_masks = Fu.interpolate(masks, embed.shape[2:], mode='nearest')
image_repro_gt = self._get_image_grid(image_size, embed_full.size()[2:])
preds['embed'] = embed
preds['embed_full'] = embed_full
preds['embed_masks'] = embed_masks
preds['embed_mean'] = self._get_mean_basis_embed(embed_full)
preds['image_repro_gt'] = image_repro_gt
alpha_geom, texture_desc, rotation_code = self.global_head(glob_features)
self.nrsfm_model.eval()
preds['nrsfm'] = self.nrsfm_model(
kp_loc=kp_loc,
kp_vis=kp_vis,
dense_basis=None, # estimate dense Phi here
K=K,
)
assert not self.nrsfm_model.camera_scale # so just ones
assert self.nrsfm_model.argmin_translation
#preds['kp_mean'] = preds['nrsfm']['kp_mean'] # TODO: this should go away
# override top-level preds if regressing directly
assert rotation_code is not None
assert alpha_geom is not None
global_desc = dict(
shape_coeff=alpha_geom,
R=so3int.so3_6d_to_rot(rotation_code),
kp_mean=preds['nrsfm']['kp_mean'],
)
preds.update(self._get_shapes_and_projections(
embed_full, embed_masks, global_desc, K, image_repro_gt
))
preds['shape_image_coord_cal'] = self._gather_supervised_embeddings(
preds['shape_image_coord_cal_dense'], # same as uncal for orthographic
kp_loc,
image_size,
)
preds['kp_reprojected_image'] = self._gather_supervised_embeddings(
preds['shape_reprojected_image'],
kp_loc,
image_size,
)
# compute NR-SFM Prior loss
if self.loss_weights['loss_basis'] > 0.:
preds['loss_basis'] = self._get_basis_loss(
kp_loc,
kp_vis,
embed_full,
preds['nrsfm']['phi']['shape_coeff'],
image_size,
)
if self.loss_weights.loss_alpha > 0.:
assert alpha_geom is not None
preds['loss_alpha'] = func.huber( \
(alpha_geom - preds['nrsfm']['phi']['shape_coeff'])**2,
scaling=self.huber_scaling_basis,
).mean()
if self.loss_weights.loss_rotation > 0.:
preds['loss_rotation'] = self._get_rotation_loss(
preds['phi']['R'],
preds['nrsfm']['phi']['R'],
)
# compute reprojection loss
preds['loss_repro_2d'] = self._get_distance_from_grid(
preds['shape_image_coord_cal_dense'],
image_size,
masks=embed_masks,
K=K,
ray_reprojection=False,
)
# preds['loss_repro_ray'] = 0.0
# if self.projection_type == 'perspective':
preds['loss_repro_ray'] = self._get_distance_from_grid(
preds['shape_image_coord_cal_dense'],
image_size,
masks=embed_masks,
K=K,
ray_reprojection=True,
)
preds['loss_repro'] = preds['loss_repro_ray'] if self.ray_reprojection else preds['loss_repro_2d']
# perceptual loss
preds['photo_out'] = None
if self.photo_min_k > 0 and ba > 1:
# use the first image in the batch as the target of the loss
basis_embed_ref = embed_full[:1].expand_as(embed_full)
masks_ref = embed_masks[:1].expand_as(embed_masks)
phi_onto_ref = self._get_shapes_and_projections(basis_embed_ref, masks_ref, preds['phi'], K)
preds['photo_out'] = self._get_photometric_losses(
images,
phi_onto_ref['shape_reprojected_image'],
embed_full,
texture_desc=texture_desc,
n_min=self.photo_min_k,
masks=embed_masks,
embed_canonical=embed,
)
preds['loss_vgg'] = preds['photo_out']['loss_vgg']
# embedding-camera alignment loss
if self.loss_weights['loss_sph_emb_to_cam'] > 0.:
preds['loss_sph_emb_to_cam'] = self._get_sph_embed_towards_camera_loss(
preds['embed'], embed_masks, preds['phi']['R'].detach()
)
# mask sampling loss
if self.loss_weights['loss_sph_sample_mask'] > 0.:
preds.update(self._get_sampled_sph_loss(preds, K, images.shape[2:]))
# appearance model
preds['app'] = None
if texture_desc is not None:
n_im_use = (
self.n_images_for_app_model
if self.n_images_for_app_model > 0
else ba
)
preds['app'] = self._run_app_model(
embed_full[:n_im_use], texture_desc[:n_im_use], embed
)
preds['app_out'] = self._get_app_model_losses(
images, preds['app'][:, :3], masks=masks,
)
for k in ('loss_vgg', 'loss_l1', 'loss_ssim'):
preds[k+'_app'] = preds['app_out'][k]
# finally get the optimization objective using self.loss_weights
preds['objective'] = self.get_objective(preds, epoch_now=epoch_now)
# =================
# the rest is only for visualisation/metrics
# run on cached embed_db
if self.embed_db is not None and self.embed_db_eval:
preds.update(self.run_on_embed_db(preds, texture_desc, K,
masks=embed_masks, image_size=image_size))
# accumulate into embed db
self.embed_db(embed, masks=embed_masks)
depth_pcl_metrics = self._calc_depth_pcl_errs(
preds['depth_dense'], depths, masks=masks
)
preds.update(depth_pcl_metrics)
# find the scale of shape_image_coord that minimizes the repro loss
preds['shape_image_coord_best_scale'] = self._get_best_scale(preds, image_size)
preds['nrsfm_shape_image_coord'] = preds['nrsfm'][{
'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal',
}[self.projection_type]]
# a hack for vis purposes
preds['misc'] = {}
for k in ('images', 'images_app', 'images_geom', 'embed'):
if k in preds:
preds['misc'][k] = preds[k].detach()
elif k in vars():
preds['misc'][k] = vars()[k]
return preds
def get_objective(self, preds, epoch_now=None):
losses_weighted = {
k: preds[k] * float(w)
for k, w in self.loss_weights.items()
if k in preds and w != 0.0 # avoid adding NaN * 0
}
if not hasattr(self,'_loss_weights_printed') or \
not self._loss_weights_printed:
print('-------\nloss_weights:')
for k,w in self.loss_weights.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
print('-------\nweighted losses:')
for k,w in losses_weighted.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
self._loss_weights_printed = True
loss = torch.stack(list(losses_weighted.values())).sum()
return loss
def visualize( self, visdom_env_imgs, trainmode, \
preds, stats, clear_env=False ):
if stats is not None:
it = stats.it[trainmode]
epoch = stats.epoch
viz = vis_utils.get_visdom_connection(
server=stats.visdom_server,
port=stats.visdom_port,
)
else:
it = 0
epoch = 0
viz = vis_utils.get_visdom_connection()
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
idx_image = 0
title="e%d_it%d_im%d"%(epoch,it,idx_image)
imvar = 'images_aug' if 'images_aug' in preds else 'images'
dvar = 'depths_aug' if 'depths_aug' in preds else 'depths'
mvar = 'masks_aug' if 'masks_aug' in preds else 'masks'
# show depth
ds = preds['depth_dense'].cpu().detach().repeat(1,3,1,1)
ims = preds[imvar].cpu().detach()
ims = Fu.interpolate(ims,size=ds.shape[2:])
if mvar in preds: # mask depths, ims by masks
masks = Fu.interpolate(preds[mvar].cpu().detach(),
size=ds.shape[2:], mode='nearest' )
ims *= masks ; ds *= masks
ds = vis_utils.denorm_image_trivial(ds)
if 'pred_mask' in preds:
pred_mask = torch.sigmoid(preds['pred_mask'][:, None].detach()).cpu().expand_as(ims)
ims_ds = torch.cat( (ims, ds, pred_mask), dim=2 )
else:
ims_ds = torch.cat( (ims, ds), dim=2 )
viz.images(ims_ds, env=visdom_env_imgs, opts={'title':title}, win='depth')
# show aug images if present
imss = []
for k in (imvar, 'images_app', 'images_geom'):
if k in preds:
ims = preds[k].cpu().detach()
ims = Fu.interpolate(ims, scale_factor=0.25)
ims = vis_utils.denorm_image_trivial(ims)
R, R_gt = preds['phi']['R'], preds['nrsfm']['phi']['R']
angle_to_0 = np.rad2deg(
so3.so3_relative_angle(R[0].expand_as(R), R).data.cpu().numpy()
)
angle_to_0_gt = np.rad2deg(
so3.so3_relative_angle(R_gt[0].expand_as(R_gt), R_gt).data.cpu().numpy()
)
if ~np.isnan(angle_to_0).any():
ims = np.stack([
vis_utils.write_into_image(
(im*255.).astype(np.uint8), "%d° / %d°" % (d, d_gt), color=(255,0,255)
) for im, d, d_gt in zip(ims.data.numpy(), angle_to_0, angle_to_0_gt)
])
else:
ims = (ims.data.numpy()*255.).astype(np.uint8)
imss.append(ims)
if len(imss) > 0:
viz.images(
#torch.cat(imss, dim=2),
np.concatenate(imss, axis=2).astype(np.float32)/255.,
env=visdom_env_imgs,
opts={'title': title},
win='imaug',
)
# show reprojections
p1 = preds['kp_loc_aug' if 'kp_loc_aug' in preds else 'kp_loc'][idx_image]
p2 = preds['kp_reprojected_image'][idx_image,0:2]
p3 = preds['nrsfm']['kp_reprojected_image'][idx_image]
p = np.stack([p_.detach().cpu().numpy() for p_ in (p1, p2, p3)])
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
vis_utils.show_projections( viz, visdom_env_imgs, p, v=v,
title=title, cmap__='rainbow',
markersize=50, sticks=None,
stickwidth=1, plot_point_order=False,
image=preds[imvar][idx_image].detach().cpu().numpy(),
win='projections' )
# dense reprojections
p1 = preds['image_repro_gt'].detach().cpu()
p2 = preds['shape_reprojected_image'][idx_image].detach().cpu()
# override mask with downsampled (augmentation applied if any)
mvar = 'embed_masks'
if mvar in preds:
masks = preds[mvar].detach().cpu()
#masks = Fu.interpolate(masks, size=p2.shape[1:], mode='nearest')
p1 = p1 * masks[idx_image]
p2 = p2 * masks[idx_image]
# TEMP
img = (preds[imvar][idx_image].cpu() * Fu.interpolate(
preds[mvar].cpu()[idx_image:idx_image+1], size=preds[imvar][0, 0].size(), mode='nearest'
)[0]).data.cpu().numpy()
p = np.stack([p_.view(2,-1).numpy() for p_ in (p1, p2)])
vis_utils.show_projections( viz, visdom_env_imgs, p, v=None,
title=title, cmap__='rainbow',
markersize=1, sticks=None,
stickwidth=1, plot_point_order=False,
image=img,
win='projections_dense' )
vis_utils.show_flow(viz, visdom_env_imgs, p,
image=preds[imvar][idx_image].detach().cpu().numpy(),
title='flow ' + title,
linewidth=1,
win='projections_flow',
)
if 'sph_sample_projs' in preds:
p = preds['sph_sample_projs'][idx_image].detach().cpu().view(2, -1)
if 'sph_sample_gt' in preds:
p_ = preds['sph_sample_gt'][idx_image].detach().cpu().view(2, -1)
p_ = p_.repeat(1, math.ceil(p.shape[1]/p_.shape[1]))
p = [p, p_[:, :p.shape[1]]]
else:
p = [p.view(2, -1)]
# p = (torch.stack(p) + 1.) / 2.
p = (torch.stack(p) + 1.) / 2.
imsize = preds[imvar][idx_image].shape[1:]
p[:, 0, :] *= imsize[1]
p[:, 1, :] *= imsize[0]
vis_utils.show_projections(viz, visdom_env_imgs,
p, v=None,
title=title + '_spl_sil',
cmap__='rainbow',
markersize=1, sticks=None,
stickwidth=1, plot_point_order=False,
image=preds[imvar][idx_image].detach().cpu().numpy(),
win='projections_spl_sil'
)
merged_embed = self._merge_masked_tensors(
preds['embed_full'], preds['embed_masks']
)[..., None]
gl_desc_0 = {k: v[:1] for k, v in preds['phi'].items()}
merged_with_pivot_phis = self._get_shapes_and_projections(
merged_embed, None, gl_desc_0, preds['K'][:1]
)
preds['shape_canonical_same_alphas'] = merged_with_pivot_phis[
'shape_canonical_dense'
][0 ,..., 0]
# dense 3d
pcl_show = {}
vis_list = ['dense3d', 'mean_shape', 'embed_db', 'batch_fused', 'sph_embed']
if self.loss_weights['loss_sph_sample_mask'] > 0:
vis_list.append('sph_sample_3d')
for vis in vis_list:
if vis=='canonical':
pcl = preds['shape_canonical_dense']
elif vis=='dense3d':
pcl = preds['shape_image_coord_cal_dense']
elif vis=='batch_fused':
pcl = preds['shape_canonical_same_alphas'].detach().cpu()
pcl = torch.cat((pcl, pcl), dim=0)
pcl[3:5,:] = 0.0
pcl[5,:] = 1.0
elif vis=='mean_shape':
pcl = preds['embed_mean']
elif vis=='mean_c3dpo_shape':
pcl = preds['nrsfm_mean_shape']
elif vis=='shape_canonical':
pcl = preds['shape_canonical_dense']
elif vis == 'sph_embed':
pcl = preds['embed'].detach().clone()
elif vis == 'sph_sample_3d':
pcl = preds['sph_sample_3d'][idx_image].detach().cpu().view(3, -1)
pcl = torch.cat((pcl, pcl.clone()), dim=0)
pcl[4:,:] = 0.0
pcl[3,:] = 1.0
# filtering outliers
pcl[:3] -= pcl[:3].mean(dim=1, keepdim=True) # will be centered anyway
std = pcl[:3].std(dim=1).max()
pcl[:3] = pcl[:3].clamp(-2.5*std, 2.5*std)
elif vis == 'embed_db':
pcl = self.embed_db.get_db(uniform_sphere=False).cpu().detach().view(3, -1)
pcl = torch.cat((pcl, pcl.clone()), dim=0)
pcl[3:5,:] = 0.0
pcl[4,:] = 1.0
else:
raise ValueError(vis)
if vis not in ('mean_c3dpo_shape', 'batch_fused', 'sph_sample_3d', 'embed_db'):
pcl_rgb = preds[imvar].detach().cpu()
#pcl = Fu.interpolate(pcl.detach().cpu(), pcl_rgb.shape[2:], mode='bilinear')
pcl_rgb = Fu.interpolate(pcl_rgb, size=pcl.shape[2:], mode='bilinear')
if (mvar in preds):
masks = preds[mvar].detach().cpu()
masks = Fu.interpolate(masks, \
size=pcl.shape[2:], mode='nearest')
else:
masks = None
pcl = pcl.detach().cpu()[idx_image].view(3,-1)
pcl_rgb = pcl_rgb[idx_image].view(3,-1)
pcl = torch.cat((pcl, pcl_rgb), dim=0)
if masks is not None:
masks = masks[idx_image].view(-1)
pcl = pcl[:,masks>0.]
# if vis == 'sph_embed':
# import pdb; pdb.set_trace()
if pcl.numel()==0:
continue
pcl_show[vis] = pcl.numpy()
vis_utils.visdom_plotly_pointclouds(viz, pcl_show, visdom_env_imgs,
title=title+'_'+vis,
markersize=1,
sticks=None, win=vis,
height=700, width=700 ,
normalise=True,
)
var3d = {
'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal',
}[self.projection_type]
sparse_pcl = {
'nrsfm': preds['nrsfm'][var3d][idx_image].detach().cpu().numpy().copy(),
'dense': preds['shape_image_coord_cal'][idx_image].detach().cpu().numpy().copy(),
}
if 'kp_loc_3d' in preds:
sparse_pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
if 'class_mask' in preds:
class_mask = preds['class_mask'][idx_image].detach().cpu().numpy()
sparse_pcl = {k: v*class_mask[None] for k,v in sparse_pcl.items()}
vis_utils.visdom_plotly_pointclouds(viz, sparse_pcl, visdom_env_imgs, \
title=title+'_sparse3d', \
markersize=5, \
sticks=None, win='nrsfm_3d',
height=500,
width=500 )
if 'photo_out' in preds and preds['photo_out'] is not None:
# show the source images and their renders
ims_src = preds['photo_out']['images'].detach().cpu()
ims_repro = preds['photo_out']['images_reproject'].detach().cpu()
ims_reenact = preds['photo_out']['images_reenact'].detach().cpu()
ims_gt = preds['photo_out']['images_gt'].detach().cpu()
# cat all the images
ims = torch.cat((ims_src,ims_reenact,ims_repro,ims_gt), dim=2)
ims = torch.clamp(ims,0.,1.)
viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='imrepro')
im_renders = preds['photo_out']['image_ref_render']
for l in im_renders:
im_gt = preds['photo_out']['images_gt'][0].detach().cpu()
im_render = im_renders[l].detach().cpu()
im = torch.cat((im_gt, im_render), dim=2)
im = torch.clamp(im, 0., 1.)
viz.image(im, env=visdom_env_imgs, \
opts={'title':title+'_min_render_%s' % l}, win='imrender_%s' % l)
if 'app_out' in preds and preds['app_out'] is not None:
# show the source images and their predictions
ims_src = preds['app_out']['images'].detach().cpu()
ims_pred = preds['app_out']['images_pred_clamp'].detach().cpu()
ims = torch.cat((ims_src,ims_pred), dim=2)
viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='impred')
def load_nrsfm_model(exp_name, get_cfg=False):
from dataset.dataset_configs import C3DPO_MODELS, C3DPO_URLS
if exp_name in C3DPO_MODELS:
exp_path = C3DPO_MODELS[exp_name]
else:
exp_path = exp_name
if not os.path.exists(exp_path):
url = C3DPO_URLS[exp_name]
print('Downloading C3DPO model %s from %s' % (exp_name, url))
utils.untar_to_dir(url, exp_path)
cfg_file = os.path.join(exp_path, 'expconfig.yaml')
assert os.path.isfile(cfg_file), 'no config for NR SFM %s!' % cfg_file
with open(cfg_file, 'r') as f:
cfg = yaml.load(f)
# exp = ExperimentConfig(cfg_file=cfg_file)
nrsfm_model = c3dpo.C3DPO(**cfg.MODEL)
model_path = model_io.find_last_checkpoint(exp_path)
assert model_path is not None, "cannot find a previous NR-SFM model in %s" % exp_path
print("Loading the model from", model_path)
model_state_dict, _, _ = model_io.load_model(model_path)
nrsfm_model.load_state_dict(model_state_dict, strict=True)
if get_cfg:
return nrsfm_model, cfg
else:
return nrsfm_model
|
c3dm-main
|
c3dm/model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy, os, sys, time
import itertools as itt
import yaml
# torch imports
import numpy as np
import torch
from dataset.batch_samplers import SceneBatchSampler
from dataset.dataset_zoo import dataset_zoo
from dataset.eval_zoo import eval_zoo
from dataset.c3dpo_annotate import run_c3dpo_model_on_dset
from model import Model
from config import set_config_from_file, set_config, \
get_arg_parser, dump_config, get_default_args, auto_init_args
from tools.attr_dict import nested_attr_dict
from tools.utils import get_net_input, pprint_dict
from tools import utils
from tools.stats import Stats
from tools.vis_utils import get_visdom_env
from tools.model_io import find_last_checkpoint, purge_epoch, \
load_model, get_checkpoint, save_model
from tools.cache_preds import cache_preds
def init_model(cfg,force_load=False,clear_stats=False,add_log_vars=None):
# get the model
model = Model(**cfg.MODEL)
# obtain the network outputs that should be logged
if hasattr(model,'log_vars'):
log_vars = copy.deepcopy(model.log_vars)
else:
log_vars = ['objective']
if add_log_vars:
log_vars.extend(copy.deepcopy(add_log_vars))
visdom_env_charts = get_visdom_env(cfg) + "_charts"
# init stats struct
stats = Stats( log_vars, visdom_env=visdom_env_charts, \
verbose=False, visdom_server=cfg.visdom_server, \
visdom_port=cfg.visdom_port )
model_path = None
if cfg.resume_epoch > 0:
model_path = get_checkpoint(cfg.exp_dir,cfg.resume_epoch)
elif cfg.resume_epoch == -1: # find the last checkpoint
model_path = find_last_checkpoint(cfg.exp_dir)
optimizer_state = None
if model_path is None and force_load:
from dataset.dataset_configs import C3DM_URLS
url = C3DM_URLS[cfg.DATASET.dataset_name]
print('Downloading C3DM model %s from %s' % (cfg.DATASET.dataset_name, url))
utils.untar_to_dir(url, cfg.exp_dir)
model_path = find_last_checkpoint(cfg.exp_dir)
if model_path is not None:
print( "found previous model %s" % model_path )
if force_load or cfg.resume:
print( " -> resuming" )
model_state_dict, stats_load, optimizer_state = load_model(model_path)
if not clear_stats:
if stats_load is None:
print(" -> bad stats! -> clearing")
else:
stats = stats_load
else:
print(" -> clearing stats")
try:
model.load_state_dict(model_state_dict, strict=True)
except RuntimeError as e:
print('!!!!! cannot load state dict in strict mode:')
print(e)
print('loading in non-strict mode ...')
model.load_state_dict(model_state_dict, strict=False)
model.log_vars = log_vars
else:
print( " -> but not resuming -> starting from scratch" )
elif force_load:
print('!! CANNOT RESUME FROM A CHECKPOINT !!')
# update in case it got lost during load:
stats.visdom_env = visdom_env_charts
stats.visdom_server = cfg.visdom_server
stats.visdom_port = cfg.visdom_port
#stats.plot_file = os.path.join(cfg.exp_dir,'train_stats.pdf')
stats.synchronize_logged_vars(log_vars)
return model, stats, optimizer_state
def init_optimizer(model,optimizer_state,
PARAM_GROUPS=(),
freeze_bn=False,
breed='sgd',
weight_decay=0.0005,
lr_policy='multistep',
lr=0.001,
gamma=0.1,
momentum=0.9,
betas=(0.9,0.999),
milestones=[100,],
max_epochs=300,
):
# init the optimizer
if hasattr(model,'_get_param_groups'): # use the model function
p_groups = model._get_param_groups(lr,wd=weight_decay)
else:
allprm = [prm for prm in model.parameters() if prm.requires_grad]
p_groups = [{'params': allprm, 'lr': lr}]
if breed=='sgd':
optimizer = torch.optim.SGD( p_groups, lr=lr, \
momentum=momentum, \
weight_decay=weight_decay )
elif breed=='adagrad':
optimizer = torch.optim.Adagrad( p_groups, lr=lr, \
weight_decay=weight_decay )
elif breed=='adam':
optimizer = torch.optim.Adam( p_groups, lr=lr, \
betas=betas, \
weight_decay=weight_decay )
else:
raise ValueError("no such solver type %s" % breed)
print(" -> solver type = %s" % breed)
if lr_policy=='multistep':
scheduler = torch.optim.lr_scheduler.MultiStepLR( \
optimizer, milestones=milestones, gamma=gamma)
else:
raise ValueError("no such lr policy %s" % lr_policy)
# add the max epochs here!
scheduler.max_epochs = max_epochs
if optimizer_state is not None:
print(" -> setting loaded optimizer state")
optimizer.load_state_dict(optimizer_state)
optimizer.param_groups[0]['momentum'] = momentum
optimizer.param_groups[0]['dampening'] = 0.0
optimizer.zero_grad()
return optimizer, scheduler
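# Illustrative usage sketch (not part of the original training script): builds the
# default SGD optimizer and multistep scheduler for a small stand-in module via
# init_optimizer. The toy module and the hyper-parameter values are assumptions
# chosen only for demonstration.
def _example_init_optimizer():
    toy_model = torch.nn.Linear(4, 2)
    optimizer, scheduler = init_optimizer(
        toy_model, None,
        breed='sgd', lr=0.001, momentum=0.9, weight_decay=0.0005,
        lr_policy='multistep', milestones=[100, 200], max_epochs=300,
    )
    return optimizer, scheduler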
def run_training(cfg):
# run the training loops
# make the exp dir
os.makedirs(cfg.exp_dir,exist_ok=True)
# set the seed
np.random.seed(cfg.seed)
# dump the exp config to the exp dir
dump_config(cfg)
# setup datasets
dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)
# init loaders
if cfg.batch_sampler=='default':
trainloader = torch.utils.data.DataLoader( dset_train,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False )
elif cfg.batch_sampler=='sequence':
trainloader = torch.utils.data.DataLoader( dset_train,
num_workers=cfg.num_workers, pin_memory=True,
batch_sampler=SceneBatchSampler(
torch.utils.data.SequentialSampler(dset_train),
cfg.batch_size,
True,
) )
else:
raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)
if dset_val is not None:
if cfg.batch_sampler=='default':
valloader = torch.utils.data.DataLoader( dset_val,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False )
elif cfg.batch_sampler=='sequence':
valloader = torch.utils.data.DataLoader( dset_val,
num_workers=cfg.num_workers, pin_memory=True,
batch_sampler=SceneBatchSampler( \
torch.utils.data.SequentialSampler(dset_val),
cfg.batch_size,
True,
) )
else:
raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)
else:
valloader = None
# test loaders
if dset_test is not None:
testloader = torch.utils.data.DataLoader(dset_test,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False,
)
_,_,eval_vars = eval_zoo(cfg.DATASET.dataset_name)
else:
testloader = None
eval_vars = None
# init the model
model, stats, optimizer_state = init_model(cfg,add_log_vars=eval_vars)
start_epoch = stats.epoch + 1
# annotate dataset with c3dpo outputs
if cfg.annotate_with_c3dpo_outputs:
for dset in dset_train, dset_val, dset_test:
if dset is not None:
run_c3dpo_model_on_dset(dset, cfg.MODEL.nrsfm_exp_path)
# move model to gpu
model.cuda(0)
# init the optimizer
optimizer, scheduler = init_optimizer(\
model, optimizer_state=optimizer_state, **cfg.SOLVER)
# loop through epochs
scheduler.last_epoch = start_epoch
for epoch in range(start_epoch, cfg.SOLVER.max_epochs):
with stats: # automatic new_epoch and plotting of stats at every epoch start
print("scheduler lr = %1.2e" % float(scheduler.get_lr()[-1]))
# train loop
trainvalidate(model, stats, epoch, trainloader, optimizer, False, \
visdom_env_root=get_visdom_env(cfg), **cfg )
# val loop
if valloader is not None:
trainvalidate(model, stats, epoch, valloader, optimizer, True, \
visdom_env_root=get_visdom_env(cfg), **cfg )
# eval loop (optional)
if testloader is not None:
if cfg.eval_interval >= 0:
if cfg.eval_interval == 0 or \
((epoch % cfg.eval_interval)==0 and epoch > 0):
torch.cuda.empty_cache() # we have memory heavy eval ...
with torch.no_grad():
run_eval(cfg,model,stats,testloader)
assert stats.epoch==epoch, "inconsistent stats!"
# delete previous models if required
if cfg.store_checkpoints_purge > 0 and cfg.store_checkpoints:
for prev_epoch in range(epoch-cfg.store_checkpoints_purge):
period = cfg.store_checkpoints_purge_except_every
if (period > 0 and prev_epoch % period == period - 1):
continue
purge_epoch(cfg.exp_dir,prev_epoch)
# save model
if cfg.store_checkpoints:
outfile = get_checkpoint(cfg.exp_dir,epoch)
save_model(model,stats,outfile,optimizer=optimizer)
scheduler.step()
def run_evaluation(cfg):
np.random.seed(cfg.seed)
# setup datasets
dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)
# test loaders
testloader = torch.utils.data.DataLoader(
dset_test,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False,
)
_, _, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
# init the model
model, _, _ = init_model(cfg, force_load=True, add_log_vars=eval_vars)
model.cuda(0)
model.eval()
# init the optimizer
#optimizer, scheduler = init_optimizer(model, optimizer_state=optimizer_state, **cfg.SOLVER)
# val loop
#trainvalidate(model, stats, 0, valloader, optimizer, True,
# visdom_env_root=get_visdom_env(cfg), **cfg )
with torch.no_grad():
run_eval(cfg, model, None, testloader)
def trainvalidate( model,
stats,
epoch,
loader,
optimizer,
validation,
bp_var='objective',
metric_print_interval=5,
visualize_interval=0,
visdom_env_root='trainvalidate',
**kwargs ):
if validation:
model.eval()
trainmode = 'val'
else:
model.train()
trainmode = 'train'
t_start = time.time()
# clear the visualisations on the first run in the epoch
clear_visualisations = True
# get the visdom env name
visdom_env_imgs = visdom_env_root + "_images_" + trainmode
#loader = itt.islice(loader, 1)
n_batches = len(loader)
for it, batch in enumerate(loader):
last_iter = it==n_batches-1
# move to gpu where possible
net_input = get_net_input(batch)
# add epoch to the set of inputs
net_input['epoch_now'] = int(epoch)
if (not validation):
optimizer.zero_grad()
preds = model(**net_input)
else:
with torch.no_grad():
preds = model(**net_input)
# make sure we don't overwrite anything from the network input
assert not any( k in preds for k in net_input.keys() )
preds.update(net_input) # merge everything into one big dict
# update the stats logger
stats.update(preds,time_start=t_start,stat_set=trainmode)
assert stats.it[trainmode]==it, "inconsistent stat iteration number!"
# print textual status update
if (it%metric_print_interval)==0 or last_iter:
stats.print(stat_set=trainmode,max_it=n_batches)
# optimizer step
if (not validation):
loss = preds[bp_var]
loss.backward()
optimizer.step()
# visualize results
if (visualize_interval>0) and (it%visualize_interval)==0:
model.visualize( visdom_env_imgs, trainmode, \
preds, stats, clear_env=clear_visualisations )
clear_visualisations = False
def run_eval(cfg,model,stats,loader):
if hasattr(model, 'embed_db_eval'):
from dataset.dataset_configs import FILTER_DB_SETTINGS
dset_name = cfg['DATASET']['dataset_name']
if dset_name in FILTER_DB_SETTINGS:
filter_settings = FILTER_DB_SETTINGS[dset_name]
else:
filter_settings = FILTER_DB_SETTINGS['default']
print('filter settings: %s' % str(filter_settings))
print('turning embed_db eval on!')
prev_embed_db_eval = copy.deepcopy(model.embed_db_eval)
model.embed_db_eval = True
model.embed_db.filter_db(**filter_settings)
eval_script, cache_vars, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
if True:
cached_preds = cache_preds(model, loader, stats=stats,
cache_vars=cache_vars)
else:
cached_preds = cache_preds(model, loader, stats=stats,
cache_vars=cache_vars, eval_mode=False)
assert False, 'make sure not to continue beyond here!'
results, _ = eval_script(cached_preds, eval_vars=eval_vars)
if stats is not None:
stats.update(results, stat_set='test') #, log_vars=results.keys())
stats.print(stat_set='test')
if hasattr(model, 'embed_db_eval'):
model.embed_db_eval = prev_embed_db_eval
class ExperimentConfig(object):
def __init__( self,
cfg_file=None,
model_zoo='./data/torch_zoo/',
exp_name='test',
exp_idx=0,
exp_dir='./data/exps/keypoint_densification/default/',
gpu_idx=0,
resume=True,
seed=0,
resume_epoch=-1,
eval_interval=1,
store_checkpoints=True,
store_checkpoints_purge=1,
store_checkpoints_purge_except_every=25,
batch_size=10,
num_workers=8,
visdom_env='',
collect_basis_before_eval=False,
visdom_server='http://localhost',
visdom_port=8097,
metric_print_interval=5,
visualize_interval=0,
mode='trainval',
batch_sampler='sequence',
annotate_with_c3dpo_outputs=True,
SOLVER = get_default_args(init_optimizer),
DATASET = get_default_args(dataset_zoo),
MODEL = get_default_args(Model),
):
self.cfg = get_default_args(ExperimentConfig)
if cfg_file is not None:
set_config_from_file(self.cfg,cfg_file)
else:
auto_init_args(self,tgt='cfg',can_overwrite=True)
self.cfg = nested_attr_dict(self.cfg)
if __name__ == '__main__':
torch.manual_seed(0)
np.random.seed(0)
# init the exp config
exp = ExperimentConfig()
set_config_from_file(exp.cfg, sys.argv[1])
mode = 'train'
if len(sys.argv) > 2 and sys.argv[2] == '--eval':
mode = 'eval'
pprint_dict(exp.cfg)
#with open('freicars.yaml', 'w') as yaml_file:
# yaml.dump(exp.cfg, yaml_file, default_flow_style=False)
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(exp.cfg.gpu_idx)
if exp.cfg.model_zoo is not None:
os.environ["TORCH_MODEL_ZOO"] = exp.cfg.model_zoo
if mode == 'eval':
run_evaluation(exp.cfg)
else:
run_training(exp.cfg)
|
c3dm-main
|
c3dm/experiment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
def image_meshgrid(bounds,resol):
"""
bounds in 3x2
resol in 3x1
"""
# he,wi,de = resol
# minw,maxw = bounds[0]
# minh,maxh = bounds[1]
# mind,maxd = bounds[2]
axis = [ ((torch.arange(sz).float())/(sz-1))*(b[1]-b[0])+b[0] \
for sz,b in zip(resol,bounds) ]
return torch.stack(torch.meshgrid(axis))
def append1(X, mask=1.):
"""
append 1 as the last dim
"""
X = torch.cat( (X, X[:,-2:-1]*0. + mask), dim=1 )
return X
def depth2pcl( D, K, image_size=None, projection_type='perspective' ):
"""
convert depth D in B x 1 x He x Wi
to a point cloud xyz_world in B x 3 x He x Wi
using projection matrix KRT in B x 3 x 7 (K,R,T stacked along dim=2)
the convention is: K[R|T] xyz_world = xyz_camera
"""
grid_size = D.shape[2:4]
ba = D.shape[0]
if image_size is None:
image_size = grid_size
he , wi = image_size
projection_bounds = torch.FloatTensor( \
[ [0.5,he-0.5],
[0.5,wi-0.5], ] )
yx_cam = image_meshgrid(projection_bounds,grid_size).type_as(D)
xy_cam = yx_cam[[1,0],:,:]
xy_cam = xy_cam[None].repeat(ba,1,1,1)
xyz_cam = torch.cat( (xy_cam, D), dim=1 )
if projection_type=='perspective':
xyz_world = unproject_from_camera( \
xyz_cam.view(ba,3,-1), K )
xyz_world = xyz_world.view(ba,3,grid_size[0],grid_size[1])
elif projection_type=='orthographic':
xyz_world = xyz_cam
else:
raise ValueError(projection_type)
return xyz_world
def unproject_from_camera( xyz_cam, K ):
"""
unprojects the points from the camera coordinates xyz_cam to
the world coordinates xyz_world
xyz_cam in (B,3,N), 3rd dimension is depth, first two x,y pixel coords
projection matrix KRT in B x 3 x 7 (K,R,T stacked along dim=2)
"""
# decompose KRT
xy_cam = xyz_cam[:,0:2,:]
depth = xyz_cam[:,2:3,:]
# calibrate the points
xyz_world = torch.bmm(torch.inverse(K),append1(xy_cam))
# append depth and mult by inverse of the transformation
xyz_world = xyz_world * depth
return xyz_world
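# Illustrative sanity-check sketch (not part of the original file): unprojects a
# constant depth map with an identity calibration matrix. The batch size, depth
# value and grid resolution below are assumptions chosen for demonstration.
def _example_depth2pcl():
    D = torch.ones(2, 1, 4, 5)              # B x 1 x He x Wi depth map
    K = torch.eye(3)[None].repeat(2, 1, 1)  # B x 3 x 3 calibration matrices
    xyz = depth2pcl(D, K, projection_type='perspective')
    assert xyz.shape == (2, 3, 4, 5)        # B x 3 x He x Wi point cloud
    return xyz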
|
c3dm-main
|
c3dm/tools/pcl_unproject.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
import numpy as np
import collections
import warnings
def clamp_depth(X, min_depth):
xy, depth = X[:,0:2], X[:,2:]
depth = torch.clamp(depth, min_depth)
return torch.cat((xy,depth), dim=1)
def calc_ray_projection(X, Y, K=None, min_r_len=None, min_depth=None):
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
if K is None:
# Y is already calibrated
r = append1(Y)
else:
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
if min_depth is not None:
X = clamp_depth(X, min_depth)
r_len = (X * r).sum(1, keepdim=True)
if min_r_len is not None:
r_len = torch.clamp(r_len, min_r_len)
r_proj = r_len * r
return r_proj
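# Illustrative sanity-check sketch (not part of the original file): a 3D point that
# already lies on the viewing ray of its calibrated 2D projection (K=None) should
# be returned unchanged by calc_ray_projection; shapes and values are assumptions.
def _example_calc_ray_projection():
    torch.manual_seed(0)
    Y = torch.randn(2, 2, 10)                 # calibrated 2D points
    depth = torch.rand(2, 1, 10) + 1.
    X = torch.cat((Y * depth, depth), dim=1)  # 3D points lying on the rays
    X_proj = calc_ray_projection(X, Y)
    assert torch.allclose(X_proj, X, atol=1e-4)
    return X_proj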
def minimise_2d_residual_over_T(K, X, Y, v=None):
ba, _, n = X.size()
append1 = lambda x: torch.cat((x, x.new_ones(x[:,:1,:].size())), dim=1)
Y_cam = torch.bmm(torch.inverse(K), append1(Y))
# construct a system AT = b
A_u = torch.cat((Y_cam.new_ones(ba, n, 1), Y_cam.new_zeros(ba, n, 1), -Y_cam[:,:1,:].permute(0,2,1)), dim=2)
A_v = torch.cat((Y_cam.new_zeros(ba, n, 1), Y_cam.new_ones(ba, n, 1), -Y_cam[:,1:2,:].permute(0,2,1)), dim=2)
b_u = (Y_cam[:,0:1,:] * X[:,2:,:] - X[:,0:1,:]).permute(0,2,1)
b_v = (Y_cam[:,1:2,:] * X[:,2:,:] - X[:,1:2,:]).permute(0,2,1)
res = Y_cam.new_empty(ba, 3)
for i in range(ba):
if v is not None:
A = torch.cat((A_u[i, v[i] > 0., :], A_v[i, v[i] > 0., :]), dim=0)
b = torch.cat((b_u[i, v[i] > 0., :], b_v[i, v[i] > 0., :]), dim=0)
else:
A = torch.cat((A_u[i, :, :], A_v[i, :, :]), dim=0)
b = torch.cat((b_u[i, :, :], b_v[i, :, :]), dim=0)
#res[i,:] = torch.lstsq(b, A)[0][:3, 0]
res[i,:] = torch.matmul(torch.pinverse(A), b)[:, 0]
return res
# TODO: if used, extract to test
def test_minimise_2d_residual_over_T():
    K = torch.eye(3)[None,:,:]
    Y = torch.tensor([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]).t()[None,:,:]
    X = torch.cat((Y, Y.new_ones(1,1,4)), dim=1)
    res = minimise_2d_residual_over_T(K, X, Y)
    assert torch.allclose(res, torch.tensor([[0., 0., 0.]]), atol=1e-5)
    X = torch.cat((Y, 2*Y.new_ones(1,1,4)), dim=1)
    res = minimise_2d_residual_over_T(K, X, Y)
    assert torch.allclose(res, torch.tensor([[0., 0., -1.]]), atol=1e-5)
    X = torch.cat((Y, Y.new_ones(1,1,4)), dim=1)
    Y[:,0,:] += 3
    res = minimise_2d_residual_over_T(K, X, Y)
    assert torch.allclose(res, torch.tensor([[3., 0., 0.]]), atol=1e-5)
def find_camera_T(K, X, Y, v=None, eps=1e-4):
"""
estimate camera translation given 3D-2D correspondences and cal matrix
"""
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
# projection rays
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
# outer projection ray product (need to permute the array first)
rr = r.permute(0,2,1).contiguous().view(n*ba, 3)
rr = torch.bmm(rr[:,:,None], rr[:,None,:])
# I - rr
Irr = torch.eye(3)[None].type_as(X).repeat(ba*n,1,1) - rr
# [rr - I] x
rrIx = torch.bmm(-Irr, X.permute(0,2,1).contiguous().view(n*ba, 3, 1))
Irr = Irr.view(ba,-1,3,3)
rrIx = rrIx.view(ba,-1,3)
if v is not None:
Irr = Irr * v[:,:,None,None]
rrIx = rrIx * v[:,:,None]
Irr_sum = Irr.sum(1)
rrIx_sum = rrIx.sum(1)
if v is not None:
ok = v.sum(1) > 2 # at least three visible
rrI_sum_i = Irr_sum * 0.
rrI_sum_i[ok] = torch.inverse(Irr_sum[ok])
else:
rrI_sum_i = torch.inverse(Irr_sum)
T = torch.bmm(rrI_sum_i, rrIx_sum[:,:,None])[:,:,0]
return T
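# Illustrative sanity-check sketch (not part of the original file): recovers a known
# camera translation from perfect 3D-2D correspondences with an identity calibration
# matrix; the point count and translation values are assumptions for demonstration.
def _example_find_camera_T():
    torch.manual_seed(0)
    K = torch.eye(3)[None]
    X = torch.randn(1, 3, 20)
    X[:, 2] += 5.                          # keep the points in front of the camera
    T_gt = torch.tensor([[0.3, -0.2, 1.0]])
    X_cam = X + T_gt[:, :, None]
    Y = X_cam[:, :2] / X_cam[:, 2:3]       # perspective projection (calibrated)
    T = find_camera_T(K, X, Y)
    assert torch.allclose(T, T_gt, atol=1e-3)
    return T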
def image_meshgrid(bounds, resol):
"""
bounds in 3x2
resol in 3x1
"""
# he,wi,de = resol
# minw,maxw = bounds[0]
# minh,maxh = bounds[1]
# mind,maxd = bounds[2]
axis = []
for sz, b in zip(resol, bounds):
binw = (b[1]-b[0]) / sz
g = torch.arange(sz).float().cuda() * binw + 0.5 * binw
axis.append(g)
return torch.stack(torch.meshgrid(axis))
def masked_kp_mean(kp_loc,kp_vis):
visibility_mass = torch.clamp(kp_vis.sum(1),1e-4)
kp_mean = (kp_loc*kp_vis[:,None,:]).sum(2)
kp_mean = kp_mean / visibility_mass[:,None]
return kp_mean
def huber(dfsq, scaling=0.03):
loss = (safe_sqrt(1+dfsq/(scaling*scaling),eps=1e-4)-1) * scaling
return loss
def mod1(h):
ge1 = (h > 1.).float()
le0 = (h < 0.).float()
ok = ((h>=0.) * (h<=1.)).float()
rem_ge1 = h - h.long().float()
rem_le0 = 1. - (-h) - (-h).long().float()
h = ge1 * rem_ge1 + le0 * rem_le0 + ok * h
return h
def avg_l2_huber(x, y, mask=None, scaling=0.03, reduce_dims=[1]):
dist = (x - y) ** 2
if reduce_dims:
dist = dist.sum(reduce_dims)
dist = huber(dist, scaling=float(scaling))
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1),1.)
else:
if len(dist.shape)==2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
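# Illustrative usage sketch (not part of the original file): robust average distance
# between two point sets with a per-point visibility mask; the shapes and the masked
# last point are assumptions chosen for demonstration.
def _example_avg_l2_huber():
    x = torch.zeros(2, 3, 5)
    y = torch.full((2, 3, 5), 0.01)
    mask = torch.ones(2, 5)
    mask[:, -1] = 0.                       # ignore the last point of each set
    return avg_l2_huber(x, y, mask=mask, scaling=0.03)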
def avg_l2_dist(x,y,squared=False,mask=None,eps=1e-4):
diff = x - y
dist = (diff*diff).sum(1)
if not squared: dist = safe_sqrt(dist,eps=eps)
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1),1.)
else:
if len(dist.shape)==2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
def argmin_translation_scale(x, y, v=None):
# find translation/scale "T/s" st. s x + T = y
ba = x.shape[0]
x = x.view(ba, 2, -1)
y = y.view(ba, 2, -1)
if v is not None:
v = v.view(ba, -1)
x_mu = (x * v[:, None]).sum(2) / v.sum(1).clamp(1.)[:, None]
y_mu = (y * v[:, None]).sum(2) / v.sum(1).clamp(1.)[:, None]
else:
x_mu = x.mean(2)
y_mu = y.mean(2)
x = x - x_mu[:, :, None]
y = y - y_mu[:, :, None]
s = argmin_scale(x, y, v=v)
T = -x_mu * s[:, None] + y_mu
return s, T
def argmin_translation(x,y,v=None):
# find translation "T" st. x + T = y
x_mu = x.mean(2)
if v is not None:
vmass = torch.clamp(v.sum(1,keepdim=True),1e-4)
x_mu = (v[:,None,:]*x).sum(2) / vmass
y_mu = (v[:,None,:]*y).sum(2) / vmass
T = y_mu - x_mu
return T
def argmin_scale(x,y,v=None):
# find scale "s" st.: sx=y
if v is not None: # mask invisible
x = x * v[:,None,:]
y = y * v[:,None,:]
xtx = (x*x).sum(1).sum(1)
xty = (x*y).sum(1).sum(1)
s = xty / torch.clamp(xtx,1e-4)
return s
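# Illustrative sanity-check sketch (not part of the original file): recovers a known
# per-batch scale and 2D translation with argmin_translation_scale; values assumed.
def _example_argmin_translation_scale():
    torch.manual_seed(0)
    x = torch.randn(2, 2, 50)
    s_gt = torch.tensor([2.0, 0.5])
    T_gt = torch.tensor([[1.0, -1.0], [0.2, 0.3]])
    y = s_gt[:, None, None] * x + T_gt[:, :, None]
    s, T = argmin_translation_scale(x, y)
    assert torch.allclose(s, s_gt, atol=1e-3)
    assert torch.allclose(T, T_gt, atol=1e-3)
    return s, T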
def logexploss(x,inv_lbd,coeff=1.,accum=True):
lbd = 1 / inv_lbd
conj = lbd.log()
prob = -x*lbd
logl = -(prob+coeff*conj) # negative log-likelihood
if accum:
return logl.mean()
else:
return logl
def safe_sqrt(A,eps=float(1e-4)):
"""
performs safe differentiable sqrt
"""
return (torch.clamp(A,float(0))+eps).sqrt()
def rgb2hsv(im, eps=0.0000001):
# img = im * 0.5 + 0.5
img = im
# hue = torch.Tensor(im.shape[0], im.shape[2], im.shape[3]).to(im.device)
hue = im.new_zeros( im.shape[0], im.shape[2], im.shape[3] )
hue[ img[:,2]==img.max(1)[0] ] = 4.0 + ( (img[:,0]-img[:,1]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,2]==img.max(1)[0] ]
hue[ img[:,1]==img.max(1)[0] ] = 2.0 + ( (img[:,2]-img[:,0]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,1]==img.max(1)[0] ]
hue[ img[:,0]==img.max(1)[0] ] = (0.0 + ( (img[:,1]-img[:,2]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,0]==img.max(1)[0] ]) % 6
hue[img.min(1)[0]==img.max(1)[0]] = 0.0
hue = hue/6
saturation = ( img.max(1)[0] - img.min(1)[0] ) / ( img.max(1)[0] + eps )
saturation[ img.max(1)[0]==0 ] = 0
value = img.max(1)[0]
hsv = torch.stack((hue, saturation, value), dim=1)
return hsv
def hsv2rgb(hsv):
C = hsv[:,2] * hsv[:,1]
X = C * ( 1 - ( (hsv[:,0]*6)%2 - 1 ).abs() )
m = hsv[:,2] - C
# zero tensor
z = hsv[:,0] * 0.
h = hsv[:,0]
RGB = \
((h <= 1/6) )[:,None,:,:].float() * torch.stack((C,X,z), dim=1) +\
((h > 1/6) * (h <= 2/6))[:,None,:,:].float() * torch.stack((X,C,z), dim=1) +\
((h > 2/6) * (h <= 3/6))[:,None,:,:].float() * torch.stack((z,C,X), dim=1) +\
((h > 3/6) * (h <= 4/6))[:,None,:,:].float() * torch.stack((z,X,C), dim=1) +\
((h > 4/6) * (h <= 5/6))[:,None,:,:].float() * torch.stack((X,z,C), dim=1) +\
((h > 5/6) * (h <= 6/6))[:,None,:,:].float() * torch.stack((C,z,X), dim=1)
# if self.hsv[0] < 1/6:
# R_hat, G_hat, B_hat = C, X, 0
# elif self.hsv[0] < 2/6:
# R_hat, G_hat, B_hat = X, C, 0
# elif self.hsv[0] < 3/6:
# R_hat, G_hat, B_hat = 0, C, X
# elif self.hsv[0] < 4/6:
# R_hat, G_hat, B_hat = 0, X, C
# elif self.hsv[0] < 5/6:
# R_hat, G_hat, B_hat = X, 0, C
# elif self.hsv[0] <= 6/6:
# R_hat, G_hat, B_hat = C, 0, X
RGB = RGB + m[:,None,:,:]
# R, G, B = (R_hat+m), (G_hat+m), (B_hat+m)
return RGB
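# Illustrative round-trip sketch (not part of the original file): converting a random
# RGB image to HSV and back should approximately recover the input; the image size
# is an assumption chosen for demonstration.
def _example_rgb_hsv_roundtrip():
    torch.manual_seed(0)
    im = torch.rand(2, 3, 8, 8)
    im_rec = hsv2rgb(rgb2hsv(im))
    return (im - im_rec).abs().max()       # expected to be close to zero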
def wmean(x, weight, dim=-1):
return (
x.mean(dim=dim, keepdim=True) if weight is None
else (x*weight[:,None,:]).sum(dim=dim, keepdim=True) /
weight[:,None,:].sum(dim=dim, keepdim=True)
)
def umeyama(X, Y, weight=None, center=True, allow_reflections=False, eps=1e-9):
"""
umeyama finds a rigid motion (rotation R and translation T) between two sets of points X and Y
s.t. RX+T = Y in the least squares sense
Inputs:
X ... Batch x 3 x N ... each column is a 3d point
Y ... Batch x 3 x N ... each column is a 3d point
Outputs:
R ... rotation component of rigid motion
T ... translation component of rigid motion
"""
assert X.shape[1]==Y.shape[1]
assert X.shape[2]==Y.shape[2]
assert X.shape[1]==3
b, _, n = X.size()
if center:
Xmu = wmean(X, weight)
Ymu = wmean(Y, weight)
X = X - Xmu
Y = Y - Ymu
Sxy = (
torch.bmm(Y, X.transpose(2,1)) / n if weight is None
else torch.bmm(Y*weight[:,None,:], X.transpose(2,1)*weight[:,:,None])
/ weight.sum(-1)[:,None,None]
)
U, _, V = torch.svd(Sxy)
R = torch.bmm(U, V.transpose(2,1))
if not allow_reflections:
s = torch.eye(3, dtype=X.dtype, device=X.device).repeat(b, 1, 1)
s[:,-1,-1] = torch.det(R)
# R = torch.matmul(s, R)
R = torch.matmul(torch.matmul(U, s), V.transpose(2,1))
assert torch.all(torch.det(R) >= 0)
T = (
Ymu - torch.bmm(R, Xmu[:,:])
if center else torch.zeros_like(X)
)[:,:,0]
return R, T
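# Illustrative sanity-check sketch (not part of the original file): umeyama should
# recover a rigid motion that aligns two point clouds related by a known rotation
# and translation; the random construction below is an assumption for demonstration.
def _example_umeyama():
    torch.manual_seed(0)
    X = torch.randn(2, 3, 30)
    R_gt, _ = torch.qr(torch.randn(2, 3, 3))
    R_gt = R_gt * torch.det(R_gt)[:, None, None]   # force det(R_gt) = +1
    T_gt = torch.randn(2, 3)
    Y = torch.bmm(R_gt, X) + T_gt[:, :, None]
    R, T = umeyama(X, Y)
    assert torch.allclose(torch.bmm(R, X) + T[:, :, None], Y, atol=1e-3)
    return R, T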
def get_edm(pts, pts2=None):
dtype = pts.data.type()
ba, dim, N = pts.shape
if pts2 is not None:
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2) # inplace saves memory
edm += fNorm1
# edm = (fNorm2.transpose(1,2) + fGram) + fNorm1
else:
fGram = torch.bmm(2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm = (fNorm1.transpose(1,2) - fGram) + fNorm1
return edm.contiguous()
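# Illustrative sanity-check sketch (not part of the original file): get_edm should
# match the squared pairwise Euclidean distances given by torch.cdist (assuming a
# PyTorch version that provides cdist); the point count is an assumption.
def _example_get_edm():
    torch.manual_seed(0)
    pts = torch.randn(2, 3, 10)
    edm = get_edm(pts)
    ref = torch.cdist(pts.transpose(1, 2), pts.transpose(1, 2)) ** 2
    return (edm - ref).abs().max()         # expected to be close to zero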
def sample_random_xy(xy, mask, n=100):
ba = xy.shape[0]
xy = xy.reshape(ba, 2, -1)
mask = mask.reshape(ba, -1)
xy_sample = []
for m_, xy_ in zip(mask, xy):
ok = torch.nonzero(m_)
if ok.numel() <= 2:
warnings.warn('nothing in the mask!')
ok = torch.nonzero(m_ + 1).squeeze()
ok = ok.squeeze()
sel = torch.randint(low=0, high=len(ok), size=(n,), device=xy.device)
xy_sample.append(xy_[:, ok[sel]])
xy_sample = torch.stack(xy_sample)
return xy_sample
def get_mask_chamfer(xy_rdr, gt_mask, image_size, n=100):
ba = xy_rdr.shape[0]
render_size = gt_mask.shape[2:]
grid_gt = image_meshgrid(((0, 2), (0, 2)), render_size)
grid_gt = grid_gt.type_as(xy_rdr) - 1.
grid_gt = grid_gt[[1, 0]][None].repeat(ba, 1, 1, 1)
# sample random points from gt mask
gt_samples = sample_random_xy(grid_gt, gt_mask, n=n)
# compute chamfer
edm = get_edm(gt_samples, xy_rdr)
edm = huber(edm, scaling=0.1)
loss = 0.5 * (edm.min(dim=1)[0].mean() + edm.min(dim=2)[0].mean())
return loss, gt_samples
|
c3dm-main
|
c3dm/tools/functions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from tools.utils import auto_init_args
import torch
import torch.nn.functional as Fu
from torch.nn import Parameter
from tools.utils import Timer
class TensorAccumulator(torch.nn.Module):
def __init__(self, db_size=30000, db_dim=3, perc_replace=0.01):
super().__init__()
auto_init_args(self)
db = torch.zeros(db_dim, db_size).float()
self.db = Parameter(db)
self.db.requires_grad = False
self.pointer = 0
self.uniform_sphere_sampling = False
def get_db(self, uniform_sphere=False):
if uniform_sphere or self.uniform_sphere_sampling:
mean_norm = (self.db.data**2).sum(0).sqrt().mean()
db = Fu.normalize(torch.randn_like(self.db), dim=0) * mean_norm
return db
else:
if hasattr(self, 'db_f'):
return self.db_f.clone()
else:
return self.db.data
def filter_db(self, nn=1e-3, perc_keep=0.9, sig=0.01, lap_size=10, lap_alpha=1.):
print('filtering db')
if nn < 1.: nn = int(self.db.shape[1] * nn)
print('nn size = %d' % nn)
db_f = self.density_filter(nn=nn, perc_keep=perc_keep, \
sig=sig, in_db=self.db.data.clone())
if lap_size < 1.: lap_size = int(self.db.shape[1] * lap_size)
db_f = self.lap_filter(lap_size=lap_size, lap_alpha=lap_alpha, in_db=db_f)
self.db_f = db_f
def get_edm(self, pts, pts2=None):
dtype = pts.data.type()
ba, dim, N = pts.shape
if not(pts2 is None):
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2)
edm += fNorm1
else:
edm = torch.bmm(-2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm += fNorm1.transpose(1,2)
edm += fNorm1
return edm.contiguous()
def reset(self):
self.db.data = torch.zeros(self.db_dim, self.db_size).type_as(self.db.data)
self.pointer = 0
def get_nns(self, pts, pts2, nn, bsize=int(1e4)):
# nb = int(np.ceil(pts.shape[1] / bsize))
chunks = torch.split(pts, bsize, dim=1)
indKNN = []
for chunk in chunks:
edm = self.get_edm(pts2[None], chunk[None])[0]
_, indKNN_ = torch.topk(edm, k=nn, dim=1, largest=False)
indKNN.append(indKNN_)
indKNN = torch.cat(indKNN, dim=0)
return indKNN
def density_filter(self, nn=50, perc_keep=0.9, sig=0.01, in_db=None):
print('density filter ...')
if in_db is None:
pcl = self.db.data
else:
pcl = in_db
indKNN = self.get_nns(pcl, pcl, nn=nn)
# edm = self.get_edm(pcl[None])[0]
# _, indKNN = torch.topk(edm, k=nn, dim=0, largest=False)
NNs = pcl[:,indKNN]
dsity = (-((NNs - pcl[:,:,None])**2).sum(0)/sig).exp().sum(1)
thr = torch.topk(dsity, \
int((1.-perc_keep)*dsity.shape[0]), largest=False)[0][-1]
pcl = pcl[:, dsity>=thr]
if in_db is None:
self.db.data = pcl
else:
return pcl
def lap_filter(self, lap_size=10, lap_alpha=1., in_db=None):
print('lap filter ...')
if in_db is None:
pcl = self.db.data
else:
pcl = in_db
indKNN = self.get_nns(pcl, pcl, nn=lap_size)
NNs = pcl[:,indKNN]
pclf = NNs.mean(dim=2)
pcl = lap_alpha * pclf + (1-lap_alpha) * pcl
if in_db is None:
self.db.data = pcl
else:
return pcl
def forward(self, embed=None, masks=None):
if not self.training: # gather only on the train set
return None
ba = embed.shape[0]
embed_flat = embed.view(ba,self.db_dim,-1).detach()
if masks is not None:
mask_flat = masks.view(ba, -1)
else:
mask_flat = embed_flat[:,0,:] * 0. + 1.
# with Timer():
# embed_flat = embed_flat.permute(1,2,0).contiguous().view(1,self.db_dim,-1)
# mask_flat = mask_flat.t().contiguous().view(1,-1)
for bi, (m, e) in enumerate(zip(mask_flat, embed_flat)):
sel = torch.nonzero(m).squeeze()
if sel.numel()<=2:
continue
nsel = max(int(self.db_size * self.perc_replace), 1)
if self.pointer >= self.db_size: # randomly replace
idx = sel[torch.LongTensor(nsel).random_(0, len(sel))]
idx_replace = torch.LongTensor(nsel).random_(0, self.db_size)
embed_sel = e[:,idx].detach().data
self.db.data[:, idx_replace] = embed_sel
else: # keep adding vectors
# print('filling db ...')
nsel = min(nsel, self.db_size - self.pointer)
idx = sel[torch.LongTensor(nsel).random_(0, len(sel))]
embed_sel = e[:,idx].detach().data
self.db.data[:, self.pointer:(self.pointer+nsel)] = embed_sel
self.pointer += nsel
# print(self.pointer)
return None
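# Illustrative usage sketch (not part of the original file): accumulate masked
# per-pixel embeddings into the database and read it back. The sizes, the explicit
# call to .train() and the random inputs are assumptions chosen for demonstration.
def _example_tensor_accumulator():
    acc = TensorAccumulator(db_size=100, db_dim=3, perc_replace=0.1)
    acc.train()                            # vectors are only accumulated in train mode
    embed = torch.randn(2, 3, 8, 8)        # B x C x H x W embeddings
    masks = torch.ones(2, 1, 8, 8)         # foreground masks
    acc(embed=embed, masks=masks)
    return acc.get_db().shape              # torch.Size([3, 100])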
|
c3dm-main
|
c3dm/tools/tensor_accumulator.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
import math
if tuple(int(v) for v in python_version_tuple()[:2]) >= (3, 3):
    from collections.abc import Iterable
else:
    from collections import Iterable
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.4"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT="g"
_DEFAULT_MISSINGVAL=""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = { "left": "<.", "right": ">.", "center": "=.", "decimal": ">." }
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=''):
alignment = { "left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">' }
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
if isinstance(val, (_text_type, _binary_type)) and not val.strip():
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"github":
TableFormat(lineabove=Line("|", "-", "|", "|"),
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs,"||",header="'''"),
datarow=partial(_moin_row_with_attrs,"||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
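# An illustrative sketch added alongside the vendored module (not part of the
# upstream format registry): a custom TableFormat can be assembled from the
# Line/DataRow namedtuples above and passed directly to tabulate() via the
# `tablefmt` argument, bypassing the _table_formats lookup.
def _example_custom_table_format():
    return TableFormat(lineabove=Line("#", "=", "=", "#"),
                       linebelowheader=Line("#", "-", "-", "#"),
                       linebetweenrows=None,
                       linebelow=Line("#", "=", "=", "#"),
                       headerrow=DataRow("# ", " | ", " #"),
                       datarow=DataRow("# ", " | ", " #"),
                       padding=0, with_header_hide=None)
# e.g. tabulate([["spam", 1]], headers=["item", "qty"],
#               tablefmt=_example_custom_table_format())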
# The table formats for which multiline cells will be folded into subsequent
# table rows. The key is the original format specified at the API. The value is
# the format that will be used to represent the original format.
multiline_formats = {
"plain": "plain",
"simple": "simple",
"grid": "grid",
"fancy_grid": "fancy_grid",
"pipe": "pipe",
"orgtbl": "orgtbl",
"jira": "jira",
"presto": "presto",
"psql": "psql",
"rst": "rst",
}
# TODO: Add multiline support for the remaining table formats:
# - mediawiki: Replace \n with <br>
# - moinmoin: TBD
# - youtrack: TBD
# - html: Replace \n with <br>
# - latex*: Use "makecell" package: In header, replace X\nY with
# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
# - tsv: TBD
# - textile: Replace \n with <br/> (must be well-formed XML)
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
>>> _isnumber("123e45678")
False
>>> _isnumber("inf")
True
"""
if not _isconvertible(float, string):
return False
elif isinstance(string, (_text_type, _binary_type)) and (
math.isinf(float(string)) or math.isnan(float(string))):
return string.lower() in ['inf', '-inf', 'nan']
return True
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or\
(isinstance(string, _binary_type) or isinstance(string, _text_type))\
and\
_isconvertible(inttype, string)
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return type(string) is _bool_type or\
(isinstance(string, (_binary_type, _text_type))\
and\
string in ("True", "False"))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False, is_multiline=False):
"""[string] -> [padded_string]"""
strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, _bool_type: 1, int: 2, float: 3, _binary_type: 4, _text_type: 5 }
invtypes = { 5: _text_type, 4: _binary_type, 3: float, 2: int, 1: _bool_type, 0: _none_type }
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse) for s in strings ]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False, width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h)) for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v,row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
is_headers2bool_broken = False
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
is_headers2bool_broken = True
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
vals = tabular_data.values # values matrix doesn't need to be transposed
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
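# A small illustrative helper (added; not part of the original module) showing what
# _normalize_tabular_data returns for two common inputs with headers="keys".
# The expected values in the comments assume CPython 3.7+ dict ordering.
def _example_normalize_tabular_data():
    cols = {"name": ["spam", "eggs"], "qty": [1, 2]}          # dict of columns
    rows_a, headers_a = _normalize_tabular_data(cols, "keys")
    # rows_a == [['spam', 1], ['eggs', 2]], headers_a == ['name', 'qty']
    records = [{"name": "spam", "qty": 1}, {"name": "eggs", "qty": 2}]  # list of dicts
    rows_b, headers_b = _normalize_tabular_data(records, "keys")
    # rows_b == [['spam', 1], ['eggs', 2]], headers_b == ['name', 'qty']
    return (rows_a, headers_a), (rows_b, headers_b)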
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default", disable_numparse=False,
colalign=None):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
    for all types of data, pass `showindex="never"` or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
"presto" is like tables produce by the Presto CLI:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "presto"))
strings | numbers
-----------+-----------
spam | 41.9999
eggs | 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
    note that reStructuredText also accepts "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
</thead>
<tbody>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
    such as backslash and underscore, so LaTeX commands may be embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
spam$_9$ & 41.9999 \\\\
\\emph{eggs} & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
    This can lead to weird results for strings that merely look numeric,
    e.g. the git SHA "42992e1" will be parsed into the number
    429920 and aligned as such.
To completely disable number parsing (and alignment), use
    `disable_numparse=True`. For more fine-grained control, a list of column
    indices can be used to disable number parsing only on those columns,
    e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
first and third columns.
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): #old version
float_formats = len(cols) * [floatfmt] # just duplicate the string to use in each column
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend( (len(cols)-len(float_formats)) * [_DEFAULT_FLOATFMT] )
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend( (len(cols)-len(missing_vals)) * [_DEFAULT_MISSINGVAL] )
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
if colalign is not None:
assert isinstance(colalign, Iterable)
for idx, align in enumerate(colalign):
aligns[idx] = align
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl) for cl in c)) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
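# A short self-check (added for illustration; not part of the original module)
# of how _expand_numparse maps the disable_numparse argument to per-column flags.
def _example_expand_numparse():
    assert _expand_numparse(False, 3) == [True, True, True]     # parse numbers everywhere
    assert _expand_numparse(True, 3) == [False, False, False]   # parsing fully disabled
    assert _expand_numparse([0, 2], 3) == [False, True, False]  # disabled on columns 0 and 2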
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
colwidths = [w - 2*pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' '*w]*(nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, pad)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "h1o:s:F:A:f:",
                                   ["help", "header", "output=", "sep=",
                                    "float=", "align=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
colalign = None
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-C", "--colalign"]:
colalign = value.split()
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt,
colalign=colalign), file=file)
if __name__ == "__main__":
_main()
|
c3dm-main
|
c3dm/tools/tabulate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
def find_camera_T(K, X, Y):
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
# projection rays
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
# outer projection ray product (need to permute the array first)
rr = r.permute(0,2,1).contiguous().view(n*ba, 3)
rr = torch.bmm(rr[:,:,None], rr[:,None,:])
# I - rr
Irr = torch.eye(3)[None].repeat(ba*n,1,1) - rr
# [rr - I] x
rrIx = torch.bmm(-Irr, X.permute(0,2,1).contiguous().view(n*ba, 3, 1))
Irr_sum = Irr.view(ba,-1,3,3,).sum(1)
rrIx_sum = rrIx.view(ba,-1,3).sum(1)
rrI_sum_i = torch.inverse(Irr_sum)
T = torch.bmm(rrI_sum_i, rrIx_sum[:,:,None])[:,:,0]
return T
n = 500 # n points
ba = 20 # batch size
# gt 3D points
X = torch.zeros(ba, 3, n).normal_(0., 1.)
for focal in torch.linspace(10.,0.1,20):
# cam K
K = torch.eye(3)
K[0,0] = focal
K[1,1] = focal
K = K[None].repeat(ba,1,1)
if False:
# persp projections - should give 0 error everywhere
T = torch.ones(ba, 3).uniform_()*10.
Y = torch.bmm(K, X + T[:,:,None])
Y = Y[:,0:2,:] / Y[:,2:3,:]
else:
# orth projections - should get higher error with lower focal
Y = X[:,0:2]
T = find_camera_T(K, X, Y)
## test the repro loss
# perspective projections
Yp = torch.bmm(K, X + T[:,:,None])
depth_ = Yp[:,2:3, :]
Yp = Yp[:,0:2, :] / depth_
# the diff between orth and persp
df = ((Y - Yp)**2).sum(1).sqrt().mean(1).mean()
print('focal = %1.2f, repro_df = %1.2e, mean_depth = %1.2f' % \
(focal, df, depth_.mean()) )
|
c3dm-main
|
c3dm/tools/test_orth2pers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
def nested_attr_dict(dct):
if type(dct) in (dict,AttrDict):
dct = AttrDict(dct)
for k,v in dct.items():
dct[k] = nested_attr_dict(v)
return dct
class AttrDict(dict):
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
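# A brief usage sketch (added for illustration): nested plain dicts become
# attribute-accessible after nested_attr_dict(), and attribute writes fall
# through to the underlying dict entries.
def _example_attr_dict():
    cfg = nested_attr_dict({"model": {"lr": 0.1, "layers": [64, 64]}})
    assert cfg.model.lr == 0.1
    cfg.model.lr = 0.01                 # same as cfg["model"]["lr"] = 0.01
    assert cfg["model"]["lr"] == 0.01
    return cfg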
|
c3dm-main
|
c3dm/tools/attr_dict.py
|
c3dm-main
|
c3dm/tools/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
import torch
import glob
import os
def load_stats(flstats):
try:
        with open(flstats, 'rb') as f:
            stats, _ = pickle.load(f)  # don't load the config
    except Exception:
        print("Can't load stats! %s" % flstats)
stats = None
return stats
def get_model_path(fl):
fl = os.path.splitext(fl)[0]
flmodel = "%s.pth" % fl
return flmodel
def get_optimizer_path(fl):
fl = os.path.splitext(fl)[0]
flopt = "%s_opt.pth" % fl
return flopt
def get_stats_path(fl):
fl = os.path.splitext(fl)[0]
flstats = "%s_stats.pkl" % fl
return flstats
def save_model(model,stats,fl,optimizer=None,cfg=None):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
print("saving model to %s" % flmodel)
torch.save(model.state_dict(),flmodel)
if optimizer is not None:
flopt = get_optimizer_path(fl)
print("saving optimizer to %s" % flopt)
torch.save(optimizer.state_dict(),flopt)
print("saving model stats and cfg to %s" % flstats)
pickle.dump((stats,cfg),open(flstats,'wb'))
def load_model(fl):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
flopt = get_optimizer_path(fl)
model_state_dict = torch.load(flmodel)
stats = load_stats(flstats)
if os.path.isfile(flopt):
optimizer = torch.load(flopt)
else:
optimizer = None
return model_state_dict, stats, optimizer
def get_checkpoint(exp_dir,epoch):
fl = os.path.join( exp_dir, 'model_epoch_%08d.pth' % epoch )
return fl
def find_last_checkpoint(exp_dir):
fls = sorted( glob.glob( os.path.join(
glob.escape(exp_dir), 'model_epoch_'+'[0-9]'*8+'.pth'
)))
return fls[-1] if len(fls) > 0 else None
def purge_epoch(exp_dir,epoch):
model_path = get_checkpoint(exp_dir,epoch)
to_kill = [ model_path,
get_optimizer_path(model_path),
get_stats_path(model_path) ]
for k in to_kill:
if os.path.isfile(k):
print('deleting %s' % k)
os.remove(k)
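# A hedged usage sketch (added for illustration; `model`, `optimizer`, `stats`,
# `exp_dir` and the epoch number are placeholders, not names from this repository).
# Checkpoints are written as <stem>.pth / <stem>_opt.pth / <stem>_stats.pkl and
# read back with load_model().
def _example_checkpoint_roundtrip(model, optimizer, stats, exp_dir, epoch):
    fl = get_checkpoint(exp_dir, epoch)      # e.g. <exp_dir>/model_epoch_00000003.pth
    save_model(model, stats, fl, optimizer=optimizer)
    last = find_last_checkpoint(exp_dir)     # newest model_epoch_*.pth or None
    model_state_dict, stats_loaded, optimizer_state = load_model(last)
    model.load_state_dict(model_state_dict)
    return model, stats_loaded, optimizer_state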
|
c3dm-main
|
c3dm/tools/model_io.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import time
import sys
import copy
import torch
from tqdm import tqdm
from tools.stats import Stats
from tools.utils import pprint_dict, has_method, get_net_input
def cache_preds(model,
loader,
cache_vars=None,
stats=None,
n_extract=None,
cat=True,
eval_mode=True,
strict_mode=False,
):
print("caching model predictions: %s" % str(cache_vars) )
if eval_mode:
model.eval()
else:
print('TRAINING EVAL MODE!!!')
model.train()
trainmode = 'test'
t_start = time.time()
iterator = loader.__iter__()
cached_preds = []
cache_size = 0. # in GB ... counts only cached tensor sizes
n_batches = len(loader)
if n_extract is not None:
n_batches = n_extract
with tqdm(total=n_batches,file=sys.stdout) as pbar:
for it, batch in enumerate(loader):
last_iter = it==n_batches-1
# move to gpu and cast to Var
net_input = get_net_input(batch)
with torch.no_grad():
preds = model(**net_input)
if strict_mode:
assert not any( k in preds for k in net_input.keys() )
preds.update(net_input) # merge everything into one big dict
# if True:
# model.visualize('ff_debug', 'eval', preds, None, clear_env=False)
# import pdb; pdb.set_trace()
if stats is not None:
stats.update(preds,time_start=t_start,stat_set=trainmode)
assert stats.it[trainmode]==it, "inconsistent stat iteration number!"
# restrict the variables to cache
if cache_vars is not None:
preds = {k:preds[k] for k in cache_vars if k in preds}
# ... gather and log the size of the cache
preds, preds_size = gather_all(preds)
cache_size += preds_size
# for k in preds:
# if has_method(preds[k],'cuda'):
# preds[k] = preds[k].data.cpu()
# cache_size += preds[k].numpy().nbytes / 1e9
cached_preds.append(preds)
pbar.set_postfix(cache_size="%1.2f GB"%cache_size)
pbar.update(1)
if last_iter and n_extract is not None:
break
if cat:
return concatenate_cache( cached_preds )
else:
return cached_preds
def gather_all( preds ):
cache_size = 0
for k in preds:
if has_method(preds[k],'cuda'):
preds[k] = preds[k].data.cpu()
cache_size += preds[k].numpy().nbytes / 1e9
elif type(preds[k])==dict:
preds[k], size_now = gather_all( preds[k] )
cache_size += size_now
return preds, cache_size
# cache concatenation - largely taken from pytorch default_collate()
import re
from torch._six import container_abcs, string_classes, int_classes
np_str_obj_array_pattern = re.compile(r'[SaUO]')
error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def concatenate_cache(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
return torch.cat(batch, 0, out=out) # the main difference is here
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(error_msg_fmt.format(elem.dtype))
return concatenate_cache([torch.from_numpy(b) for b in batch])
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(batch[0], int_classes):
return torch.tensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], container_abcs.Mapping):
return {key: concatenate_cache([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple
return type(batch[0])(*(concatenate_cache(samples) for samples in zip(*batch)))
elif isinstance(batch[0], container_abcs.Sequence): # also some diffs here
# just unpack
return [ s_ for s in batch for s_ in s ]
# elif batch[0] is None:
# return batch
raise TypeError((error_msg_fmt.format(type(batch[0]))))
# def concatenate_cache(cached_preds):
# flds = list(cached_preds[0].keys())
# cached_preds_concat = {}
# for fld in flds:
# classic_cat = True
# if type(cached_preds[0][fld])==str or type(cached_preds[0][fld][0])==str:
# classic_cat = True
# else:
# try:
# cached_preds_concat[fld] = torch.cat( \
# [c[fld] for c in cached_preds] , dim=0 )
# classic_cat = False
# except:
# pass
# if classic_cat:
# cached_preds_concat[fld] = \
# [x for c in cached_preds for x in c[fld]]
# return cached_preds_concat
|
c3dm-main
|
c3dm/tools/cache_preds.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import math
import torch.nn.functional as Fu
def so3_6d_to_rot(d6):
"""
d6 ... batch x 6
Follows Sec. B in the appendix of:
https://arxiv.org/pdf/1812.07035.pdf
"""
a1, a2 = d6[:, :3], d6[:, 3:]
b1 = Fu.normalize(a1, dim=1)
b2 = a2 - (b1 * a2).sum(1, keepdim=True) * b1
b2 = Fu.normalize(b2, dim=1)
b3 = torch.cross(b1, b2)
R = torch.stack((b1, b2, b3), dim=1)
# if True:
# assert torch.allclose(torch.det(R), R.new_ones(R.shape[0]))
return R
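# A quick sanity sketch (added for illustration): the rows of the recovered matrix
# are orthonormal by construction, so R R^T should be close to the identity.
def _example_check_so3_6d_to_rot(batch=4):
    d6 = torch.randn(batch, 6)
    R = so3_6d_to_rot(d6)
    RRt = torch.bmm(R, R.permute(0, 2, 1))
    return (RRt - torch.eye(3)).abs().max()   # expected to be ~0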
def so3_relative_angle(R1, R2):
"""
Calculates the relative angle (in radians) between pairs of
rotation matrices `R1` and `R2` with
    :math:`\\phi = \\text{acos}\\frac{\\text{Trace}(R_1 R_2^T)-1}{2}`.
.. note::
This corresponds to a geodesic distance on the 3D manifold of rotation
matrices.
Args:
R1: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
R2: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
Returns:
Corresponding rotation angles of shape :math:`(\text{minibatch},)`.
Raises:
ValueError if `R1` or `R2` is of incorrect shape.
ValueError if `R1` or `R2` has an unexpected trace.
"""
R12 = torch.bmm(R1, R2.permute(0, 2, 1))
return so3_rotation_angle(R12)
def so3_rotation_angle(R, eps: float = 1e-4):
"""
Calculates angles (in radians) of a batch of rotation matrices `R` with
    :math:`\\phi = \\text{acos}\\frac{\\text{Trace}(R)-1}{2}`. The trace of the
input matrices is checked to be in the valid range [-1-`eps`,3+`eps`].
The `eps` argument is a small constant that allows for small errors
caused by limited machine precision.
Args:
R: Batch of rotation matrices of shape :math:`(\text{minibatch}, 3, 3)`.
eps: Tolerance for the valid trace check.
Returns:
Corresponding rotation angles of shape :math:`(\text{minibatch},)`.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N , dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError('Input has to be a batch of 3x3 Tensors.')
rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
if ((rot_trace < -1. - eps) + (rot_trace > 3. + eps)).any():
raise ValueError('A matrix has trace outside valid range [-1-eps,3+eps].')
# clamp to valid range
rot_trace = torch.clamp(rot_trace, -1., 3.)
# phi ... rotation angle
phi = (0.5 * (rot_trace - 1.)).acos()
return phi
def rand_rot(N,dtype=torch.float32,max_rot_angle=float(math.pi),\
axes=(1,1,1),get_ss=False):
rand_axis = torch.zeros( (N,3) ).type(dtype).normal_()
# apply the axes mask
axes = torch.Tensor(axes).type(dtype)
rand_axis = axes[None,:] * rand_axis
rand_axis = Fu.normalize( rand_axis, dim=1, p=2 )
rand_angle = torch.ones( N ).type(dtype).uniform_(0,max_rot_angle)
R_ss_rand = rand_axis * rand_angle[:,None]
R_rand = so3_exponential_map(R_ss_rand)
# if max_rot_angle < float(np.pi)-1:
# e_ = torch.eye(3).type(R_rand.type())
# angles = so3_geod_dist(e_[None,:,:].repeat(N,1,1),R_rand).acos()
# print( "rand rot angles: mu=%1.3f std=%1.3f" % (angles.mean(),angles.std()) )
if get_ss:
return R_rand, R_ss_rand
else:
return R_rand
def random_2d_rotation(size, dtype, max_angle):
theta = (torch.rand(size).type(dtype) - 0.5) * 2 * max_angle
sins = torch.sin(theta)
coss = torch.cos(theta)
return torch.stack((
torch.stack((coss, -sins), dim=-1),
torch.stack((sins, coss), dim=-1),
), dim=-2)
def so3_exponential_map(log_rot: torch.Tensor, eps: float = 0.0001):
"""
Convert a batch of logarithmic representations of rotation matrices `log_rot`
to a batch of 3x3 rotation matrices using Rodrigues formula.
The conversion has a singularity around 0 which is handled by clamping
controlled with the `eps` argument.
Args:
log_rot: batch of vectors of shape :math:`(\text{minibatch} , 3)`
eps: a float constant handling the conversion singularity around 0
Returns:
batch of rotation matrices of shape :math:`(\text{minibatch} , 3 , 3)`
Raises:
ValueError if `log_rot` is of incorrect shape
"""
_ , dim = log_rot.shape
if dim != 3:
raise ValueError('Input tensor shape has to be Nx3.')
nrms = (log_rot * log_rot).sum(1)
phis = torch.clamp(nrms, 0.).sqrt()
phisi = 1. / (phis+eps)
fac1 = phisi * phis.sin()
fac2 = phisi * phisi * (1. - phis.cos())
ss = hat(log_rot)
R = fac1[:, None, None] * ss + \
fac2[:, None, None] * torch.bmm(ss, ss) + \
torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
# from old.functions import rotss2rot
# R_ = rotss2rot(log_rot)
# print((R-R_).abs().max())
# import pdb; pdb.set_trace()
return R
def so3_log_map(R, eps: float = 0.0001):
"""
Convert a batch of 3x3 rotation matrices `R`
to a batch of 3-dimensional matrix logarithms of rotation matrices
The conversion has a singularity around `(R=I)` which is handled
by clamping controlled with the `eps` argument.
Args:
R: batch of rotation matrices of shape `(minibatch, 3, 3)`.
eps: A float constant handling the conversion singularity.
Returns:
Batch of logarithms of input rotation matrices
of shape `(minibatch, 3)`.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N, dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError("Input has to be a batch of 3x3 Tensors.")
phi = so3_rotation_angle(R)
phi_valid = torch.clamp(phi.abs(), eps) * phi.sign()
phi_valid = phi_valid + (phi_valid==0).type_as(phi_valid) * eps
log_rot_hat = (phi_valid /
(2.0 * phi_valid.sin()))[:, None, None] * (R - R.permute(0, 2, 1))
log_rot = hat_inv(log_rot_hat)
return log_rot
def hat_inv(h: torch.Tensor):
"""
Compute the inverse Hat operator [1] of a batch of 3x3 matrices.
Args:
h: batch of skew-symmetric matrices of shape :math:`(\text{minibatch}, 3, 3)`
Returns:
batch of 3d vectors of shape :math:`(\text{minibatch}, 3)`
Raises:
ValueError if `h` is of incorrect shape
ValueError if `h` not skew-symmetric
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N , dim1, dim2 = h.shape
if dim1 != 3 or dim2 != 3:
raise ValueError('Input has to be a batch of 3x3 Tensors.')
ss_diff = (h + h.permute(0, 2, 1)).abs().max()
if float(ss_diff) > 1e-5:
raise ValueError('One of input matrices not skew-symmetric.')
x = h[:, 2, 1]
y = h[:, 0, 2]
z = h[:, 1, 0]
v = torch.stack((x, y, z), dim=1)
return v
def hat(v: torch.Tensor):
"""
Compute the Hat operator [1] of a batch of 3D vectors.
Args:
v: batch of vectors of shape :math:`(\text{minibatch} , 3)`
Returns:
batch of skew-symmetric matrices of shape :math:`(\text{minibatch}, 3 , 3)`
Raises:
ValueError if `v` is of incorrect shape
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N , dim = v.shape
if dim != 3:
raise ValueError('Input vectors have to be 3-dimensional.')
h = v.new_zeros(N, 3, 3)
x, y, z = v[:, 0], v[:, 1], v[:, 2]
h[:, 0, 1] = -z
h[:, 0, 2] = y
h[:, 1, 0] = z
h[:, 1, 2] = -x
h[:, 2, 0] = -y
h[:, 2, 1] = x
return h
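# A short round-trip sketch (added for illustration): hat() and hat_inv() invert
# each other exactly, and so3_log_map(so3_exponential_map(v)) approximately
# recovers v for rotation angles well below pi.
def _example_so3_roundtrip(batch=4):
    v = 0.5 * torch.randn(batch, 3)           # small-ish axis-angle vectors
    assert torch.allclose(hat_inv(hat(v)), v)
    R = so3_exponential_map(v)
    v_back = so3_log_map(R)
    return (v - v_back).abs().max()           # small, limited by the eps clamping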
|
c3dm-main
|
c3dm/tools/so3.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from tools.attr_dict import AttrDict
import inspect
import io
import os
import tarfile
import time
import urllib.request
import zipfile
import numpy as np
import torch
def pprint_dict(d, indent=3):
for key, value in d.items():
print(' ' * indent + str(key),end='', flush=True)
# print('.', end='', flush=True)
if isinstance(value, AttrDict):
# if len(value.keys())==1:
# import pdb; pdb.set_trace() # breakpoint 970a0708 //
# if not isinstance(value, AttrDict):
print("")
pprint_dict(value, indent+1)
else:
print(' = ' + str(value))
def has_method(ob,m):
obcls=ob.__class__
return hasattr(obcls, m) and callable(getattr(obcls,m))
def argparse_to_dict(args):
    raise NotImplementedError('finish this')
def get_net_input(batch):
# move to gpu and cast to Var
net_input = {}
for k in batch:
if has_method(batch[k],'cuda'):
net_input[k] = batch[k].cuda()
else:
net_input[k] = batch[k]
return net_input
def auto_init_args(obj,tgt=None,can_overwrite=False):
# autoassign constructor arguments
frame = inspect.currentframe().f_back # the frame above
params = frame.f_locals
nparams = frame.f_code.co_argcount
paramnames = frame.f_code.co_varnames[1:nparams]
if tgt is not None:
if not can_overwrite:
assert not hasattr(obj,tgt)
setattr(obj,tgt,AttrDict())
tgt_attr = getattr(obj,tgt)
else:
tgt_attr = obj
for name in paramnames:
# print('autosetting %s -> %s' % (name,str(params[name])) )
setattr(tgt_attr,name,params[name])
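# A brief usage sketch (added for illustration): auto_init_args() copies the
# caller's constructor arguments onto the instance, so the usual
# `self.lr = lr; self.batch_size = batch_size` boilerplate can be skipped.
class _ExampleAutoInit(object):
    def __init__(self, lr=0.1, batch_size=32):
        auto_init_args(self)                  # sets self.lr and self.batch_size
# _ExampleAutoInit(lr=0.01).lr == 0.01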
def untar_to_dir(url, path):
response = urllib.request.urlopen(url)
compressed_stream = io.BytesIO(response.read())
if url.endswith('zip'):
opener = lambda stream : zipfile.ZipFile(stream, "r")
else:
# assume tarball
opener = lambda stream : tarfile.open(fileobj=stream, mode="r|*")
with opener(compressed_stream) as tar:
os.makedirs(path)
tar.extractall(path=path)
class NumpySeedFix(object):
def __init__(self,seed=0):
self.rstate = None
self.seed = seed
def __enter__(self):
self.rstate = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, type, value, traceback):
if not(type is None ) and issubclass(type,Exception):
print("error inside 'with' block")
return
np.random.set_state(self.rstate)
class TorchSeedFix(object):
def __init__(self,seed=0):
self.rstate = None
self.seed = seed
def __enter__(self):
self.rstate = torch.random.get_rng_state()
torch.manual_seed(self.seed)
def __exit__(self, type, value, traceback):
if not(type is None ) and issubclass(type,Exception):
print("error inside 'with' block")
return
        torch.random.set_rng_state(self.rstate)  # restore the previous RNG state
class Timer:
def __init__(self,name="timer",quiet=False):
self.name = name
self.quiet = quiet
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
if not self.quiet:
print( "%20s: %1.6f sec" % ( self.name , self.interval ) )
|
c3dm-main
|
c3dm/tools/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
Fu = F
from torch.autograd import Variable
import numpy as np
from math import exp
from tools.functions import avg_l2_dist, avg_l2_huber, image_meshgrid, huber, logexploss
if torch.cuda.is_available():
T = torch.cuda
else:
T = torch
def total_variation_loss(image):
# shift one pixel and get difference (for both x and y direction)
loss = torch.abs(image[:, :, :, :-1] - image[:, :, :, 1:]) + \
torch.abs(image[:, :, :-1, :] - image[:, :, 1:, :])
return loss
class GaussianLayer(nn.Module):
def __init__(self, sigma=1., separated=False):
super(GaussianLayer, self).__init__()
self.separated = separated
filter_size = int(2*np.ceil(sigma)+1)
generated_filters = gaussian(filter_size, sigma).reshape([1,filter_size])
if self.separated:
self.gaussian_filter_horizontal = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(1,filter_size), padding=(0,filter_size//2),bias=False)
self.gaussian_filter_horizontal.weight.data.copy_(\
generated_filters)
self.gaussian_filter_vertical = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(filter_size,1), padding=(filter_size//2,0), bias=False)
self.gaussian_filter_vertical.weight.data.copy_(\
generated_filters.t())
else:
filter_full = generated_filters * generated_filters.t()
self.gaussian_filter = nn.Conv2d(\
in_channels=1, out_channels=1, \
kernel_size=(filter_size,filter_size),
padding=(filter_size//2,filter_size//2),bias=False)
self.gaussian_filter.weight.data = filter_full[None, None]
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, img):
ba, dim, he, wi = img.shape
img = torch.cat((img, img.new_ones(ba,1,he,wi)), dim=1)
img = img.view(ba*(dim+1), 1, he, wi)
if self.separated:
imgb = self.gaussian_filter_horizontal(img)
imgb = self.gaussian_filter_vertical(imgb)
else:
imgb = self.gaussian_filter(img)
imgb = imgb.view(ba, dim+1, he, wi)
imgb = imgb[ :, :dim, :, : ] / \
torch.clamp(imgb[ :, dim:dim+1, :, : ], 0.001)
return imgb
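# A hedged usage sketch (added for illustration; it assumes the gaussian() window
# helper used by GaussianLayer is defined elsewhere in this module): the layer
# appends a constant channel, blurs it together with the image, and renormalizes,
# which keeps the blur correct near image borders.
def _example_gaussian_blur():
    blur = GaussianLayer(sigma=2., separated=True)
    img = torch.rand(2, 3, 64, 64)            # a batch of RGB images in [0, 1]
    return blur(img)                          # same shape, smoothed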
class TVLoss(nn.Module):
def __init__(self):
super(TVLoss, self).__init__()
sobel_filter = torch.FloatTensor([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
sobel_filter = sobel_filter / sobel_filter.abs().sum()
self.sobel_filter_horizontal = nn.Conv2d(
in_channels=1, out_channels=1, bias=False,
kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
self.sobel_filter_horizontal.weight.data.copy_(sobel_filter)
self.sobel_filter_vertical = nn.Conv2d(
in_channels=1, out_channels=1, bias = False,
kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
self.sobel_filter_vertical.weight.data.copy_(sobel_filter.t())
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, im, masks=None):
tv = self.sobel_filter_horizontal(im).abs() + \
self.sobel_filter_vertical(im).abs()
if masks is not None:
masks = Fu.interpolate(masks, tv.shape[2:], mode='nearest')
tv = tv * masks
return tv.mean()
class LapFilter(nn.Module):
def __init__(self, size=5):
super(LapFilter, self).__init__()
# use gauss layer to setup the circular 2D filter (hacky)
gauss = GaussianLayer(sigma=size, separated=False)
flt = gauss.gaussian_filter.weight
thr = flt[0, 0, flt.shape[2]//2, 0]
flt = (flt >= thr).float()
flt = flt / torch.clamp(flt.sum(), 1e-4)
self.circ_filter = nn.Conv2d(
in_channels=1,
out_channels=1,
bias=False,
kernel_size=size,
padding=size
)
self.circ_filter.weight.data = flt.clone()
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, img, masks=None):
ba, dim, he, wi = img.shape
if (masks is not None) and (masks.shape[2:]!=img.shape[2:]):
masks = Fu.interpolate(masks, (he, wi), mode='nearest')
else:
masks = img.new_ones(ba, 1, he, wi)
imgf = img * masks
imgf = torch.cat((imgf, masks), dim=1)
imgf = imgf.view(ba*(dim+1), 1, he, wi)
imgf = self.circ_filter(imgf)
imgf = imgf.view(ba, dim+1, he, wi)
imgf = imgf[ :, :dim, :, : ] / \
torch.clamp(imgf[ :, dim:dim+1, :, : ], 0.001)
return imgf
class LapLoss(nn.Module):
def __init__(self, size=5):
super(LapLoss, self).__init__()
self.lapfilter = LapFilter(size=size)
# do not back prop!!!
for prm in self.parameters():
prm.requires_grad = False
def forward(self, img, masks=None):
if masks is not None:
masks = Fu.interpolate(masks, size=img.shape[2:], mode='nearest')
else:
masks = img[:,0:1,:,:] * 0. + 1.
imgf = self.lapfilter(img, masks=masks)
diff = (((img - imgf)*masks)**2).sum(dim=(1,2,3))
diff = diff / torch.clamp(masks.sum(dim=(1,2,3)), 1e-4)
return diff.mean(), imgf
## Perceptual VGG19 loss
class PerceptualVGG19(nn.Module):
def __init__(self, feature_layers, use_normalization=True,
path=None, input_from_tanh=True, flatten=True,
):
super(PerceptualVGG19, self).__init__()
if path != '' and path is not None:
print('Loading pretrained model')
model = models.vgg19(pretrained=False)
model.load_state_dict(torch.load(path))
else:
model = models.vgg19(pretrained=True)
model.float()
model.eval()
self.model = model
self.feature_layers = feature_layers
self.input_from_tanh = input_from_tanh
self.flatten = flatten
self.mean = torch.FloatTensor([0.485, 0.456, 0.406])
self.mean_tensor = None
self.std = torch.FloatTensor([0.229, 0.224, 0.225])
self.std_tensor = None
self.use_normalization = use_normalization
if torch.cuda.is_available():
self.mean = self.mean.cuda()
self.std = self.std.cuda()
for param in self.parameters():
param.requires_grad = False
def normalize(self, x):
if not self.use_normalization:
return x
if self.mean_tensor is None:
self.mean_tensor = self.mean.view(1, 3, 1, 1)
self.std_tensor = self.std.view(1, 3, 1, 1)
if self.input_from_tanh:
x = (x + 1) / 2
return (x - self.mean_tensor) / self.std_tensor
def run(self, x, resize):
features = []
masks = []
h = x
for f in range(max(self.feature_layers) + 1):
h = self.model.features[f](h)
if f in self.feature_layers:
not_normed_features = h.clone()
if resize:
features.append(not_normed_features.view(h.size(0),-1))
else:
features.append(not_normed_features)
if resize:
features = torch.cat(features, dim=1)
return masks, features
def forward(self, x, resize=True):
h = self.normalize(x)
return self.run(h, resize)
class AppearanceLoss(nn.modules.Module):
def __init__(self, n_l1_scales=4, sigma_coeff=1., huber_thr=0.03, border = 0.1):
super(AppearanceLoss, self).__init__()
self.n_l1_scales = n_l1_scales
self.sigma_coeff = sigma_coeff
self.huber_thr = huber_thr
self.border=border
self.perception_loss_module = PerceptualVGG19( feature_layers=[0, 5, 10, 15],
use_normalization=True,
input_from_tanh=False,
flatten=False )
self.perception_loss_module = self.perception_loss_module.cuda()
def grayscale_transform(self, x):
return x.mean(1,keepdim=True)
def forward(self, input, target, sig=None, mask=None):
# input/target an image between [0,1]
input_rgb = input
gt_tgt_rgb = target
image_size = list(input.shape[2:])
# mask both input and target borders
border_in_pix = int(self.border * np.array(input.shape[2:]).mean())
brd_mask = input.new_zeros(input.shape)
brd_mask[:,:,border_in_pix:-border_in_pix,border_in_pix:-border_in_pix] = 1.
if mask is not None:
brd_mask *= mask
input_rgb = input_rgb * brd_mask
gt_tgt_rgb = gt_tgt_rgb * brd_mask
# make sure we got the right input
assert gt_tgt_rgb.min() >= -0.001
assert gt_tgt_rgb.max() <= 1.001
# VGG
_, fake_features = self.perception_loss_module(input_rgb, resize=False)
_, tgt_features = self.perception_loss_module(gt_tgt_rgb, resize=False)
loss_vgg = 0.
sig_vgg = sig
for fake, tgt in zip(fake_features,tgt_features):
# vgg_df = huber(((fake-tgt)**2).mean(1,keepdim=True),scaling=self.huber_thr)
vgg_df = huber(((fake-tgt)**2),scaling=self.huber_thr).mean(1,keepdim=True)
if sig_vgg is not None:
# first smooth the sigmas
# g_sigma = sum(sig_vgg.shape[i]/fake.shape[i] for i in (2,3))*0.5
# if g_sigma > 1.:
# sig_vgg = gauss_filter(sig_vgg, g_sigma)
sig_vgg = Fu.interpolate(sig_vgg, size=fake.shape[2:],mode='bilinear')
loss_vgg = loss_vgg + \
Fu.interpolate( \
logexploss(vgg_df, sig_vgg, \
coeff=self.sigma_coeff, accum=False),
size=image_size )
else:
loss_vgg = loss_vgg + Fu.interpolate(vgg_df, size=image_size)
# loss_vgg = loss_vgg + vgg_df #.mean((1,2,3))
# RGB L1 ... multiscale
loss_rgb = 0.
sig_rgb = sig
for scale in range(self.n_l1_scales):
if scale > 0:
input_rgb = Fu.interpolate(input_rgb, scale_factor=0.5, mode='bilinear')
gt_tgt_rgb = Fu.interpolate(gt_tgt_rgb, scale_factor=0.5, mode='bilinear')
if sig_rgb is not None:
sig_rgb = Fu.interpolate(sig_rgb, scale_factor=0.5, mode='bilinear')
rgb_diff = huber(((input_rgb-gt_tgt_rgb)**2),scaling=self.huber_thr).mean(1,keepdim=True)
if sig is not None:
loss_rgb = loss_rgb + Fu.interpolate(logexploss(rgb_diff, sig_rgb,
coeff=self.sigma_coeff, accum=False), size=image_size)
else:
loss_rgb = loss_rgb + Fu.interpolate(rgb_diff, size=image_size)
return loss_vgg, loss_rgb, 0
def multiscale_loss(pred, gt, n_scales=4, scaling=0.01,
downscale=0.5, per_dim_loss=False, loss_fun=None,
grid=None):
# basis rendering loss
size = pred.shape[2:]
loss = 0.
# get the gauss filter
sig = 2 * (1/downscale) / 6.0 # as in scipy
g_filter = GaussianLayer(sigma=sig, separated=True).to(pred.device)
for scl in range(n_scales):
if scl==0:
gt_ = gt; p_ = pred; grid_ = grid
else:
p_ = g_filter(p_)
p_ = Fu.interpolate(p_, scale_factor=downscale, mode='bilinear')
gt_ = g_filter(gt_)
gt_ = Fu.interpolate(gt_, scale_factor=downscale, mode='bilinear')
if grid is not None:
grid_ = g_filter(grid_)
grid_ = Fu.interpolate(grid_, scale_factor=downscale, mode='bilinear')
if grid is not None:
gt_sample = Fu.grid_sample(gt_, grid_.permute(0, 2, 3, 1))
else:
gt_sample = gt_
if loss_fun is None:
if per_dim_loss:
h = huber((p_ - gt_sample)**2, scaling=scaling).mean(dim=1, keepdim=True)
else:
h = huber(((p_ - gt_sample)**2).mean(dim=1, keepdim=True), scaling=scaling)
else:
h = loss_fun(p_, gt_sample)
loss = loss + Fu.interpolate(h, size=size, mode='bilinear')
return loss * (1 / n_scales)
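# Example (sketch): multi-scale Huber comparison of a prediction and a target;
# every scale is upsampled back to the input resolution, so the result is a
# per-pixel loss map of the same spatial size as the inputs.
# pred, gt = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
# loss_map = multiscale_loss(pred, gt, n_scales=3)  # -> (2, 1, 64, 64)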
|
c3dm-main
|
c3dm/tools/loss_models.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import sys
import time
import pickle
import matplotlib
import matplotlib.pyplot as plt
import copy
from matplotlib import colors as mcolors
from itertools import cycle
from collections.abc import Iterable
from tools.vis_utils import get_visdom_connection
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.history = []
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1, epoch=0):
# make sure the history is of the same len as epoch
while len(self.history) <= epoch:
self.history.append([])
self.history[epoch].append( val / n )
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def fill_undefined(self, max_epoch=None):
if len(self.history)==0:
return False
last = [float('NaN')]
had_undefined = False
if max_epoch is None:
max_epoch = len(self.history)
while len(self.history) < max_epoch:
self.history.append([])
assert len(self.history) == max_epoch
for hi in range(max_epoch):
h = self.history[min(hi, len(self.history)-1)]
if len(h) > 0:
last = h
else:
had_undefined = True
self.history[hi] = last
self.count = 1
self.val = copy.deepcopy(self.history[-1][0])
self.sum = self.val
self.avg = self.val
return had_undefined
def get_epoch_averages( self, epoch=-1):
if len(self.history) == 0: # no stats here
return None
else:
history = self.history
if epoch==-1:
return [ float(np.array(x).mean()) for x in history ]
else:
return float(np.array(history[epoch]).mean())
def get_all_values( self ):
all_vals = [ np.array(x) for x in self.history ]
all_vals = np.concatenate(all_vals)
return all_vals
def get_epoch(self):
return len(self.history)
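# Example (sketch): AverageMeter keeps a running average plus a per-epoch
# history, so the same object can be queried both during and after training.
# meter = AverageMeter()
# meter.update(0.5, n=1, epoch=0); meter.update(0.7, n=1, epoch=0)
# meter.avg                    # -> 0.6
# meter.get_epoch_averages(0)  # -> 0.6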
class Stats(object):
"""
Stats logging object useful for gathering statistics while training a deep net in PyTorch.
Example:
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
network = init_net() # init a pytorch module (= neural network)
dataloader = init_dataloader() # init a dataloader
for epoch in range(10):
# start of epoch -> call new_epoch
stats.new_epoch()
# iterate over batches
for batch in dataloader:
output = network(batch) # run and save into a dict of output variables "output"
# stats.update() automatically parses the 'objective' and 'top1e' from
# the "output" dict and stores this into the db
stats.update(output)
stats.print() # prints the averages over the given epoch
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
"""
def __init__(self,log_vars,verbose=False,epoch=-1,visdom_env='main',
do_plot=True, plot_file=None, visdom_server='http://localhost',
visdom_port=8097 ):
self.verbose = verbose
self.log_vars = log_vars
self.visdom_env = visdom_env
self.visdom_server = visdom_server
self.visdom_port = visdom_port
self.plot_file = plot_file
self.do_plot = do_plot
self.hard_reset(epoch=epoch)
# some sugar to be used with "with stats:" at the beginning of the epoch
def __enter__(self):
if self.do_plot and self.epoch >= 0:
self.plot_stats(self.visdom_env)
self.new_epoch()
def __exit__(self, type, value, traceback):
iserr = not (type is None) and issubclass(type, Exception)
iserr = iserr or (type is KeyboardInterrupt)
if iserr:
print("error inside 'with' block")
return
if self.do_plot:
self.plot_stats(self.visdom_env)
def reset(self): # to be called after each epoch
stat_sets = list(self.stats.keys())
if self.verbose:
print("stats: epoch %d - reset" % self.epoch)
self.it = { k:-1 for k in stat_sets }
for stat_set in stat_sets:
for stat in self.stats[stat_set]:
self.stats[stat_set][stat].reset()
def hard_reset(self,epoch=-1): # to be called during object __init__
self.epoch = epoch
if self.verbose:
print("stats: epoch %d - hard reset" % self.epoch)
self.stats = {}
# reset
self.reset()
def new_epoch(self):
if self.verbose:
print("stats: new epoch %d" % (self.epoch+1))
self.epoch += 1
self.reset() #zero the stats + increase epoch counter
def gather_value(self,val):
if type(val)==float:
pass
else:
val = val.detach().data.cpu().numpy()
val = float(val.sum())
return val
def fill_undefined(self):
stat_sets = list(self.stats.keys())
undefined = {}
max_epoch = max( \
max(len(stat.history) for stat in self.stats[stat_set].values()) \
for stat_set in self.stats )
for stat_set in stat_sets:
undefined[stat_set] = []
# print(max_epoch)
for stat in self.stats[stat_set].keys():
had_undefined = self.stats[stat_set][\
stat].fill_undefined(max_epoch=max_epoch)
if had_undefined:
# print(stat)
undefined[stat_set].append(stat)
return undefined
def update(self,preds,time_start=None,freeze_iter=False,stat_set='train',log_vars=None):
if self.epoch==-1: # uninitialized
print("warning: epoch==-1 means uninitialized stats structure -> new_epoch() called")
self.new_epoch()
if stat_set not in self.stats:
self.stats[stat_set] = {}
self.it[stat_set] = -1
if not freeze_iter:
self.it[stat_set] += 1
epoch = self.epoch
it = self.it[stat_set]
log_vars = log_vars or self.log_vars #TODO: need it?
for stat in log_vars:
if stat not in self.stats[stat_set]:
self.stats[stat_set][stat] = AverageMeter()
if stat=='sec/it': # compute speed
if time_start is None:
elapsed = 0.
else:
elapsed = time.time() - time_start
time_per_it = float(elapsed) / float(it+1)
val = time_per_it
# self.stats[stat_set]['sec/it'].update(time_per_it,epoch=epoch,n=1)
else:
if stat in preds:
try:
val = self.gather_value(preds[stat])
except:
raise ValueError("could not extract prediction %s\
from the prediction dictionary" % stat)
else:
val = None
if val is not None:
self.stats[stat_set][stat].update(val,epoch=epoch,n=1)
def get_epoch_averages(self, epoch=None):
stat_sets = list(self.stats.keys())
if epoch is None: epoch = self.epoch
if epoch==-1: epoch = list(range(self.epoch))
outvals = {}
for stat_set in stat_sets:
outvals[stat_set] = { 'epoch': epoch,
'it': self.it[stat_set],
'epoch_max': self.epoch }
for stat in self.stats[stat_set].keys():
if self.stats[stat_set][stat].count==0: continue
if isinstance(epoch, Iterable):
avgs = self.stats[stat_set][stat].get_epoch_averages()
avgs = [ avgs[e] for e in epoch ]
else:
avgs = self.stats[stat_set][stat].get_epoch_averages(epoch=epoch)
outvals[stat_set][stat] = avgs
return outvals
def print(self,max_it=None,stat_set='train',vars_print=None,get_str=False):
epoch = self.epoch
stats = self.stats
str_out = ""
it = self.it[stat_set]
stat_str = ""
stats_print = sorted(stats[stat_set].keys())
for stat in stats_print:
if stats[stat_set][stat].count==0: continue
stat_str += " {0:.12}: {1:1.6f} |".format( \
stat,stats[stat_set][stat].avg)
head_str = "[%s] | epoch %3d | it %5d" % (stat_set,epoch,it)
if max_it: head_str += "/ %d" % max_it
str_out = "%s | %s" % (head_str,stat_str)
if get_str:
return str_out
else:
print(str_out)
def plot_stats( self, visdom_env=None, plot_file=None, \
visdom_server=None, visdom_port=None ):
# use the cached visdom env if none supplied
if visdom_env is None: visdom_env = self.visdom_env
if visdom_server is None: visdom_server = self.visdom_server
if visdom_port is None: visdom_port = self.visdom_port
if plot_file is None: plot_file = self.plot_file
stat_sets = list(self.stats.keys())
print("printing charts to visdom env '%s' (%s:%d)" % \
(visdom_env,visdom_server,visdom_port) )
novisdom = False
viz = get_visdom_connection(server=visdom_server,port=visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping visdom plots")
novisdom = True
lines = []
# plot metrics
if not novisdom:
viz.close(env=visdom_env,win=None)
for stat in self.log_vars:
vals = []
stat_sets_now = []
for stat_set in stat_sets:
val = self.stats[stat_set][stat].get_epoch_averages()
if val is None:
continue
else:
val = np.array(val)[:,None]
stat_sets_now.append(stat_set)
vals.append(val)
if len(vals)==0:
continue
# pad for skipped test evals
size = np.max([val.shape[0] for val in vals])
vals = [
np.pad(val, ((0, size - val.shape[0]), (0, 0)), mode='edge')
for val in vals
]
try:
vals = np.concatenate(vals, axis=1)
except:
print('cannot plot %s!' % stat)
continue
x = np.arange(vals.shape[0])
lines.append( (stat_sets_now,stat,x,vals,) )
if not novisdom:
for idx , ( tmodes, stat , x , vals ) in enumerate( lines ):
if vals.shape[1] == 1: # eval
continue
title = "%s" % stat
opts = dict(title=title,legend=list(tmodes))
try:
viz.line( Y=vals,X=x,env=visdom_env,opts=opts)
except:
print("Warning: problem adding data point", x.shape, vals.shape)
if plot_file:
print("exporting stats to %s" % plot_file)
ncol = 3
nrow = int(np.ceil(float(len(lines))/ncol))
matplotlib.rcParams.update({'font.size': 5})
color=cycle(plt.cm.tab10(np.linspace(0,1,10)))
fig = plt.figure(1); plt.clf()
for idx , ( tmodes, stat , x , vals ) in enumerate( lines ):
c=next(color)
plt.subplot(nrow,ncol,idx+1)
ax = plt.gca()
for vali,vals_ in enumerate(vals.T):
c_ = c * ( 1. - float(vali) * 0.3 )
plt.plot( x, vals_, c = c_, linewidth=1 )
plt.ylabel( stat )
plt.xlabel( "epoch" )
plt.gca().yaxis.label.set_color(c[0:3]*0.75)
plt.legend(tmodes)
gcolor = np.array(mcolors.to_rgba('lightgray'))
plt.grid(b=True, which='major', color=gcolor, linestyle='-', linewidth=0.4)
plt.grid(b=True, which='minor', color=gcolor, linestyle='--', linewidth=0.2)
plt.minorticks_on()
plt.tight_layout()
plt.show()
fig.savefig( plot_file )
def synchronize_logged_vars(self,log_vars,default_val=float('NaN')):
stat_sets = list(self.stats.keys())
# remove the additional log_vars
for stat_set in stat_sets:
for stat in self.stats[stat_set].keys():
if stat not in log_vars:
print("additional stat %s:%s -> removing" % (stat_set,stat) )
self.stats[stat_set] = {
stat: v for stat, v in self.stats[stat_set].items()
if stat in log_vars
}
self.log_vars = log_vars # !!!
for stat_set in stat_sets:
reference_stat = list(self.stats[stat_set].keys())[0]
for stat in log_vars:
if stat not in self.stats[stat_set]:
print("missing stat %s:%s -> filling with default values (%1.2f)" % \
(stat_set,stat,default_val) )
elif len(self.stats[stat_set][stat].history)!=self.epoch+1:
h = self.stats[stat_set][stat].history
if len(h)==0: # just never updated stat ... skip
continue
else:
print("padding stat %s:%s with the last value" % \
(stat_set,stat) )
self.stats[stat_set][stat].history = h + [h[-1]] * (self.epoch+1 - len(h))
assert len(self.stats[stat_set][stat].history) == self.epoch+1
continue
else:
continue
self.stats[stat_set][stat] = AverageMeter()
self.stats[stat_set][stat].reset()
lastep = self.epoch+1
for ep in range(lastep):
self.stats[stat_set][stat].update(default_val,n=1,epoch=ep)
epoch_self = self.stats[stat_set][reference_stat].get_epoch()
epoch_generated = self.stats[stat_set][stat].get_epoch()
assert epoch_self==epoch_generated, \
"bad epoch of synchronized log_var! %d vs %d" % \
(epoch_self,epoch_generated)
|
c3dm-main
|
c3dm/tools/stats.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import copy
import io
import os
from matplotlib import cm
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import torch
from tools.utils import NumpySeedFix
from visdom import Visdom
import plotly.graph_objects as go
from plotly.subplots import make_subplots
viz = None
def get_visdom_env(cfg):
if len(cfg.visdom_env)==0:
visdom_env = os.path.basename(cfg.exp_dir)
else:
visdom_env = cfg.visdom_env
return visdom_env
def get_visdom_connection(server='http://localhost',port=8097):
global viz
if viz is None:
viz = Visdom(server=server,port=port)
return viz
def denorm_image_trivial(im):
im = im - im.min()
im = im / (im.max()+1e-7)
return im
def ensure_im_width(img,basewidth):
# basewidth = 300
# img = Image.open('somepic.jpg')
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
return img
def fig2data(fig, size=None):
"""Convert a Matplotlib figure to a numpy array
Based on the ICARE wiki.
Args:
fig (matplotlib.Figure): a figure to be converted
Returns:
(ndarray): an array of RGB values
"""
# TODO(samuel): convert figure to provide a tight fit in image
buf = io.BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
buf.seek(0)
im = Image.open(buf).convert('RGB')
if size:
im = im.resize(size)
# fig.canvas.draw()
# import ipdb ; ipdb.set_trace()
# # w,h = fig.canvas.get_width_height()
# # buf = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
# buf.shape = (h, w, 3)
# return buf
return np.array(im)
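# Example (sketch): rasterise the current matplotlib figure into an RGB array,
# e.g. before sending it to visdom with viz.image().
# fig = plt.figure(); plt.plot([0, 1], [0, 1])
# im = fig2data(fig)  # -> (H, W, 3) array of RGB values
# plt.close(fig)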
def get_depth_image(depth, mask=None, cmap='gray'):
cmap_ = cm.get_cmap(cmap)
clr = cmap_(depth)
clr = clr[0].transpose(2,0,1)[:3]
# if mask is not None:
# clr = clr * mask + (1-mask)
return clr
def show_flow(
viz,
env,
p,
image=None,
title='flow',
linewidth=2,
win=None,
):
fig = plt.figure(figsize=[11,11])
if image is not None:
plt.imshow(image.transpose( (1,2,0) ))
plt.axis('off')
plt.plot(p[:,0,:], p[:,1,:], '-', color='m', linewidth=linewidth, zorder=1)
if image is None:
plt.gca().invert_yaxis()
plt.axis('equal')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().set_axis_off()
# return fig
improj = np.array(fig2data(fig))
if env is not None:
win = viz.image(
np.array(improj).transpose(2,0,1),
env=env,
opts={'title': title},
win=win,
)
else:
win = None
plt.close(fig)
return improj, win
def show_projections( viz,
env,
p,
v=None,
image_path=None,
image=None,
title='projs',
cmap__='gist_ncar',
markersize=None,
sticks=None,
stickwidth=2,
stick_color=None,
plot_point_order=False,
bbox = None,
win=None ):
if image is None:
try:
im = Image.open(image_path).convert('RGB')
im = np.array(im).transpose(2,0,1)
except:
im = None
print('!cannot load image %s' % image_path)
else:
im = image
nkp = int(p.shape[2])
pid = np.linspace(0., 1., nkp)
if v is not None:
okp = np.where(v > 0)[0]
else:
okp = np.where(np.ones(nkp))[0]
possible_markers = ['.','*','+']
markers = [possible_markers[i%len(possible_markers)] for i in range(len(p))]
if markersize is None:
msz = 50
if nkp > 40:
msz = 5
markersizes = [msz]*nkp
else:
markersizes = [markersize]*nkp
fig = plt.figure(figsize=[11,11])
if im is not None:
plt.imshow( im.transpose( (1,2,0) ) ); plt.axis('off')
if sticks is not None:
if stick_color is not None:
linecol = stick_color
else:
linecol = [0.,0.,0.]
for p_ in p:
for stick in sticks:
linestyle = '-'
if (v is not None) and not (v[stick[0]] > 0 and v[stick[1]] > 0):
continue # skip sticks whose endpoints are not both visible
plt.plot( p_[0,stick], p_[1,stick], linestyle,
color=linecol, linewidth=stickwidth, zorder=1 )
for p_, marker, msz in zip(p, markers, markersizes):
plt.scatter( p_[0,okp], p_[1,okp], msz, pid[okp],
cmap=cmap__, linewidths=2, marker=marker, zorder=2, \
vmin=0., vmax=1. )
if plot_point_order:
for ii in okp:
plt.text( p_[0,ii], p_[1,ii], '%d' % ii, fontsize=int(msz*0.25) )
if bbox is not None:
import matplotlib.patches as patches
# Create a Rectangle patch
rect = patches.Rectangle((bbox[0],bbox[1]),bbox[2],bbox[3],\
linewidth=1,edgecolor='r',facecolor='none')
plt.gca().add_patch(rect)
if im is None:
plt.gca().invert_yaxis()
plt.axis('equal')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# plt.gca().set_frame_on(False)
plt.gca().set_axis_off()
else: # remove all margins
# plt.gca().axes.get_xaxis().set_visible(False)
# plt.gca().axes.get_yaxis().set_visible(False)
# plt.gca().set_frame_on(False)
# plt.gca().set_axis_off()
pass
# return fig
improj = np.array(fig2data(fig))
if env is not None:
win = viz.image( np.array(improj).transpose(2,0,1), \
env=env, opts={ 'title': title }, win=win )
else:
win = None
plt.close(fig)
return improj, win
def extend_to_3d_skeleton_simple(ptcloud,sticks,line_resol=10,rgb=None):
H36M_TO_MPII_PERM = [ 3, 2, 1, 4, 5, 6, 0, 8, 9, 10, 16, 15, 14, 11, 12, 13]
rgb_now = rgb.T if rgb is not None else None
ptcloud_now = ptcloud.T
ptcloud = ptcloud.T
rgb = rgb.T if rgb is not None else rgb
if ptcloud_now.shape[1]==16: # MPII
sticks_new = []
for stick in sticks:
if stick[0] in H36M_TO_MPII_PERM and stick[1] in H36M_TO_MPII_PERM:
s1 = H36M_TO_MPII_PERM.index(int(stick[0]))
s2 = H36M_TO_MPII_PERM.index(int(stick[1]))
sticks_new.append( [s1,s2] )
sticks = sticks_new
for sticki,stick in enumerate(sticks):
alpha = np.linspace(0,1,line_resol)[:,None]
linepoints = ptcloud[stick[0],:][None,:] * alpha + \
ptcloud[stick[1],:][None,:] * ( 1. - alpha )
ptcloud_now = np.concatenate((ptcloud_now,linepoints),axis=0)
if rgb is not None:
linergb = rgb[stick[0],:][None,:] * alpha + \
rgb[stick[1],:][None,:] * ( 1.-alpha )
rgb_now = np.concatenate((rgb_now,linergb.astype(np.int32)),axis=0)
if rgb is not None:
rgb_now = rgb_now.T
return ptcloud_now.T, rgb_now
def autocolor_point_cloud(pcl, dim=1):
d = pcl[dim]
d = d - d.mean()
d = d / d.std()
d = np.minimum(np.maximum(d,-2.),2.)
d = (d + 2.) / 4.
rgb = (cm.get_cmap('jet')(d)[:,:3]*255.).astype(np.int32)
return rgb.T
def visdom_plot_pointclouds( viz, pcl, visdom_env, title,\
plot_legend=False, markersize=2,\
nmax=5000, sticks=None, win=None, \
autocolor=False ):
if sticks is not None:
pcl = { k:extend_to_3d_skeleton_simple(v,sticks)[0] \
for k,v in pcl.items() }
legend = list(pcl.keys())
cmap = 'tab10'
npcl = len(pcl)
rgb = (cm.get_cmap(cmap)(np.linspace(0,1,10)) \
[:,:3]*255.).astype(np.int32).T
rgb = np.tile(rgb,(1,int(np.ceil(npcl/10))))[:,0:npcl]
rgb_cat = { k:np.tile(rgb[:,i:i+1],(1,p.shape[1])) for \
i,(k,p) in enumerate(pcl.items()) }
rgb_cat = np.concatenate(list(rgb_cat.values()),axis=1)
pcl_cat = np.concatenate(list(pcl.values()),axis=1)
if pcl_cat.shape[0] > 3:
rgb_cat = (pcl_cat[3:6, :] * 255).astype(np.int32)
pcl_cat = pcl_cat[0:3, :]
elif autocolor:
rgb_cat = autocolor_point_cloud(pcl_cat)
if pcl_cat.shape[1] > nmax:
with NumpySeedFix():
prm = np.random.permutation( \
pcl_cat.shape[1])[0:nmax]
pcl_cat = pcl_cat[:,prm]
rgb_cat = rgb_cat[:,prm]
win = viz.scatter( pcl_cat.T, env=visdom_env, \
opts= { 'title': title, 'markersize': markersize, \
'markercolor': rgb_cat.T }, win=win )
# legend
if plot_legend:
dummy_vals = np.tile(np.arange(npcl)[:,None],(1,2)).astype(np.float32)
title = "%s_%s" % (title,legend)
opts = dict( title=title, legend=legend, width=400, height=400 )
viz.line( dummy_vals.T,env=visdom_env,opts=opts,win=win+'_legend')
return win
def visdom_plotly_pointclouds( viz, pcl, visdom_env,
title=None,
markersize=2,
nmax=5000,
sticks=None,
win=None,
autocolor=False,
in_subplots=False,
height=500,
width=500,
normalise=False ):
if sticks is not None:
pcl = { k:extend_to_3d_skeleton_simple(v,sticks)[0] \
for k,v in pcl.items() }
npcl = len(pcl)
rgb = np.linspace(0,1,10)
rgb = np.array([rgb[i%10] for i in range(npcl)])
if in_subplots:
cols = npcl
else:
cols = 1
titles = [None]*cols; titles[0] = title
fig = make_subplots(
rows = 1, cols = cols,
specs=[[{"type": "scene"}]*cols],
subplot_titles=titles,
column_widths=[1.]*cols,
)
for pcli, ((pcl_name, pcl_data),color) in enumerate(zip(pcl.items(), rgb)):
if pcl_data.shape[1] > nmax:
with NumpySeedFix():
prm = np.random.permutation(pcl_data.shape[1])[0:nmax]
pcl_data = pcl_data[:,prm]
if pcl_data.shape[0]==6:
# we have color
pcl_color = np.minimum(np.maximum(pcl_data[3:],0.),1.)
pcl_data = pcl_data[:3]
pcl_color = [(255.*c).astype(int).tolist() for c in pcl_color.T]
marker=dict(
size=markersize,
color=pcl_color,
opacity=1.)
else:
marker=dict(
size=markersize,
color=color,
colorscale='Spectral',
opacity=1.)
if normalise:
pcl_data -= pcl_data.mean(axis=1, keepdims=True)
pcl_data /= (pcl_data.max(axis=1) - pcl_data.min(axis=1)).max()
pcl[pcl_name] = pcl_data
fig.add_trace(
go.Scatter3d(
x=pcl_data[0, :],
y=pcl_data[1, :],
z=pcl_data[2, :],
mode='markers',
name=pcl_name,
visible=True,
marker=marker,
),
row = 1,
col = pcli+1 if in_subplots else 1
)
pcl_cat = np.concatenate(list(pcl.values()),axis=1)[:3]
pcl_c = pcl_cat.mean(1)
maxextent = (pcl_cat.max(axis=1) - pcl_cat.min(axis=1)).max()
bounds = np.stack((pcl_c-maxextent, pcl_c+maxextent))
height = height
width = width * cols
fig.update_layout(height = height, width = width,
scene = dict(
xaxis=dict(range=[bounds[0,0],bounds[1,0]]),
yaxis=dict(range=[bounds[0,1],bounds[1,1]]),
zaxis=dict(range=[bounds[0,2],bounds[1,2]]),
aspectmode='cube',
)
)
# print(win)
viz.plotlyplot(fig, env=visdom_env, win=win)
return win
def write_into_image(image_np, txt, color=(0,0,255)):
img = Image.fromarray(image_np.transpose((1,2,0)))
draw = ImageDraw.Draw(img)
draw.text((0, 0), txt, color)
image_np = np.transpose(np.array(img),(2,0,1))
return image_np
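# Example (sketch): burn a text label into a CHW uint8 image before logging it.
# img = (np.random.rand(3, 128, 128) * 255).astype(np.uint8)
# img_labelled = write_into_image(img, 'epoch 3', color=(255, 0, 0))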
def make_match_image(
im_kps,
im_paths,
nmax=5000,
line_width=5
):
# images
ims = [np.array(Image.open(im).convert('RGB')) for im in im_paths]
_, img_width, _ = ims[0].shape
# pad smaller image height if not the same
if ims[0].shape[0] != ims[1].shape[0]:
pad_amnt = np.abs(ims[0].shape[0] - ims[1].shape[0])
if ims[0].shape[0] < ims[1].shape[0]:
im_to_pad = 0
else:
im_to_pad = 1
ims[im_to_pad] = np.pad(
ims[im_to_pad], ((0, pad_amnt), (0, 0), (0, 0)), mode='constant')
assert ims[0].shape[0] == ims[1].shape[0]
ims = np.concatenate(ims, axis=1)
ims = Image.fromarray(ims.astype(np.uint8))
if im_kps is not None:
# image keypoints
if im_kps.shape[0] > nmax:
prm = np.random.permutation(im_kps.shape[0])[0:nmax]
im_kps = im_kps[prm]
else:
im_kps = im_kps.copy()
im_kps[:,0,1] += img_width
# round for imdraw
im_kps = np.round(im_kps).astype(int)
cmap = cm.get_cmap('rainbow')
d = ImageDraw.Draw(ims)
for mi, match in enumerate(im_kps):
clr = cmap(float(mi) / im_kps.shape[0])
clr = (np.array(clr) * 255.).astype(int).tolist()
d.line((
tuple(match[:,0].tolist()),
tuple(match[:,1].tolist())
), fill=tuple(clr), width=line_width)
return ims
def visdom_show_many_image_matches(
viz,
ims_kps,
ims_paths,
visdom_env='main',
visdom_win=None,
title=None,
line_width=10,
nmax=5000,
max_im_sz=200,
):
ims = []
for im_kps, im_paths in zip(ims_kps, ims_paths):
im_ = make_match_image(
im_kps,
im_paths,
nmax=nmax,
line_width=line_width,
)
sz_ = (
np.array(im_.size) * (max_im_sz / max(im_.size))
).astype(int).tolist()
im_ = im_.resize(sz_, Image.BILINEAR)
im_ = np.array(im_).astype(float)/255.
im_ = np.transpose(im_, (2,0,1))
ims.append(im_)
# pad all images so that we can stack
max_h = max(im.shape[1] for im in ims)
max_w = max(im.shape[2] for im in ims)
for imi, im in enumerate(ims):
pad_h = max_h - im.shape[1]
pad_w = max_w - im.shape[2]
ims[imi] = np.pad(
im, ((0, 0), (0, pad_h), (0, pad_w)), mode='constant')
ims = np.stack(ims)
viz.images(ims, env=visdom_env, win=visdom_win)
def _get_camera_wireframe(scale=1.):
a = 0.5*np.array([-2, 1.5, 4])
b = 0.5*np.array([ 2, 1.5, 4])
c = 0.5*np.array([-2, -1.5, 4])
d = 0.5*np.array([ 2, -1.5, 4])
C = np.zeros(3)
F = np.array([0, 0, 3])
lines = np.array([a,b,d,c,a,C,b,d,C,c,C,F]) * scale
return lines
def visdom_plotly_cameras(
viz,
cameras,
visdom_env='main',
visdom_win=None,
title=None,
markersize=2,
nmax=5000,
in_subplots=False,
camera_scale=0.05, # in multiples of std_dev of the scene pointcloud
height=1000,
width=1000,
):
titles = [title]
fig = make_subplots(
rows = 1, cols = 1,
specs=[[{"type": "scene"}]],
subplot_titles=titles,
column_widths=[1.],
)
all_pts = []
# add cameras
R = cameras[:,:,:3]
t = cameras[:,:,3:]
C = -np.matmul(R.transpose(0, 2, 1), t)
all_pts = C[:,:,0]
scene_std = all_pts.std(0).mean()
cam_lines_canonical = _get_camera_wireframe(scale=camera_scale*scene_std)
cmap = cm.get_cmap('rainbow')
camera_colors = cmap(np.linspace(0., 1., R.shape[0]))[:, :3]
# mult by 220 here to make the colors a bit darker
camera_colors = ['rgb(%s)' % ','.join(
[str(int(c*220.)) for c in clr]
) for clr in camera_colors]
for clr_, R_, t_ in zip(camera_colors, R, t):
cam_lines_world = R_.T @ (cam_lines_canonical.T - t_)
x, y, z = cam_lines_world
fig.add_trace(
go.Scatter3d(
x=x, y=y, z=z,
marker=dict(
size=2,
# colorscale='Spectral',
color=clr_,
),
line=dict(
# colorscale='Spectral',
color=clr_,
width=2,
)
),
row=1,
col=1,
)
pcl_c = all_pts.mean(0)
maxextent = (all_pts.max(axis=0) - all_pts.min(axis=0)).max()
bounds = np.stack((pcl_c.T-maxextent, pcl_c.T+maxextent))
fig.update_layout(height = height, width = width,
showlegend=False,
scene = dict(
xaxis=dict(range=[bounds[0,0], bounds[1,0]]),
yaxis=dict(range=[bounds[0,1], bounds[1,1]]),
zaxis=dict(range=[bounds[0,2], bounds[1,2]]),
aspectmode='cube',
)
)
viz.plotlyplot(fig, env=visdom_env, win=visdom_win)
|
c3dm-main
|
c3dm/tools/vis_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from visdom import Visdom
from tools.vis_utils import get_visdom_connection, denorm_image_trivial
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import os
from PIL import Image
# NOTE: the following fragment appears to be an orphaned module-level snippet:
# it references names (`title`, `pt_cloud_np`, `k`, `color`) that are never
# defined in this file and uses 0-based row/col indices, which plotly rejects.
# It is kept here, commented out, for reference.
# fig = make_subplots(
#     rows = 1, cols = 1,
#     specs=[[{"type": "scene"}]],
#     subplot_titles=(title),
#     column_widths=[0.5],
# )
# fig.add_trace(
#     go.Scatter3d(
#         x=-pt_cloud_np[:, 0],
#         y=-pt_cloud_np[:, 1],
#         z=-pt_cloud_np[:, 2],
#         mode='markers',
#         name=k,
#         visible=True,
#         marker=dict(
#             size=8,
#             color=color,
#             opacity=1.,
#         )), row = 0, col = 0)
class VisdomPlotly():
def __init__(self, viz, visdom_env_imgs=None, win=None):
# store the connection and target env so that show() can use them later
self.viz = viz
self.visdom_env_imgs = visdom_env_imgs
self.win = win
self.camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=0.0, y=2.0, z=0.0)
)
self.scene = dict(
xaxis = dict(nticks=10, range=[-100,100],),
yaxis = dict(nticks=10, range=[-100,100],),
zaxis = dict(nticks=10, range=[-100,100],),
camera = self.camera)
def extend_to_skeleton(self, pt_cloud, skeleton, line_resolution = 25):
ptcloud_now = pt_cloud
for stick in skeleton:
alpha = np.linspace(0,1,line_resolution)[:, None]
linepoints = pt_cloud[stick[0],:][None,:] * alpha + \
pt_cloud[stick[1],:][None,:] * ( 1. - alpha )
ptcloud_now = np.concatenate((ptcloud_now,linepoints),axis=0)
return ptcloud_now
def make_fig(self, rows, cols, epoch, it, idx_image, acc_detail, percent_agree):
# fig_dict['subplot_title']
title="e%d_it%d_im%d"%(epoch, it, idx_image)
self.fig = make_subplots(
rows = rows, cols = cols,
specs=[[{"type": "xy"},{"type": "xy"},{"type": "scene"}, {"type": "xy"}, {"type": "xy"}]],
subplot_titles=(
"Input: {0}".format(title),
"Projection",
acc_detail,
'Mode Freqs',
'Mode Freqs (Flow): {0}'.format(percent_agree)
),
column_widths=[0.5] * cols,
)
# vis_plot = VisdomPlotly(visdom_env_imgs, stats.visdom_server, stats.visdom_port)
# vis_plot.make_fig(1, 5, stats.epoch, stats.it[trainmode], idx_image, "sqerr [M{0}]: {1:.2f}".format(min_mode, rmse_h36m), flow_agree)
# vis_plot.add_image(img_with_gt)
# vis_plot.add_2d_points(keypoints_2d.reshape(-1, 2), 1, 1, 'Input (Joints)', 'green')
# vis_plot.add_2d_points(keypoints_2d.reshape(-1, 2), 1, 2, 'Input (Joints)', 'green')
# vis_plot.add_3d_points(gt_sample.reshape(-1, 3) * 0.1, 1, 3, 'GT', 'green', visible='legendonly')
# vis_plot.add_3d_points(in_verts[idx_image].reshape(-1, 3) * 0.1, 1, 3, 'GT', 'green', s=1, opacity=0.5)
def add_image(self, img):
bg_image = Image.fromarray(img)
self.fig.update_layout(
images = [
go.layout.Image(
source=bg_image,
xref="x1",
yref="y1",
x=0,
y=bg_image.size[1],
sizex=bg_image.size[0],
sizey=bg_image.size[1],
sizing="stretch",
opacity=0.75,
layer="below"),
go.layout.Image(
source=bg_image,
xref="x2",
yref="y2",
x=0,
y=bg_image.size[1],
sizex=bg_image.size[0],
sizey=bg_image.size[1],
sizing="stretch",
opacity=0.75,
layer="below")
]
)
def add_3d_points(self, pt_cloud_np, row, col, name, color, opacity=1.0, s=8, visible=True):
self.fig.add_trace(
go.Scatter3d(
x=-1 * pt_cloud_np[:, 0],
y=-1 * pt_cloud_np[:, 2],
z=-1 * pt_cloud_np[:, 1],
mode='markers',
name=name,
visible=visible,
marker=dict(
size=s,
color=color,
opacity=opacity,
)), row = row, col = col)
self.fig.update_scenes(patch = self.scene, row = row, col = col)
self.add_hack_points(row, col)
# def add_mesh(self, verts, triangles, row, col, name, color):
# self.fig.add_trace(
# go.Mesh3d(
# x=verts[:, 0],
# y=verts[:, 1],
# z=verts[:, 2],
# colorbar_title='z',
# colorscale=[[0, 'gold'],
# [0.5, 'mediumturquoise'],
# [1, 'magenta']],
# # Intensity of each vertex, which will be interpolated and color-coded
# intensity=[0, 0.33, 0.66, 1],
# # i, j and k give the vertices of triangles
# i=triangles[:, 0],
# j=triangles[:, 1],
# k=triangles[:, 2],
# name=name,
# showscale=True
# )
# )
# self.fig.update_scenes(patch = self.scene, row = row, col = col)
def add_2d_points(self, points, row, col, name, color, scale=6, opacity=1.0, im_size = 224, extend=False, visible=True):
points_npy = points
if extend:
points_npy = self.extend_to_skeleton(points_npy, SKELETON_2D)
self.fig.add_trace(
go.Scatter(
x=points_npy[:, 0],
y=im_size-points_npy[:, 1],
mode='markers',
name=name,
visible=visible,
marker=dict(
size=scale,
color=color, # set color to an array/list of desired values
opacity=opacity,
)), row = row, col = col)
self.fig.update_xaxes(range=[0, im_size], row=row, col=col)
self.fig.update_yaxes(range=[0, im_size], row=row, col=col)
def show(self):
raw_size = 400
self.fig.update_layout(height = raw_size, width = raw_size * 5)
self.viz.plotlyplot(self.fig, env=self.visdom_env_imgs)
def add_hack_points(self, row, col):
hack_points = np.array([
[-1000.0, -1000.0, -1000.0],
[-1000.0, -1000.0, 1000.0],
[-1000.0, 1000.0, -1000.0],
[-1000.0, 1000.0, 1000.0],
[1000.0, -1000.0, -1000.0],
[1000.0, -1000.0, 1000.0],
[1000.0, 1000.0, -1000.0],
[1000.0, 1000.0, 1000.0]])
self.fig.add_trace(
go.Scatter3d(
x=-1 * hack_points[:, 0],
y=-1 * hack_points[:, 2],
z=-1 * hack_points[:, 1],
mode='markers',
name='_fake_pts',
visible=False,
marker=dict(
size=1,
opacity = 0,
color=(0.0, 0.0, 0.0),
)), row = row, col = col)
def add_bar(self, stats, num_modes, row, col, name):
freqs = np.bincount(stats, minlength=num_modes)
fig = self.fig.add_trace(
go.Bar(
x=list(range(num_modes)),
y=freqs,
name=name), row = row, col = col)
|
c3dm-main
|
c3dm/tools/visdom_plotly.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import time
import torch
import torch.nn.functional as Fu
import numpy as np
import collections
from tools.functions import safe_sqrt
from tools.pcl_unproject import depth2pcl
def in_hull(p, hull, extendy=False):
"""
Test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K` dimensions for which the Delaunay triangulation
will be computed
"""
from scipy.spatial import Delaunay
if not isinstance(hull, Delaunay):
hull = Delaunay(hull, incremental=True)
if extendy:
pts = hull.points
minx = np.min(pts[:,0])
maxx = np.max(pts[:,0])
new_pts = [[minx, 0], [maxx, 0]]
hull.add_points(new_pts)
return hull.find_simplex(p)>=0
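# Example (sketch): test which 2-D points fall inside the convex hull of a
# unit square.
# square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
# in_hull(np.array([[0.5, 0.5], [2.0, 2.0]]), square)  # -> [True, False]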
def get_ff_head_mask(pcl_pred, kp_loc):
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy())
kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
return torch.tensor(kpmask).float()
def cut_ff_head(pcl_pred, kp_loc, mask):
if True:
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy())
# kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
ok = np.where(kpmask.reshape(-1))[0].tolist()
else:
chin_pt = kp_loc[:, 16].long()
nose_pt = kp_loc[:, 54].long()
chin_3d_pt = pcl_pred[:, chin_pt[1], chin_pt[0]]
nose_3d_pt = pcl_pred[:, nose_pt[1], nose_pt[0]]
thr = ((nose_3d_pt - chin_3d_pt)**2).sum().sqrt()
thr *= 1.01
df = ((pcl_pred - nose_3d_pt[:,None,None])**2).sum(0, keepdim=True).sqrt()
df = df * mask + (1-mask) * thr * 1000.
ok = torch.nonzero(df.view(-1) <= thr).squeeze()
# if True:
# npix = pcl_pred[0].numel()
# nok = np.setdiff1d(np.arange(npix), ok)
# pcl_pred_nok = pcl_pred.view(3,-1)[:, nok].numpy()
# pcl_pred_raw = pcl_pred.view(3,-1).numpy()
# pcl_pred_ok = pcl_pred.view(3,-1)[:, ok].numpy()
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# visdom_plotly_pointclouds( \
# viz,
# { 'pred': pcl_pred_ok,
# 'pred_nok': pcl_pred_nok,
# 'pred_raw': pcl_pred_raw, },
# 'ff_debug',
# title='ff_debug', win='ff_debug_',
# markersize=2,
# in_subplots=True,
# )
# import pdb; pdb.set_trace()
pcl_pred = pcl_pred.view(3,-1)[:, ok]
pcl_pred = apply_pcl_pred_transform(pcl_pred)
return pcl_pred
def apply_pcl_pred_transform(pcl_pred):
a = np.pi + np.pi/2. # original
Rx = [
[ 1., 0., 0. ],
[ 0., np.cos(a), -np.sin(a) ],
[ 0., np.sin(a), np.cos(a) ],
]
pcl_pred = torch.FloatTensor(Rx) @ pcl_pred
return pcl_pred
def get_nose_loc(pcl_gt):
nose_id = np.argmin(pcl_gt[1,:])
nose_loc = pcl_gt[:, nose_id:(nose_id+1)]
return nose_loc
def cut_nose(pcl_gt, thr=100., nose_loc=None):
if nose_loc is None:
nose_loc = get_nose_loc(pcl_gt)
df = pcl_gt - nose_loc
dst = np.sqrt((df*df).sum(0))
ok = np.where(dst <= thr)[0]
pcl_gt = pcl_gt[:, ok]
return pcl_gt
def cut_ff_nose(pcl_gt, do_rotate=True):
# 2) 45 deg along x
# a = np.pi / 4. # original
a = np.pi / 4. + np.pi / 10.
Rx = [
[ 1., 0., 0. ],
[ 0., np.cos(a), -np.sin(a) ],
[ 0., np.sin(a), np.cos(a) ],
]
if do_rotate:
pcl_gt = Rx @ pcl_gt
pcl_gt = cut_nose(pcl_gt)
return pcl_gt
def re_cut_ff_nose(matrix_scl, pcl_pred, kp_loc, trans_scl, mask, mu, scl):
ok = torch.nonzero(mask.view(-1) > 0.).squeeze()
# cut off the hull
if True:
axx = np.arange( pcl_pred.shape[2] ) + 0.5
axy = np.arange( pcl_pred.shape[1] ) + 0.5
all_pt = np.stack(np.meshgrid(axx, axy))
all_pt = all_pt.reshape(2, -1)
kpmask = in_hull(all_pt.T, kp_loc.t().numpy(), extendy=True)
# kpmask = kpmask.reshape( list(pcl_pred.shape[1:]) ).astype(float)
okkp = np.where(kpmask.reshape(-1))[0]
ok = np.intersect1d( okkp, ok.numpy() ).tolist()
if len(ok)==0:
print('WARNING: RE-CUT results in empty face!')
return None
pcl_pred_ok = pcl_pred.view(3, -1)[:, ok]
pcl_pred_ok = apply_pcl_pred_transform(pcl_pred_ok)
pcl_pred_ok -= torch.FloatTensor(mu)
pcl_pred_ok *= scl
R = torch.FloatTensor(matrix_scl[:3,:3])
T = torch.FloatTensor(matrix_scl[:3,3:4])
pcl_pred_ok_t_t = R @ pcl_pred_ok + T
nose_loc = torch.FloatTensor(get_nose_loc(trans_scl.T))
pcl_pred_recut = cut_nose(pcl_pred_ok_t_t, nose_loc=nose_loc)
pcl_pred_recut = pcl_pred_recut.numpy()
return pcl_pred_recut
def eval_pcl_icp(pcl_pred, mesh_gt, mask, kp_loc):
import trimesh
from tools.utils import Timer
profile = True # note: passed below as Timer(quiet=profile), so True actually silences the timers
with Timer(quiet=profile):
# sample points from the surface
pcl_gt_orig = trimesh.sample.sample_surface(mesh_gt, 60000)[0]
# cut stuff
pcl_gt_cut = cut_ff_nose(pcl_gt_orig.T)
pcl_pred_cut = cut_ff_head(pcl_pred, kp_loc, mask).numpy()
# center
pred_cut_mean = pcl_pred_cut.mean(1)[:, None]
pcl_pred_cut = pcl_pred_cut - pred_cut_mean
pcl_gt_cut = pcl_gt_cut - pcl_gt_cut.mean(1)[:, None]
# align stds
pred_std = pcl_pred_cut.std(1).mean()
gt_std = pcl_gt_cut.std(1).mean()
pcl_pred_cut = pcl_pred_cut * (gt_std / pred_std)
# matrix, transformed, _ = \
# trimesh.registration.icp( \
# pcl_pred_cut.T, pcl_gt_cut.T, \
# initial=np.identity(4), threshold=1e-5, \
# max_iterations=50, **{'scale': True})
with Timer(quiet=profile):
matrix_scl, transformed_scl, _ = \
trimesh.registration.icp( \
pcl_pred_cut.T, pcl_gt_cut.T, \
initial=np.identity(4), threshold=1e-5, \
max_iterations=30, **{'scale': False})
with Timer(quiet=profile):
pcl_pred_recut = re_cut_ff_nose( matrix_scl, pcl_pred, kp_loc,
transformed_scl, mask,
pred_cut_mean,
gt_std / pred_std )
if pcl_pred_recut is None or pcl_pred_recut.size==0:
print('WARNING: RE-CUT results in empty face!')
pcl_pred_recut = pcl_pred_cut
with Timer(quiet=profile):
matrix_scl_recut, transformed_scl_recut, _ = \
trimesh.registration.icp( \
pcl_pred_recut.T, pcl_gt_cut.T, \
initial=np.identity(4), threshold=1e-5, \
max_iterations=30, **{'scale': False})
# if True:
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# visdom_plotly_pointclouds( \
# viz,
# {
# 'pred': pcl_pred_cut,
# 'pred_align': transformed_scl.T,
# # 'pred_align_scl': transformed.T,
# 'pcl_gt': pcl_gt_cut,
# 'pred_recut': pcl_pred_recut,
# 'pred_align_recut': transformed_scl_recut.T
# },
# 'ff_debug',
# title='ff_debug',
# win='ff_debug_align',
# markersize=2,
# in_subplots=False,
# height=600,
# width=600
# )
# time.sleep(1)
# import pdb; pdb.set_trace()
# pcl distance
ft = lambda x: torch.FloatTensor(x).t().cuda()
fl = lambda x: torch.FloatTensor(x).cuda()
with Timer(quiet=profile):
# err = chamfer(ft(transformed), fl(pcl_gt_cut))
err_scl = float(chamfer(ft(transformed_scl), fl(pcl_gt_cut)).detach())
err_scl_recut = float(chamfer(ft(transformed_scl_recut), fl(pcl_gt_cut)).detach())
res = collections.OrderedDict( [
('dist_pcl', err_scl),
('dist_pcl_scl', err_scl),
('dist_pcl_scl_recut', err_scl_recut),
# ('pred_t', ft(transformed)),
('pred_t_scl', ft(transformed_scl)),
('gt', fl(pcl_gt_cut)),
] )
return res
def eval_depth( pred, gt, crop=5, masks=None,
get_best_scale=False):
# chuck out border
gt = gt [ :, :, crop:-crop, crop:-crop ]
pred = pred[ :, :, crop:-crop, crop:-crop ]
if masks is not None:
# mult gt by mask
masks = masks[:,:,crop:-crop,crop:-crop]
gt = gt * (masks > 0.).float()
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)),1e-4)
if get_best_scale:
# mult preds by a scalar "scale_best"
# s.t. we get best possible mse error
xy = pred * gt ; xx = pred * pred
if masks is not None:
xy *= masks ; xx *= masks
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-4)
pred = pred * scale_best[:, None, None, None]
df = gt - pred
mse_depth = (dmask*(df**2)).sum((1,2,3)) / dmask_mass
abs_depth = (dmask*df.abs()).sum((1,2,3)) / dmask_mass
res = collections.OrderedDict( [
('mse_depth', mse_depth),
('abs_depth', abs_depth),
] )
# as in https://arxiv.org/pdf/1606.00373.pdf
for thr_exp in (1.,2.,3.):
delta = (1.25**thr_exp) / 100. # to meters
lessdelta = (dmask*(df.abs()<=delta).float()).sum((1,2,3)) \
/ dmask_mass
res[ 'delta_%d'%int(thr_exp) ] = lessdelta.cpu()
# delta error for linspaced thresholds
for delta in np.linspace(0.,2.,21):
if delta <= 0.: continue
lessdelta = (dmask*(df.abs()<=delta).float()).sum((1,2,3)) \
/ dmask_mass
res[ 'delta_%03d'%int(100*delta) ] = lessdelta.cpu()
if get_best_scale:
res['scale_best'] = scale_best
return res
def set_mean_depth_to_0(x,mask=None):
x = x.copy()
if mask is not None:
x = x * mask[:,None,:]
mu_depth = (x.sum(2)/mask.sum(1)[:,None])[:,2]
else:
mu_depth = x.mean(2)[:,2]
x[:,2,:] = x[:,2,:] - mu_depth[:,None]
if mask is not None:
x = x * mask[:,None,:]
return x
def get_edm(pts,pts2=None):
dtype = pts.data.type()
ba, dim, N = pts.shape
if pts2 is not None:
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2) # inplace saves memory
edm += fNorm1
# edm = (fNorm2.transpose(1,2) + fGram) + fNorm1
else:
fGram = torch.bmm(2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm = (fNorm1.transpose(1,2) - fGram) + fNorm1
return edm.contiguous()
def chamfer(a, b, med=False):
return 0.5 * (nn_err(a, b, med=med) + nn_err(b, a, med=med))
def nn_err(a, b, med=False):
D = get_edm(a[None].detach(), b[None].detach())
minvals, minidx = D.min(dim=1)
minvals = torch.clamp(minvals,0.).squeeze().sqrt()
if med:
assert False
errs = minvals.median()
else:
errs = minvals.mean()
# if True:
# from pykeops.torch import LazyTensor
# a = a.t().contiguous()
# b = b.t().contiguous()
# A = LazyTensor(a[:, None, :]) # (M, 1, 3)
# B = LazyTensor(b[None, :, :]) # (1, N, 3)
# D = ((A - B) ** 2).sum(2) # (M, N) symbolic matrix of squared distances
# indKNN = D.argKmin(1, dim=1).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
# errs_ = ((a - b[indKNN,:])**2).sum(1).sqrt()
# if True:
# nns = b[indKNN,:]
# from tools.vis_utils import get_visdom_connection, \
# visdom_plotly_pointclouds
# viz = get_visdom_connection()
# show = {
# 'in': a.t().contiguous().view(3,-1),
# 'nns': nns.t().contiguous().view(3,-1),
# }
# visdom_plotly_pointclouds( \
# viz,
# show,
# 'pcl_debug',
# title='pcl_debug',
# win='pcl_debug_nns',
# markersize=2,
# )
# import pdb; pdb.set_trace()
return errs
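# Example (sketch): symmetric Chamfer distance between two point clouds given
# as (3, N) tensors; nn_err is the one-directional mean nearest-neighbour
# distance and chamfer averages the two directions.
# a, b = torch.rand(3, 1000), torch.rand(3, 1200)
# d = chamfer(a, b)  # -> scalar tensor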
# def get_best_scale_cov(pcl_pred, pcl_gt):
# # compute the pcl centers
# pred_cnt, gt_cnt = [ \
# p.mean(2, keepdim=True) for p in (pcl_pred, pcl_gt) ]
# # center
# c_pred, c_gt = [ \
# p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
# cov_pred, cov_gt = [torch.bmm(c, c.permute(0,2,1)) * (1. / c.shape[2]) for c in [c_pred, c_gt]]
# import pdb; pdb.set_trace()
# det_pred = torch.stack([torch.det(c) for c in cov_pred])
# det_gt = torch.stack([torch.det(c) for c in cov_gt])
# # eigs_pred = torch.stack([torch.eig(c)[0][:,0] for c in cov_pred])
# # eigs_gt = torch.stack([torch.eig(c)[0][:,0] for c in cov_gt])
# import pdb; pdb.set_trace()
def eval_full_pcl(pcl_pred,
pcl_gt,
K=None,
scale_best=None):
# faces=None):
import trimesh
# batch size
ba = pcl_pred.shape[0]
# compute the pcl centers
pred_cnt, gt_cnt = [ \
p.mean(2, keepdim=True) for p in (pcl_pred, pcl_gt) ]
# center
c_pred, c_gt = [ \
p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
if False:
# apply the best scale
c_pred = c_pred * scale_best[:, None, None]
else:
# recompute the best scale
# scale_best = get_best_scale_cov(pcl_pred, pcl_gt)
scale_best = (c_gt.std(2) / c_pred.std(2)).mean(1)
if not np.isfinite(scale_best):
scale_best = scale_best.new_ones([1])
c_pred = c_pred * scale_best[:, None, None]
e = []
c_pred_align = []
for ip in range(ba):
_, transformed, _ = \
trimesh.registration.icp( \
c_pred[ip].numpy().T, c_gt[ip].numpy().T, \
initial=np.identity(4), threshold=1e-10, \
max_iterations=30, **{'scale': False})
c_pred_align.append(torch.FloatTensor(transformed.T))
e_ = chamfer(c_gt[ip].float().cuda(), c_pred[ip].float().cuda())
e_al_ = chamfer(c_gt[ip].float().cuda(), c_pred_align[ip].float().cuda())
e.append([e_, e_al_])
c_pred_align = torch.stack(c_pred_align)
e = torch.FloatTensor(e)
res = collections.OrderedDict( [
('pcl_error', e[:, 0]),
('pcl_error_align', e[:, 1]),
('scale_best', scale_best),
('pred_align', c_pred_align),
('pred_orig', pcl_pred),
('pred', c_pred),
('gt', c_gt),
] )
return res
def eval_sparse_pcl(pred, gt, rescale_factor):
# get best scale
xy = pred * gt ; xx = pred * pred
scale_best = xy.mean((1, 2)) / xx.mean((1, 2)).clamp(1e-4)
pred_scl = pred * scale_best[:, None, None]
err = ((pred_scl-gt)**2).sum(1).sqrt().mean(1)
err_resc = err * rescale_factor
return err_resc.mean()
def eval_depth_pcl( pred, gt, K=None, masks=None,
gt_projection_type='perspective',
pred_projection_type='orthographic',
debug=False,
lap_thr=0.3,
):
ba = gt.shape[0]
if masks is not None:
# mult gt by mask
gt = gt * (masks > 0.).float()
gt = depth_flat_filter(gt, size=5, thr=lap_thr)
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)), 1e-4)
# convert to point clouds
pcl_pred = depth2pcl(pred, K, projection_type=pred_projection_type)
pcl_gt = depth2pcl(gt, K, projection_type=gt_projection_type)
if gt_projection_type==pred_projection_type and \
gt_projection_type=='perspective' and False:
# estimate the best scale
xy = pred * gt ; xx = pred * pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-12)
pred = pred * scale_best[:, None, None, None]
# convert to point clouds
c_pred = depth2pcl(pred, K, projection_type=pred_projection_type)
c_gt = depth2pcl(gt, K, projection_type=gt_projection_type)
# if debug:
# import pdb; pdb.set_trace()
# c_pred = c_pred * 3
else:
# debug visualisations
# pcl_pred = pcl_pred * masks
# from tools.vis_utils import get_visdom_connection, visdom_plot_pointclouds
# pcl_show = pcl_pred[0].view(3,-1)[:,masks[0].view(-1)>0.]
# viz = get_visdom_connection()
# visdom_plot_pointclouds(viz, \
# {'pcl_pred': pcl_show.cpu().detach().numpy()},
# 'pcl_debug',
# 'pcl_debug',
# win='pcl_debug',
# )
# import pdb; pdb.set_trace()
# mask the point clouds
pcl_pred, pcl_gt = [p * dmask for p in (pcl_pred, pcl_gt)]
# compute the pcl centers
pred_cnt, gt_cnt = [ \
p.sum((2,3), keepdim=True) / dmask_mass[:,None,None,None] \
for p in (pcl_pred, pcl_gt) ]
# center
c_pred, c_gt = [ \
p - c for p, c in zip((pcl_pred, pcl_gt), (pred_cnt, gt_cnt)) ]
# mask the centered point clouds
c_pred, c_gt = [p * dmask for p in (c_pred, c_gt)]
# estimate the best scale
xy = c_pred * c_gt ; xx = c_pred * c_pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-4)
# apply the best scale
c_pred = c_pred * scale_best[:, None, None, None]
# translate the point clouds back to original meanxy
# xy_mask = torch.FloatTensor([1.,1.,0.])[None,:,None,None].type_as(c_pred)
# d_c_pred, d_c_gt = [ \
# p.clone() + c * xy_mask \
# for p, c in zip((c_pred, c_gt), (pred_cnt, gt_cnt)) ]
# compute the per-vertex distance
df = c_gt - c_pred
dist = torch.clamp(df**2, 0.).sum(1,keepdim=True).sqrt()
dist = (dmask * dist).sum((1,2,3)) / dmask_mass
# if float(dist) <= 1e-3:
# import pdb; pdb.set_trace()
res = collections.OrderedDict( [
('dist_pcl', dist),
('scale_best', scale_best),
('pred', c_pred),
('pred_orig', pcl_pred),
('gt', c_gt),
('dmask', dmask),
] )
return res
def depth_flat_filter(depth, size=5, thr=0.3):
mask = (depth > 0.).float()
fsz = size*2+1
w = depth.new_ones( (2,1,fsz,fsz) ) / float(fsz*fsz)
depthf = Fu.conv2d( \
torch.cat((depth, mask), dim=1), \
w,
padding=size,
groups=2)
depthf = depthf[:,0:1,:,:] / torch.clamp(depthf[:,1:2,:,:], 1e-4)
df = (depth - depthf).abs()
mask_mass = torch.clamp(mask.sum((1,2,3), keepdim=True), 1e-4)
dmean = (depth * mask) / mask_mass
dvar = (((depth - dmean) * mask) ** 2).sum((1,2,3), keepdim=True)
dstd = safe_sqrt(dvar / mask_mass)
bad = (df > dstd * thr).float()
return depth * (1-bad)
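# Example (sketch): zero out depth pixels that deviate strongly (relative to a
# global spread estimate of the valid region) from a local box-filtered mean.
# d = torch.rand(1, 1, 128, 128)
# d_clean = depth_flat_filter(d, size=5, thr=0.3)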
def eval_depth_scale_inv(
pred,
gt,
masks=None,
lap_thr=0.3,
):
if masks is not None:
# mult gt by mask
gt = gt * (masks > 0.).float()
gt = depth_flat_filter(gt, size=5, thr=lap_thr)
dmask = (gt > 0.).float()
dmask_mass = torch.clamp(dmask.sum((1,2,3)), 1e-4)
# estimate the best scale
xy = pred * gt ; xx = pred * pred
xy *= dmask ; xx *= dmask
scale_best = xy.mean((1,2,3)) / torch.clamp(xx.mean((1,2,3)), 1e-12)
pred = pred * scale_best[:, None, None, None]
df = pred - gt
err = (dmask * df.abs()).sum((1,2,3)) / dmask_mass
return err
|
c3dm-main
|
c3dm/tools/eval_functions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import copy
import io
import gzip
import urllib.request
from dataset.dataset_configs import (
IMAGE_ROOTS, MASK_ROOTS, DEPTH_ROOTS, DATASET_ROOT, DATASET_CFG,
IMAGE_URLS, MASK_URLS, DEPTH_URLS
)
from dataset.keypoints_dataset import KeypointsDataset
from tools import utils
def dataset_zoo( dataset_name='freicars',
sets_to_load = ['train','val'],
force_download = False,
test_on_trainset=False,
TRAIN= { 'rand_sample': 6000,
'limit_to': -1,
'limit_seq_to': [-1],
'subsample': 1,
'dilate_masks': 5,
},
VAL = { 'rand_sample': 1000,
'limit_to': -1,
'limit_seq_to': -1,
'subsample': 1,
'dilate_masks': 0,
},
TEST = { 'rand_sample': -1,
'limit_seq_to': -1,
'limit_to': -1,
'subsample': 1,
'dilate_masks': 0,
},
**kwargs ):
main_root = DATASET_ROOT
ext = '.json'
json_train = os.path.join( main_root, dataset_name + '_train' + ext )
json_val = os.path.join( main_root, dataset_name + '_val' + ext )
image_root_train, image_root_val = get_train_val_roots(dataset_name, IMAGE_ROOTS, IMAGE_URLS)
mask_root_train, mask_root_val = get_train_val_roots(dataset_name, MASK_ROOTS, MASK_URLS)
depth_root_train, depth_root_val = get_train_val_roots(dataset_name, DEPTH_ROOTS, DEPTH_URLS)
# auto-download the dataset json file if it doesn't exist
for json_file in (json_train, json_val):
if not os.path.isfile(json_file) or force_download:
download_dataset_json(json_file)
dataset_train = None
dataset_val = None
dataset_test = None
if dataset_name in DATASET_CFG:
dataset_cfg = copy.deepcopy(DATASET_CFG[dataset_name])
else:
dataset_cfg = copy.deepcopy(DATASET_CFG['default'])
TRAIN, VAL, TEST = [ copy.deepcopy(set_) for set_ in (TRAIN, VAL, TEST) ]
for set_ in (TRAIN, VAL, TEST):
set_.update(dataset_cfg)
print(set_)
if 'train' in sets_to_load:
dataset_train = KeypointsDataset(\
image_root=image_root_train,
mask_root=mask_root_train,
depth_root=depth_root_train,
jsonfile=json_train, train=True, **TRAIN)
if 'val' in sets_to_load:
if dataset_name in ('celeba_ff',):
TEST['box_crop'] = True
VAL['box_crop'] = True
if test_on_trainset:
image_root_val, json_val = image_root_train, json_train
dataset_val = KeypointsDataset(\
image_root=image_root_val,
mask_root=mask_root_val,
depth_root=depth_root_val,
jsonfile=json_val, train=False, **VAL)
dataset_test = KeypointsDataset(\
image_root=image_root_val,
mask_root=mask_root_val,
depth_root=depth_root_val,
jsonfile=json_val, train=False, **TEST)
return dataset_train, dataset_val, dataset_test
def get_train_val_roots(dataset_name, image_roots, urls):
if dataset_name not in image_roots:
return None, None
for subset_idx, images_dir in enumerate(image_roots[dataset_name]):
if not os.path.exists(images_dir):
if dataset_name not in urls:
raise ValueError(
f"Images for {dataset_name} not found in {images_dir}. "
"Please download manually."
)
url = urls[dataset_name][subset_idx]
print('Downloading images to %s from %s' % (images_dir, url))
utils.untar_to_dir(url, images_dir)
image_roots = copy.copy(image_roots[dataset_name])
if len(image_roots) == 2:
return image_roots
elif len(image_roots) == 1:
return image_roots[0], image_roots[0]
else:
raise ValueError('Wrong image roots format.')
def download_dataset_json(json_file):
from dataset.dataset_configs import DATASET_URL
json_dir = '/'.join(json_file.split('/')[0:-1])
json_name = json_file.split('/')[-1].split('.')[0]
os.makedirs(json_dir, exist_ok=True)
url = DATASET_URL[json_name]
print('downloading dataset json %s from %s' % (json_name, url))
response = urllib.request.urlopen(url)
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
try:
with open(json_file, 'wb') as outfile:
outfile.write(decompressed_file.read())
except Exception:
# remove a partially written file and re-raise so the failure is not
# silently swallowed
if os.path.isfile(json_file):
os.remove(json_file)
raise
# can be zipped
# print('checking dataset')
# with open(json_file,'r') as f:
# dt = json.load(f)
# assert dt['dataset']==json_name
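# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how the three
# splits returned by dataset_zoo() are typically consumed.  The dataset name
# below is one of the configured ones; everything else is an assumption made
# only for this example.
#
#   dataset_train, dataset_val, dataset_test = dataset_zoo(
#       dataset_name='freicars_clickp_filtd',
#       sets_to_load=('train', 'val'),
#   )
#   # each returned object is a KeypointsDataset (or None if not requested)
#   # and can be wrapped in a torch.utils.data.DataLoader as usual.
# --------------------------------------------------------------------------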
|
c3dm-main
|
c3dm/dataset/dataset_zoo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from tools.utils import Timer
from torch.utils.data.sampler import Sampler
from torch._six import int_classes as _int_classes
class SceneBatchSampler(Sampler):
def __init__(self, sampler, batch_size, drop_last, \
train=True, strategy='uniform_viewpoints'):
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
assert strategy == 'uniform_viewpoints'
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.strategy = strategy
self.train = train
self.restrict_seq = None
def __iter__(self):
batch = []
for idx,_ in enumerate(self.sampler):
ii = idx % self.batch_size
if ii==0:
sample_fun = {
'uniform_viewpoints': self.sample_batch_vp_diff,
}[self.strategy]
with Timer(name='batch_sample', quiet=True):
batch, seq = sample_fun(idx)
if ii==(self.batch_size-1):
yield batch
batch = []
def _get_dataset_yaws(self):
dataset = self.sampler.data_source
rots = dataset.nrsfm_model_outputs['phi']['R']
pr_axes = rots[:, -1, :]
up = torch.svd(pr_axes)[2][:, -1]
x = torch.cross(up, torch.tensor([0., 0., 1.]))
x = x / x.norm()
y = torch.cross(x, up)
y = y / y.norm()
x_c = torch.matmul(pr_axes, x)
y_c = torch.matmul(pr_axes, y)
yaw = torch.atan2(x_c, y_c)
return yaw
def sample_batch_vp_diff(self, idx):
dataset = self.sampler.data_source
# get the cached log rots
assert (
hasattr(dataset, 'nrsfm_model_outputs') and
dataset.nrsfm_model_outputs is not None
), 'make sure to set cfg.annotate_with_c3dpo_outputs=True'
yaws = self._get_dataset_yaws()
hist, edges = np.histogram(yaws, bins=16)
bins = (yaws.cpu().data.numpy().reshape(-1, 1) > edges[1:]).sum(axis=1)
weights = 1. / hist[bins]
weights /= weights.sum()
pivot = np.random.choice(np.arange(len(dataset.db)), p=weights)
seq = dataset.dbT['seq'][pivot]
rots = dataset.nrsfm_model_outputs['phi']['R']
seqs = rots.new_tensor(dataset.dbT['seq'], dtype=torch.int64)
# convert bool array to indices
okdata = (seqs != seqs[pivot]).nonzero().view(-1).tolist()
for o in okdata:
assert o < len(dataset.db), \
'%d out of range (%d)!' % (o, len(dataset.db))
if len(okdata) >= (self.batch_size-1):
replace = False
else:
replace = True
if len(okdata)==0:
print('no samples!!')
okdata = list(range(len(dataset.db)))
if weights is not None: # cross with okdata:
weights = weights[okdata] / weights[okdata].sum()
sample = np.random.choice(okdata, \
self.batch_size-1, replace=replace, p=weights).tolist()
sample.insert(0, pivot)
for si, s in enumerate(sample):
assert s < len(dataset.db), \
'%d out of range (%d)!' % (s, len(dataset.db))
return sample, seq
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
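if __name__ == '__main__':
    # Illustrative sketch (not part of the original file): the inverse-histogram
    # weighting used by sample_batch_vp_diff, demonstrated on synthetic yaw
    # angles.  Every occupied yaw bin receives the same total probability, so
    # the rare viewpoints below end up with roughly half of the probability
    # mass despite holding only 10% of the frames.
    yaws_demo = np.concatenate([
        np.random.uniform(-np.pi, 0., 900),  # crowded viewpoint range
        np.random.uniform(0., np.pi, 100),   # rare viewpoint range
    ])
    hist, edges = np.histogram(yaws_demo, bins=16)
    bin_idx = (yaws_demo.reshape(-1, 1) > edges[1:]).sum(axis=1)
    weights = 1. / hist[bin_idx]
    weights /= weights.sum()
    print('probability mass of the rare viewpoints: %1.3f' % weights[900:].sum())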
|
c3dm-main
|
c3dm/dataset/batch_samplers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
import copy
from model import load_nrsfm_model
from tools.cache_preds import cache_preds
def run_c3dpo_model_on_dset(dset, nrsfm_exp_dir):
print('caching c3dpo outputs')
# make a dataset copy without any random sampling
# and image/mask/depth loading
dset_copy = copy.deepcopy(dset)
dset_copy.load_images = False
dset_copy.load_masks = False
dset_copy.load_depths = False
dset_copy.rand_sample = -1
nrsfm_model, nrsfm_cfg = load_nrsfm_model(nrsfm_exp_dir, get_cfg=True)
nrsfm_model.cuda()
nrsfm_model.eval()
loader = torch.utils.data.DataLoader( \
dset_copy,
num_workers=0,
pin_memory=True,
batch_size=nrsfm_cfg.batch_size )
cache_vars = ('phi', 'image_path')
cache = cache_preds(nrsfm_model, loader,
cache_vars=cache_vars, cat=True)
dset.nrsfm_model_outputs = cache
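# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): annotating a
# training set with cached C3DPO predictions before building the viewpoint-
# uniform SceneBatchSampler.  The experiment directory below is one of the
# paths listed in dataset_configs.C3DPO_MODELS and is used here only as an
# example.
#
#   run_c3dpo_model_on_dset(dataset_train, './dataset_root/c3dpo_freicars')
#   # dataset_train.nrsfm_model_outputs now holds the cached 'phi' outputs
#   # (including per-frame rotations) that sample_batch_vp_diff relies on.
# --------------------------------------------------------------------------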
|
c3dm-main
|
c3dm/dataset/c3dpo_annotate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import defaultdict
import json
import os
import numpy as np
import torch
import trimesh
from visdom import Visdom
from dataset.dataset_configs import IMAGE_ROOTS
from dataset.keypoints_dataset import load_depth, load_mask
from tools.eval_functions import eval_depth_pcl, eval_full_pcl, eval_sparse_pcl
from tools.pcl_unproject import depth2pcl
import torch.nn.functional as Fu
from tqdm import tqdm
import pickle
import time
def eval_zoo(dataset_name, include_debug_vars=False):
if 'freicars_clickp_filtd' in dataset_name:
eval_script = eval_freicars
cache_vars = [
'masks', 'depth_dense',
'K_orig', 'image_path',
'orig_image_size',
'depth_path', 'mask_path',
'seq_name', 'R', 'T',
'embed_db_shape_camera_coord',
'cmr_faces',
'kp_loc_3d',
'shape_image_coord_cal',
'nrsfm_shape_image_coord'
]
eval_vars = [
'EVAL_depth_scl_perspective_med',
'EVAL_pcl_scl_perspective_med',
'EVAL_pcl_corr_scl_perspective_med',
'EVAL_depth_scl_orthographic_med',
'EVAL_pcl_scl_orthographic_med',
'EVAL_pcl_corr_scl_orthographic_med',
'EVAL_depth_scl_perspective',
'EVAL_pcl_scl_perspective',
'EVAL_pcl_corr_scl_perspective',
'EVAL_depth_scl_orthographic',
'EVAL_pcl_scl_orthographic',
'EVAL_pcl_corr_scl_orthographic',
'EVAL_sparse_pcl',
'EVAL_sparse_pcl_nrsfm',
]
elif dataset_name in ('celeba_ff',):
eval_script = eval_florence
cache_vars = [ 'masks', 'depth_dense',
'K_orig', 'image_path',
'orig_image_size',
'depth_path', 'mask_path',
'seq_name',
'images',
'embed_db_shape_camera_coord',
'shape_image_coord_cal_dense',
'cmr_faces',
'kp_loc',
'mesh_path',
'shape_image_coord_best_scale',
]
eval_vars = [ \
'EVAL_pcl_scl_recut_orthographic_flip_med',
'EVAL_pcl_scl_orthographic_flip_med',
'EVAL_pcl_orthographic_flip_med',
'EVAL_pcl_scl_recut_orthographic_med',
'EVAL_pcl_scl_orthographic_med',
'EVAL_pcl_orthographic_med',
'EVAL_pcl_scl_recut_orthographic_flip',
'EVAL_pcl_scl_orthographic_flip',
'EVAL_pcl_orthographic_flip',
'EVAL_pcl_scl_recut_orthographic',
'EVAL_pcl_scl_orthographic',
'EVAL_pcl_orthographic',
]
elif 'pascal3d' in dataset_name:
eval_script = eval_p3d
cache_vars = [ 'masks', 'depth_dense',
'image_path', 'R', 'T',
'orig_image_size',
'mask_path',
'images',
'embed_db_shape_camera_coord',
'shape_image_coord_cal_dense',
'cmr_faces',
'kp_loc',
'mesh_path' ]
eval_vars = [ \
'EVAL_pcl_scl_detkp',
'EVAL_pcl_corr_scl_detkp',
'EVAL_pcl_corr_scl_detkp_med',
]
else:
eval_script = eval_dummy
cache_vars = [ 'images', ]
eval_vars = [ 'EVAL_pcl_dist_scl', ]
return eval_script, cache_vars, eval_vars
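# Illustrative usage sketch (not part of the original function): the returned
# triple is typically consumed by first caching the per-image predictions
# named in cache_vars and then running the evaluation script on them, e.g.
#
#   eval_script, cache_vars, eval_vars = eval_zoo('freicars_clickp_filtd')
#   results, _ = eval_script(cached_preds, eval_vars=eval_vars)
#
# where cached_preds is a dict holding one entry per name in cache_vars.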
def eval_dummy(cached_preds, eval_vars=None):
return {'EVAL_pcl_dist_scl': -1.}, None
def load_freicar_gt_pcl():
print('loading gt freicar point clouds ...')
# load the gt point clouds
gt_pcl_dir = '.../vpdr/freicars_sfm/'
unqseq = ['037', '036', '042', '022', '034']
gt_pcl_db = {}
for seq in unqseq:
fl = os.path.join(gt_pcl_dir, seq + '.pkl')
with open(fl, 'rb') as f:
pcl_data = pickle.load(f)
pcl_data = torch.FloatTensor(pcl_data)
pcl_std = pcl_data.std(1).mean()
gt_pcl_db[seq] = {
'xyz': pcl_data,
'scale_correction': 1. / pcl_std,
}
return gt_pcl_db
def load_freicar_data(imname, seq_name):
data_root = IMAGE_ROOTS['freicars_clickp_filtd'][0]
depth_name = imname + '.jpg.half.jpg.filtdepth.tiff'
depth_path = os.path.join(data_root, seq_name, \
'undistort/stereo/filtered_depth_0.2', depth_name)
assert 'filtdepth' in depth_path
mask_name = imname + '.jpg.half.png'
mask_path = os.path.join(data_root, seq_name, \
'masks', mask_name)
depth_gt = torch.FloatTensor(load_depth({'depth_path': depth_path}))
mask = torch.FloatTensor(load_mask({'mask_path': mask_path}))
return depth_gt, mask
def load_freicar_gt_pcl_clean(cache):
print('loading clean gt freicar point clouds ...')
# load the gt point clouds
gt_pcl_dir = '.../vpdr/freicars_sfm/'
unqseq = ['037', '036', '042', '022', '034']
gt_pcl_db = {}
for seq in tqdm(unqseq):
ok = [ (1 if seq==s else 0) for s in cache['seq_name'] ]
ok = np.where(np.array(ok))[0]
if len(ok)==0:
continue
pcl_seq = []
for idx in ok:
orig_sz = cache['orig_image_size'][idx].long().tolist()
imname = cache['depth_path'][idx].split('/')[-1].split('.')[0]
depth_gt, mask = load_freicar_data(imname, seq)
mask = Fu.interpolate(mask[None], size=orig_sz, mode='nearest')[0]
depth_gt = Fu.interpolate(depth_gt[None], size=orig_sz, mode='nearest')[0]
mask = mask * (depth_gt > 0.).float()
ok = torch.nonzero(mask.view(-1)).squeeze()
if len(ok)==0: continue
K, R, T = cache['K_orig'][idx], cache['R'][idx], cache['T'][idx]
pcl = depth2pcl(depth_gt[None], K[None], image_size=orig_sz, projection_type='perspective')[0]
pcl = pcl.view(3, -1)[:, ok]
pcl = R.t() @ (pcl - T[:,None])
pcl_seq.append(pcl)
pcl_seq = torch.cat(pcl_seq, dim=1)
if pcl_seq.shape[1] > 30000:
state = torch.get_rng_state()
torch.manual_seed(0)
prm = torch.randperm(pcl_seq.shape[1])[:30000]
torch.set_rng_state(state)
pcl_seq = pcl_seq[:, prm]
pcl_std = pcl_seq.std(1).mean()
gt_pcl_db[seq] = { 'xyz': pcl_seq, 'scale_correction': 1. / pcl_std }
outdir = './data/vpdr/'
os.makedirs(outdir, exist_ok=True)
outfile = os.path.join(outdir, 'freicars_pcl_db_eval.pth')
torch.save(gt_pcl_db, outfile)
return gt_pcl_db
def load_p3d_meshes(cached_preds, n_sample=30000):
mesh_db = {}
root = IMAGE_ROOTS['pascal3d_clickp_all'][0]
for mesh_path in cached_preds['mesh_path']:
if mesh_path not in mesh_db:
vertices, faces = load_off(os.path.join(root,mesh_path))
if vertices is None:
continue
mesh = trimesh.Trimesh( \
vertices=vertices.tolist(), \
faces=faces.tolist() )
pcl = trimesh.sample.sample_surface(mesh, n_sample)
mesh_db[mesh_path] = torch.from_numpy(pcl[0].T).float()
return mesh_db
def eval_p3d(cached_preds, eval_vars=None, visualize=False, \
dump_dir=None, skip_flip=False):
nim = len(cached_preds['masks'])
errs = []
mesh_db = load_p3d_meshes(cached_preds)
for imi in tqdm(range(nim)):
gt_pcl = mesh_db[cached_preds['mesh_path'][imi]]
gt_pcl_imcoord = (cached_preds['R'][imi] @ gt_pcl + cached_preds['T'][imi][:,None])
# pcl prediction
pcl_pred = cached_preds['embed_db_shape_camera_coord'][imi,:,:,0].clone()
errs_this_im = {}
pcl_out_this_im = {}
for flip in (False, True):
gt_pcl_test = gt_pcl_imcoord.clone()
if skip_flip and flip:
pass # use the previous result
else:
if flip: gt_pcl_test[2,:] *= -1.
errs_now_pcl = eval_full_pcl( \
pcl_pred[None].clone(),
gt_pcl_test[None].clone() )
pcl_full_err = float(errs_now_pcl['pcl_error'])
pcl_full_err_align = float(errs_now_pcl['pcl_error_align'])
errs_now = \
{ 'EVAL_pcl_scl_detkp': pcl_full_err,
'EVAL_pcl_corr_scl_detkp': pcl_full_err_align }
errs_this_im[flip] = errs_now
pcl_out_this_im[flip] = errs_now_pcl
decvar = 'EVAL_pcl_corr_scl_detkp' # decide whether we flip based on this
flip_better = errs_this_im[True][decvar] < errs_this_im[False][decvar]
# take the better one in case of flipping
pcl_out_this_im = pcl_out_this_im[flip_better]
errs_this_im = errs_this_im[flip_better]
if False:
from tools.vis_utils import get_visdom_connection, \
visdom_plotly_pointclouds
viz = get_visdom_connection()
from PIL import Image
im = Image.open(cached_preds['image_path'][imi]).convert('RGB')
im = torch.FloatTensor(np.array(im)).permute(2,0,1)
viz.image(im, env='pcl_debug', win='im')
pcl_gt = pcl_out_this_im['gt']
pcl_pred = pcl_out_this_im['pred']
pcl_pred_orig = pcl_out_this_im['pred_orig']
pcl_pred_align = pcl_out_this_im['pred_align']
for imii in (0,):
show = {
'gt': pcl_gt[imii].view(3, -1),
# 'pred': pcl_pred[imii].view(3, -1),
'pred_orig': pcl_pred_orig[imii].view(3, -1),
'pred_align': pcl_pred_align[imii].view(3, -1),
}
visdom_plotly_pointclouds( \
viz,
show,
'pcl_debug',
title='pcl_debug',
win='pcl_debug',
markersize=2,
height=800,
width=800,
)
import pdb; pdb.set_trace()
errs.append(errs_this_im)
results = {}
for med in (False, True): # aggregate with both the mean and the median
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
res = float(res.median()) if med else float(res.mean())
results[(k+'_med') if med else k] = res
print('P3D evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % eval_var
print('eval vars check ok!')
# if TGT_NIMS==None:
# results = { k+'_DBG':v for k, v in results.items() }
return results, None
def eval_freicars(
cached_preds, eval_vars=None, visualize=True,
TGT_NIMS=1427, dump_dir=None
):
from dataset.dataset_configs import FREIBURG_VAL_IMAGES
cache_path = './cache/vpdr/freicars_pcl_db_eval.pth'
if not os.path.isfile(cache_path):
gt_pcl_db = load_freicar_gt_pcl_clean(cached_preds)
else:
gt_pcl_db = torch.load(cache_path)
nim = len(cached_preds['depth_path'])
if TGT_NIMS is None:
print('\n\n\n!!!! DEBUG MODE !!!!\n\n\n')
errs = []
for imi in tqdm(range(nim)):
seq_name = cached_preds['seq_name'][imi]
gt_pcl = gt_pcl_db[seq_name]['xyz']
gt_pcl_imcoord = (cached_preds['R'][imi] @ gt_pcl + \
cached_preds['T'][imi][:,None])
scale_correction = gt_pcl_db[seq_name]['scale_correction']
orig_sz = cached_preds[
'orig_image_size'][imi].type(torch.int32).tolist()
imname = cached_preds['depth_path'][imi].split('/')[-1].split('.')[0]
depth_gt, mask = load_freicar_data(imname, seq_name)
depth_gt = Fu.interpolate(depth_gt[None], size=orig_sz, mode='nearest' )[0]
mask = Fu.interpolate(mask[None], size=orig_sz, mode='nearest')[0]
# check we have a correct size
for s, s_ in zip(orig_sz, depth_gt.shape[1:]): assert s==s_
depth_pred = cached_preds['depth_dense'][imi].clone()
minscale = min(depth_pred.shape[i] / orig_sz[i-1] for i in [1, 2])
newsz = np.ceil(np.array(depth_pred.shape[1:])/minscale).astype(int).tolist()
depth_pred_up = Fu.interpolate( \
depth_pred[None], \
size=newsz, \
mode='bilinear' )[0]
depth_pred_up = depth_pred_up[:,:depth_gt.shape[1],:depth_gt.shape[2]]
depth_pred_up /= minscale
K = cached_preds['K_orig'][imi:imi+1].clone()
errs_this_im = {}
for pred_projection_type in ( 'perspective', 'orthographic'):
errs_now = eval_depth_pcl(depth_pred_up[None].clone(),
depth_gt[None].clone(),
K=K.clone(),
pred_projection_type=pred_projection_type,
gt_projection_type='perspective',
masks=mask[None],
lap_thr=0.01)
pcl_err_corrected = scale_correction * float(errs_now['dist_pcl'])
errs_this_im.update( \
{ 'EVAL_depth_scl_'+pred_projection_type: pcl_err_corrected} )
if True:
pcl_pred = cached_preds['embed_db_shape_camera_coord'][imi,:,:,0].clone()
pcl_pred /= minscale # !!!!
errs_now_pcl = eval_full_pcl( \
pcl_pred[None].clone(),
gt_pcl_imcoord[None].clone(),
K=K.clone(),
scale_best=errs_now['scale_best'], )
pcl_full_err_corrected = \
scale_correction * float(errs_now_pcl['pcl_error'])
pcl_full_err_align_corrected = \
scale_correction * float(errs_now_pcl['pcl_error_align'])
for pred_projection_type in ('perspective', 'orthographic'):
errs_this_im.update( \
{ 'EVAL_pcl_scl_'+pred_projection_type: \
pcl_full_err_corrected,
'EVAL_pcl_corr_scl_'+pred_projection_type: \
pcl_full_err_align_corrected} )
errs.append(errs_this_im)
results = {}
for med in (True, False):
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
res = float(res.median()) if med else float(res.mean())
results[(k+'_med') if med else k] = res
if True: # eval sparse kps
gt_kp_loc_3d = cached_preds['kp_loc_3d']
pred_kp_loc_3d = cached_preds['shape_image_coord_cal']
nrsfm_kp_loc_3d = cached_preds['nrsfm_shape_image_coord']
scale_corrs = torch.stack([
gt_pcl_db[cached_preds['seq_name'][imi]]['scale_correction']
for imi in range(nim)
])
results['EVAL_sparse_pcl'] = float(eval_sparse_pcl(
pred_kp_loc_3d, gt_kp_loc_3d, scale_corrs))
results['EVAL_sparse_pcl_nrsfm'] = float(eval_sparse_pcl(
nrsfm_kp_loc_3d, gt_kp_loc_3d, scale_corrs))
print('Freiburg Cars evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % eval_var
print('eval vars check ok!')
if TGT_NIMS is None:
results = { k+'_DBG':v for k, v in results.items() }
return results, None
def load_off(obj_path):
if not os.path.isfile(obj_path):
print('%s does not exist!' % obj_path)
return None, None
with open(obj_path, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
lines = [ l.strip() for l in lines ]
nv, nf, _ = [int(x) for x in lines[1].split(' ')]
entries = lines[2:]
for vertface in ('v', 'f'):
if vertface=='v':
vertices = [ [float(v_) for v_ in v.split(' ')] for v in entries[:nv]]
vertices = torch.FloatTensor(vertices).float()
entries = entries[nv:]
elif vertface=='f':
faces = [ [int(v_) for v_ in v.split(' ')[1:]] for v in entries]
faces = torch.LongTensor(faces)
assert faces.shape[0]==nf
else:
raise ValueError()
return vertices, faces
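# Illustrative sketch (not part of the original file): the minimal OFF-style
# layout load_off() expects -- a header line (ignored), a counts line
# "nv nf ne", nv vertex lines, then nf face lines whose first token is the
# number of vertices in the face:
#
#   OFF
#   3 1 0
#   0.0 0.0 0.0
#   1.0 0.0 0.0
#   0.0 1.0 0.0
#   3 0 1 2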
def load_ff_obj(obj_path):
with open(obj_path, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
lines = [ l.strip() for l in lines ]
for vertface in ('v', 'f'):
entries = [ [ v for v in l.split(' ')[1:4] ] \
for l in lines if l.split(' ')[0]==vertface ]
if vertface=='v':
entries = [ [float(v_) for v_ in v] for v in entries ]
entries = torch.FloatTensor(entries)
elif vertface=='f':
entries = [ [ int(v_.split('/')[0]) for v_ in v ] \
for v in entries ]
entries = torch.LongTensor(entries)
else:
raise ValueError()
if vertface=='v':
vertices = entries.float()
else:
faces = (entries-1).long()
return vertices, faces
def eval_florence(cached_preds, eval_vars=None, TGT_NIMS=1427, visualize=False):
from tools.pcl_unproject import depth2pcl
from tools.eval_functions import eval_pcl_icp
root = IMAGE_ROOTS['celeba_ff'][1]
nim = len(cached_preds['mesh_path'])
errs = []
for imi in tqdm(range(nim)):
# if imi <= 775:
# continue
# get the ff mesh
mesh_path = cached_preds['mesh_path'][imi]
if len(mesh_path)==0: continue
mesh_path = os.path.join(root, mesh_path)
vertices, faces = load_ff_obj(mesh_path)
mesh_gt = trimesh.Trimesh(
vertices=vertices.tolist(),
faces=faces.tolist()
)
# get our prediction
kp_loc = cached_preds['kp_loc'][imi]
# image_size = list(cached_preds['images'][imi].shape[1:])
mask = cached_preds['masks'][imi]
if mask.sum()<=1:
print('Empty mask!!!')
continue
image_size = list(mask.shape[1:])
# mask = Fu.interpolate(mask[None], size=image_size)[0]
pcl_pred = cached_preds['shape_image_coord_best_scale'][imi]
pcl_pred = Fu.interpolate(pcl_pred[None], size=image_size)[0]
err_now = {}
for flip in (True, False):
pcl_pred_now = pcl_pred.clone()
if flip: pcl_pred_now[2,:] = -pcl_pred_now[2,:]
# compute icp error
err = eval_pcl_icp(pcl_pred_now, mesh_gt, mask, kp_loc)
err = {
'EVAL_pcl_scl_recut_orthographic': err['dist_pcl_scl_recut'],
'EVAL_pcl_scl_orthographic': err['dist_pcl_scl'],
'EVAL_pcl_orthographic': err['dist_pcl'],
}
if flip: err = {k+'_flip':v for k, v in err.items()}
err_now.update(err)
errs.append(err_now)
print('<EVAL_STATE>')
print(f'IMAGE={imi}')
print(err_now)
print('</EVAL_STATE>')
results = {}
for med in (True, False):
for k in errs[0]:
res = torch.FloatTensor([float(err[k]) for err in errs])
if med:
res = float(res.median())
else:
res = float(res.mean())
results[(k+'_med') if med else k] = res
print('Florence Face evaluation results:')
for k, v in results.items():
print('%20s: %1.5f' % (k,v) )
if eval_vars is not None:
for eval_var in eval_vars:
assert eval_var in results, \
'evaluation variable missing! (%s)' % str(eval_var)
print('eval vars check ok!')
return results, None
|
c3dm-main
|
c3dm/dataset/eval_zoo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
import sys
import json
import copy
import glob
import pickle, gzip
import numpy as np
from PIL import Image
from torch.utils import data
from tools.utils import NumpySeedFix, auto_init_args
class KeypointsDataset(data.Dataset):
"""
This is a generalized class suitable for storing object keypoint annotations
The input jsonfile needs to be a list of dictionaries
(one dictionary per pose annotation) of the form:
{
# REQUIRED FIELDS #
"kp_loc" : 2 x N list of keypoints
"kp_vis" : 1 x N list of 1/0 boolean indicators
# OPTIONAL FIELDS #
"file_name": name of file from image_root
"kp_loc_3d": 3 x N list of 3D keypoint locations in camera coords
}
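Illustrative example entry (an assumption added for clarity, not taken from
any real annotation file) for a hypothetical object with N = 3 keypoints:
{
"kp_loc" : [[12.0, 40.5, 33.1], [25.0, 18.2, 60.7]], # 2 x N
"kp_vis" : [1, 1, 0], # 1 x N
"file_name": "seq01/frame_000001.jpg",
"kp_loc_3d": [[0.1, 0.4, 0.3], [0.2, 0.1, 0.6], [1.2, 1.4, 1.1]] # 3 x N
}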
"""
def __init__( self,
jsonfile=None,
train=True,
limit_to=0,
limit_seq_to=-1,
rand_sample=0,
image_root=None,
mask_root=None,
depth_root=None,
refresh_db=False,
min_visible=0,
subsample=1,
load_images=True,
load_depths=True,
load_masks=True,
image_height=9*20*2,
image_width=9*20*2,
dilate_masks=5,
max_frame_diff = -1.,
max_angle_diff = 4.,
kp_conf_thr = 0.,
nrsfm_model_outputs = None,
box_crop_context=1.,
box_crop=False,
):
auto_init_args(self)
self.load_db_file()
has_classes = 'class_mask' in self.db[0]
if has_classes:
self.class_db = self.get_class_db()
else:
self.class_db = None
self.get_transposed_db()
def get_transposed_db(self):
print('getting transposed db ...')
self.dbT = {}
self.dbT['unqseq'] = sorted(list(set([e['seq'] for e in self.db])))
self.dbT['seq_dict'] = {}
self.dbT['seq'] = [e['seq'] for e in self.db]
dict_seq = {s:i for i,s in enumerate(self.dbT['seq'])}
for i in range(len(self.db)):
# seq_ = self.dbT['unqseq'].index(self.db[i]['seq'])
seq = dict_seq[self.db[i]['seq']]
# assert seq_==seq
# print('%d==%d' % (seq_,seq))
if seq not in self.dbT['seq_dict']:
self.dbT['seq_dict'][seq] = []
self.dbT['seq_dict'][seq].append(i)
def load_db_file(self):
print("loading data from %s" % self.jsonfile)
ext = self.jsonfile.split('.')[-1]
if ext=='json':
with open(self.jsonfile,'r') as data_file:
db = json.load(data_file)
elif ext=='pkl':
with open(self.jsonfile,'rb') as data_file:
db = pickle.load(data_file)
elif ext=='pgz':
with gzip.GzipFile(self.jsonfile, 'r') as data_file:
db = pickle.load(data_file)
else:
raise ValueError('bad extension %s' % ext)
if 'seq' not in db[0]:
print('no sequence numbers! => filling with unique seq per image')
for ei, e in enumerate(db):
e['seq_name'] = str(ei)
e['seq'] = ei
unqseq = list(range(len(db)))
else:
unqseq = sorted(list(set([e['seq'] for e in db])))
for e in db:
e['seq_name'] = copy.deepcopy(e['seq'])
e['seq'] = unqseq.index(e['seq'])
print("data train=%d , n frames = %d, n seq = %d" % \
(self.train, len(db), len(unqseq)))
self.db = db
self.restrict_images()
def get_class_db(self):
print('parsing class db ...')
masks = np.stack([np.array(e['class_mask']) for e in self.db])
unq_masks = np.unique(masks, axis=0)
n_cls = unq_masks.shape[0]
class_db = {tuple(m.tolist()):[] for m in unq_masks}
for ei,e in enumerate(self.db):
class_db[tuple(e['class_mask'])].append(ei)
class_db = list(class_db.values())
for eis in class_db: # sanity check
cls_array = np.stack([self.db[ei]['class_mask'] for ei in eis])
assert ((cls_array - cls_array[0:1,:])**2).sum()<=1e-6
return class_db
def restrict_images(self):
print( "limiting dataset to seqs: " + str(self.limit_seq_to) )
if type(self.limit_seq_to) in (tuple,list):
if len(self.limit_seq_to) > 1 or self.limit_seq_to[0] >= 0:
self.db = [f for f in self.db if f['seq'] in self.limit_seq_to ]
elif type(self.limit_seq_to)==int:
if self.limit_seq_to > 0:
self.db = [f for f in self.db if f['seq'] < self.limit_seq_to ]
else:
assert False, "bad seq limit type"
if self.limit_to > 0:
tgtnum = min( self.limit_to, len(self.db) )
prm = list(range(len(self.db)))[0:tgtnum]
# with NumpySeedFix():
# prm = np.random.permutation( \
# len(self.db))[0:tgtnum]
print( "limiting dataset to %d samples" % tgtnum )
self.db = [self.db[i] for i in prm]
if self.subsample > 1:
orig_len = len(self.db)
self.db = [self.db[i] for i in range(0, len(self.db), self.subsample)]
print('db subsampled %d -> %d' % (orig_len, len(self.db)))
if self.kp_conf_thr > 0. and 'kp_conf' in self.db[0]:
for e in self.db:
v = torch.FloatTensor(e['kp_vis'])
c = torch.FloatTensor(e['kp_conf'])
e['kp_vis'] = (c > self.kp_conf_thr).float().tolist()
if self.min_visible > 0:
len_orig = len(self.db)
self.db = [ e for e in self.db \
if (torch.FloatTensor(e['kp_vis'])>0).float().sum()>self.min_visible]
print('kept %3.1f %% entries' % (100.*len(self.db)/float(len_orig)) )
assert len(self.db) > 10
def resize_image(self, image, mode='bilinear'):
image_size = [self.image_height, self.image_width]
minscale = min(image_size[i] / image.shape[i+1] for i in [0, 1])
imre = torch.nn.functional.interpolate( \
image[None], scale_factor=minscale, mode=mode)[0]
imre_ = torch.zeros(image.shape[0],image_size[0],image_size[1])
imre_[:,0:imre.shape[1],0:imre.shape[2]] = imre
return imre_, minscale
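# Illustrative note (not part of the original method): with the default
# 360 x 360 target and a 3 x 480 x 640 input, minscale = min(360/480, 360/640)
# = 0.5625, so the image is resized to 270 x 360 and zero-padded at the
# bottom to 360 x 360; __getitem__ later applies the same scale to 'kp_loc',
# 'kp_loc_3d' and the calibration matrix 'K'.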
def load_image(self, entry):
im = np.array(Image.open(entry['image_path']).convert('RGB'))
im = im.transpose((2,0,1))
im = im.astype(np.float32) / 255.
return im
def crop_around_box(self, entry, box_context=1.):
bbox = entry['bbox'].clone() # [xmin, ymin, w, h]
# increase box size
c = box_context
bbox[0] -= bbox[2]*c/2
bbox[1] -= bbox[3]*c/2
bbox[2] += bbox[2]*c
bbox[3] += bbox[3]*c
bbox = bbox.long()
# assert bbox[2] >= 2, 'weird box!'
# assert bbox[3] >= 2, 'weird box!'
bbox[2:4] = torch.clamp(bbox[2:4], 2)
entry['orig_image_size'] = bbox[[3,2]].float()
bbox[2:4] += bbox[0:2]+1 # convert to [xmin, ymin, xmax, ymax]
for k in ['images', 'masks', 'depths']:
if getattr(self, 'load_'+k) and k in entry:
crop_tensor = entry[k]
bbox[[0,2]] = torch.clamp(bbox[[0,2]], 0., crop_tensor.shape[2])
bbox[[1,3]] = torch.clamp(bbox[[1,3]], 0., crop_tensor.shape[1])
crop_tensor = crop_tensor[:, bbox[1]:bbox[3], bbox[0]:bbox[2]]
assert all(c>0 for c in crop_tensor.shape), 'squashed image'
entry[k] = crop_tensor
entry['kp_loc'] = entry['kp_loc'] - bbox[0:2,None].float()
return entry
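# Illustrative note (not part of the original method): with box_crop_context
# = 1.0, an [xmin, ymin, w, h] box of [100, 50, 80, 60] is expanded to
# [60, 20, 160, 120] -- twice the width and height, still centred on the
# original box -- before clamping to the image bounds and shifting 'kp_loc'
# into the crop frame.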
def __len__(self):
if self.rand_sample > 0:
return self.rand_sample
else:
return len(self.db)
def __getitem__(self, index):
assert index < len(self.db), \
'index %d out of range (%d)' % (index, len(self.db))
entry = copy.deepcopy(self.db[index])
if self.image_root is not None and 'image_path' in entry:
entry['image_path'] = os.path.join(self.image_root,entry['image_path'])
if self.mask_root is not None and 'mask_path' in entry:
entry['mask_path'] = os.path.join(self.mask_root,entry['mask_path'])
if self.depth_root is not None and 'depth_path' in entry:
entry['depth_path'] = os.path.join(self.depth_root,entry['depth_path'])
if self.load_images:
entry['images'] = self.load_image(entry)
entry['orig_image_size'] = list(entry['images'].shape[1:])
if self.load_depths:
entry['depths'] = load_depth(entry)
if self.load_masks:
entry['masks'] = load_mask(entry)
if entry['masks'] is None:
entry['masks'] = np.zeros(entry['images'].shape[1:3] \
)[None].astype(np.float32)
else:
# assert entry['masks'].shape[1:3]==entry['images'].shape[1:3]
if self.load_images and \
entry['masks'].shape[1:3] != entry['images'].shape[1:3]:
# print(entry['mask_path'])
# print(entry['image_path'])
# import pdb; pdb.set_trace()
print('bad mask size!!!!')
entry['masks'] = np.zeros(entry['images'].shape[1:3] \
)[None].astype(np.float32)
# convert to torch Tensors where possible
for fld in ( 'kp_loc', 'kp_vis', 'kp_loc_3d',
'class_mask', 'kp_defined', 'images',
'orig_image_size', 'masks', 'K', 'depths', 'bbox',
'kp_conf', 'R', 'T'):
if fld in entry:
entry[fld] = torch.FloatTensor(entry[fld])
# first crop if needed, then resize
if self.box_crop and self.load_images:
entry = self.crop_around_box(entry, self.box_crop_context)
if 'sfm_model' not in entry:
entry['sfm_model'] = '<NO_MODEL>'
entry['K_orig'] = entry['K'].clone()
if self.load_images:
# resize image
entry['images'], scale = self.resize_image(entry['images'],
mode='bilinear')
for fld in ('kp_loc', 'kp_loc_3d', 'K'):
if fld in entry:
entry[fld] *= scale
if fld=='K':
entry[fld][2,2] = 1.
else:
scale = 1.
if self.load_masks:
entry['masks'], _ = self.resize_image(entry['masks'],
mode='nearest')
if self.dilate_masks > 0:
#print('mask dilation')
entry['masks'] = torch.nn.functional.max_pool2d(
entry['masks'],
self.dilate_masks*2+1,
stride=1,
padding=self.dilate_masks )
elif self.dilate_masks < 0:
imask_dil = torch.nn.functional.max_pool2d(
1-entry['masks'],
abs(self.dilate_masks)*2+1,
stride=1,
padding=abs(self.dilate_masks) )
entry['masks'] = torch.clamp(entry['masks'] - imask_dil, 0.)
if self.load_depths:
entry['depths'], _ = self.resize_image(entry['depths'],
mode='nearest')
entry['depths'] *= scale
if 'p3d_info' in entry: # filter the kp out of bbox
bbox = torch.FloatTensor(entry['p3d_info']['bbox'])
bbox_vis, bbox_err = bbox_kp_visibility( \
bbox, entry['kp_loc'], entry['kp_vis'])
entry['kp_vis'] = entry['kp_vis'] * bbox_vis.float()
# mask out invisible
entry['kp_loc'] = entry['kp_loc'] * entry['kp_vis'][None]
return entry
def bbox_kp_visibility(bbox, keypoints, vis):
bx,by,bw,bh = bbox
x = keypoints[0]; y = keypoints[1]
ctx_ = 0.1
in_box = (x>=bx-ctx_*bw) * (x<=bx+bw*(1+ctx_)) * \
(y>=by-ctx_*bh) * (y<=by+bh*(1+ctx_))
in_box = in_box * (vis==1)
err = torch.stack( [ (bx-ctx_*bw)-x,
x-(bx+bw*(1+ctx_)),
(by-ctx_*bh)-y,
y-(by+bh*(1+ctx_)) ] )
err = torch.relu(err) * vis[None].float()
err = torch.stack( ( torch.max( err[0],err[1] ),
torch.max( err[2],err[3] ) ) ).max(dim=1)[0]
return in_box, err
def read_colmap_depth(path):
with open(path, "rb") as fid:
width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
usecols=(0, 1, 2), dtype=int)
fid.seek(0)
num_delimiter = 0
byte = fid.read(1)
while True:
if byte == b"&":
num_delimiter += 1
if num_delimiter >= 3:
break
byte = fid.read(1)
array = np.fromfile(fid, np.float32)
array = array.reshape((width, height, channels), order="F")
return np.transpose(array, (1, 0, 2)).squeeze()
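# Illustrative note (not part of the original file): the parser above follows
# COLMAP's dense depth-map layout -- an ASCII header "width&height&channels&"
# followed by the raw float32 buffer stored in column-major (Fortran) order,
# which is why the array is read with order="F" and then transposed.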
def load_depth(entry):
if entry['depth_path'].endswith('<NO_DEPTH>'):
# we don't have depth
d = np.ones(entry['images'].shape[1:]).astype(float)[None]
else:
ext = os.path.splitext(entry['depth_path'])[-1]
if ext=='.bin': # colmap binary format
d = read_colmap_depth(entry['depth_path'])
# clamp the values
min_depth, max_depth = np.percentile(d, [1, 95])
d[d < min_depth] = min_depth
d[d > max_depth] = max_depth
d = d.astype(np.float32)[None]
elif ext=='.png': # ldos depth
postfixl = len('081276300.rgb.jpg')
dpath_corrected = glob.glob(entry['depth_path'][0:-postfixl]+'*')
assert len(dpath_corrected)==1
d = np.array(Image.open(dpath_corrected[0])).astype(float)[None]
d /= 1000. # to meters
elif ext=='.tiff': # sparse colmap depth
d = np.array(Image.open(entry['depth_path'])).astype(float)[None]
else:
raise ValueError('unsupported depth ext "%s"' % ext)
return d
def load_mask(entry):
# fix for birds
if not os.path.isfile(entry['mask_path']):
for ext in ('.png', '.jpg'):
new_path = os.path.splitext(entry['mask_path'])[0] + ext
if os.path.isfile(new_path):
entry['mask_path'] = new_path
if not os.path.isfile(entry['mask_path']):
print('no mask!')
print(entry['mask_path'])
mask = None
else:
mask = np.array(Image.open(entry['mask_path']))
mask = mask.astype(np.float32)[None]
return mask
|
c3dm-main
|
c3dm/dataset/keypoints_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
# list of root folders containing the dataset images
IMAGE_ROOTS = {
'freicars_clickp_filtd': ('./dataset_root/freicars/',),
'freicars_clickp_filtd_dbg': ('./dataset_root/freicars/',),
'cub_birds_hrnet_v2': ('./dataset_root/cub_birds/',),
'celeba_ff': ('./dataset_root/celeba/',
'./dataset_root/florence/'),
'pascal3d_clickp_all': ('./dataset_root/PASCAL3D+_release1.1',),
}
MASK_ROOTS = copy.deepcopy(IMAGE_ROOTS)
DEPTH_ROOTS = copy.deepcopy(IMAGE_ROOTS)
MASK_ROOTS['cub_birds_hrnet_v2'] = ('./dataset_root/cub_birds/',)
DATASET_ROOT = './dataset_root'
DATASET_URL = {
'freicars_clickp_filtd_train': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_train.json.gz',
'freicars_clickp_filtd_val': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_val.json.gz',
}
IMAGE_URLS = {
'cub_birds_hrnet_v2': ('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz',),
'pascal3d_clickp_all': ('ftp://cs.stanford.edu/cs/cvgl/PASCAL3D+_release1.1.zip',),
}
MASK_URLS = {
'cub_birds_hrnet_v2': ('',),
}
DEPTH_URLS = {
'cub_birds_hrnet_v2': ('',),
}
C3DM_URLS = {
'freicars_clickp_filtd': 'https://dl.fbaipublicfiles.com/c3dm/c3dm_freicars.tar.gz',
}
C3DPO_MODELS = {
'cub_birds_hrnet_orth_b50': './dataset_root/c3dpo_cub',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': './dataset_root/c3dpo_freicars',
}
C3DPO_URLS = {
'cub_birds_hrnet_orth_b50': '',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': 'https://dl.fbaipublicfiles.com/c3dm/c3dpo_freicars.tar.gz',
}
# ----- connectivity patterns for visualizing the stick-men
STICKS = {
'pose_track': [ [2, 0],[0, 1],[1, 5],[5, 7],
[9, 7],[1, 6],[6, 8],[10, 8],
[1, 12],[12, 11],[11, 1],[14, 12],
[11, 13],[15, 13],[16, 14]] ,
'h36m': [ [10, 9], [9, 8], [8, 14],
[14, 15], [15, 16], [8, 11],
[11, 12], [12, 13], [8, 7],
[7, 0], [1, 0], [1, 2],
[2, 3], [0, 4], [4, 5], [5, 6] ],
'cub_birds': [ [1, 5], [5, 4], [4, 9],
[9, 0], [0, 13], [0, 12],
[0, 8], [12, 13], [1, 14],
[14, 3], [3, 2], [2, 7],
[1, 10], [1, 6], [2, 11],
[2, 7], [8, 13] ],
'coco': [ [13,15], [14,16], [12,14], [11,12,], [11,13],
[0,12], [0,11], [8,10], [6,8],
[7,9], [5,7], [0,5], [0,6],
[0,3], [0,4], [0,2], [0,1] ],
'freicars': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'pascal3d': {
'car': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'aeroplane': [[2, 5], [1, 4], [5, 3], [3, 7],
[7, 0], [0, 5], [5, 7], [5, 6],
[6, 0], [6, 3], [2, 4], [2, 1]],
'motorbike': [[6, 2],
[2, 9],
[2, 3],
[3, 8],
[5, 8],
[3, 5],
[2, 1],
[1, 0],
[0, 7],
[0, 4],
[4, 7],
[1, 4],
[1, 7],
[1, 5],
[1, 8]],
'sofa': [[1, 5],
[5, 4],
[4, 6],
[6, 2],
[2, 0],
[1, 0],
[0, 4],
[1, 3],
[7, 5],
[2, 3],
[3, 7],
[9, 7],
[7, 6],
[6, 8],
[8, 9]],
'chair': [[7, 3],
[6, 2],
[9, 5],
[8, 4],
[7, 9],
[8, 6],
[6, 7],
[9, 8],
[9, 1],
[8, 0],
[1, 0]],
},
}
STICKS['cub_birds_hrnet'] = STICKS['cub_birds']
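# Illustrative note (not part of the original file): each [i, j] pair in
# STICKS names two keypoint indices to be joined by a line segment when the
# stick-figure overlay for that dataset/class is rendered.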
H36M_ACTIONS = [ 'Directions','Discussion','Eating','Greeting',
'Phoning','Photo','Posing','Purchases','Sitting',
'SittingDown','Smoking','Waiting','WalkDog',
'Walking','WalkTogether' ]
P3D_NUM_KEYPOINTS = {\
'aeroplane': 8,
'car': 12,
'tvmonitor': 8,
'sofa': 10,
'motorbike': 10,
'diningtable': 12,
'chair': 10,
'bus': 12,
'bottle': 7,
'boat': 7,
'bicycle': 11,
'train': 17 }
P3D_CLASSES = list(P3D_NUM_KEYPOINTS.keys())
# add the per-class p3d db paths
for cls_ in P3D_CLASSES:
IMAGE_ROOTS['pascal3d_clickp_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
IMAGE_ROOTS['pascal3d_clickp_mesh_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
IMAGE_ROOTS['pascal3d_clickp_clean_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
P3D_NUM_IMAGES={
'train':{"aeroplane": 1953, "car": 5627,
"tvmonitor": 1374,"sofa": 669,
"motorbike": 725,"diningtable": 751,
"chair": 1186,"bus": 1185,
"bottle": 1601,"boat": 2046,
"bicycle": 904,"train": 1113,},
'val': {"aeroplane": 269,"car": 294,
"tvmonitor": 206,"sofa": 37,
"motorbike": 116,"diningtable": 12,
"chair": 227,"bus": 153,
"bottle": 249,"boat": 163,
"bicycle": 115,"train": 109}}
DATASET_CFG = {
'freicars_clickp_filtd':
{
'image_height': 9*40,
'image_width': 16*40,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'celeba':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
'subsample': 4,
},
'ldos_chairs':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'kp_conf_thr': 0.8,
'box_crop': False,
},
'ldos_chairs_armchair':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 4,
'kp_conf_thr': 0.6,
'box_crop': False,
},
'pascal3d_clickp':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'box_crop': True,
},
'pascal3d_clickp_clean':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
# 'min_visible': 4,
'box_crop': True,
'dilate_masks': 0,
'box_crop_context': 0.2,
},
'h36m_sparse':
{
'image_height': 25*20,
'image_width': 15*20,
'max_angle_diff': 3.14/2,
# 'max_frame_diff': 0.33,
# 'min_visible': 6,
'subsample': 10,
'box_crop': True,
'box_crop_context': 0.2,
'dilate_masks': 0,
},
'cub_birds_hrnet_v2':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'default':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'box_crop': False,
}
}
for cls_ in P3D_CLASSES:
DATASET_CFG['pascal3d_clickp_'+cls_] = DATASET_CFG['pascal3d_clickp']
DATASET_CFG['pascal3d_clickp_clean_'+cls_] = DATASET_CFG['pascal3d_clickp_clean']
FILTER_DB_SETTINGS = {
'freicars_clickp_filtd': {
'nn': 1e-3,
'perc_keep': 0.95,
'sig': 0.02,
'lap_size': 5e-4,
'lap_alpha': 0.9,
},
'default': {
'nn': 1e-3,
'perc_keep': 0.9,
'sig': 0.01,
'lap_size': 1e-3,
'lap_alpha': 0.9,
}
}
FREIBURG_VAL_IMAGES = [
"022/undistort/images/frame_0000001.jpg.half.jpg",
"022/undistort/images/frame_0000002.jpg.half.jpg",
"022/undistort/images/frame_0000003.jpg.half.jpg",
"022/undistort/images/frame_0000004.jpg.half.jpg",
"022/undistort/images/frame_0000005.jpg.half.jpg",
"022/undistort/images/frame_0000006.jpg.half.jpg",
"022/undistort/images/frame_0000007.jpg.half.jpg",
"022/undistort/images/frame_0000008.jpg.half.jpg",
"022/undistort/images/frame_0000009.jpg.half.jpg",
"022/undistort/images/frame_0000010.jpg.half.jpg",
"022/undistort/images/frame_0000011.jpg.half.jpg",
"022/undistort/images/frame_0000012.jpg.half.jpg",
"022/undistort/images/frame_0000013.jpg.half.jpg",
"022/undistort/images/frame_0000014.jpg.half.jpg",
"022/undistort/images/frame_0000015.jpg.half.jpg",
"022/undistort/images/frame_0000016.jpg.half.jpg",
"022/undistort/images/frame_0000017.jpg.half.jpg",
"022/undistort/images/frame_0000018.jpg.half.jpg",
"022/undistort/images/frame_0000019.jpg.half.jpg",
"022/undistort/images/frame_0000020.jpg.half.jpg",
"022/undistort/images/frame_0000021.jpg.half.jpg",
"022/undistort/images/frame_0000022.jpg.half.jpg",
"022/undistort/images/frame_0000023.jpg.half.jpg",
"022/undistort/images/frame_0000024.jpg.half.jpg",
"022/undistort/images/frame_0000025.jpg.half.jpg",
"022/undistort/images/frame_0000026.jpg.half.jpg",
"022/undistort/images/frame_0000027.jpg.half.jpg",
"022/undistort/images/frame_0000030.jpg.half.jpg",
"022/undistort/images/frame_0000031.jpg.half.jpg",
"022/undistort/images/frame_0000032.jpg.half.jpg",
"022/undistort/images/frame_0000033.jpg.half.jpg",
"022/undistort/images/frame_0000034.jpg.half.jpg",
"022/undistort/images/frame_0000035.jpg.half.jpg",
"022/undistort/images/frame_0000036.jpg.half.jpg",
"022/undistort/images/frame_0000037.jpg.half.jpg",
"022/undistort/images/frame_0000038.jpg.half.jpg",
"022/undistort/images/frame_0000039.jpg.half.jpg",
"022/undistort/images/frame_0000040.jpg.half.jpg",
"022/undistort/images/frame_0000041.jpg.half.jpg",
"022/undistort/images/frame_0000042.jpg.half.jpg",
"022/undistort/images/frame_0000043.jpg.half.jpg",
"022/undistort/images/frame_0000044.jpg.half.jpg",
"022/undistort/images/frame_0000045.jpg.half.jpg",
"022/undistort/images/frame_0000046.jpg.half.jpg",
"022/undistort/images/frame_0000047.jpg.half.jpg",
"022/undistort/images/frame_0000048.jpg.half.jpg",
"022/undistort/images/frame_0000049.jpg.half.jpg",
"022/undistort/images/frame_0000050.jpg.half.jpg",
"022/undistort/images/frame_0000051.jpg.half.jpg",
"022/undistort/images/frame_0000052.jpg.half.jpg",
"022/undistort/images/frame_0000053.jpg.half.jpg",
"022/undistort/images/frame_0000054.jpg.half.jpg",
"022/undistort/images/frame_0000055.jpg.half.jpg",
"022/undistort/images/frame_0000056.jpg.half.jpg",
"022/undistort/images/frame_0000057.jpg.half.jpg",
"022/undistort/images/frame_0000058.jpg.half.jpg",
"022/undistort/images/frame_0000059.jpg.half.jpg",
"022/undistort/images/frame_0000060.jpg.half.jpg",
"022/undistort/images/frame_0000061.jpg.half.jpg",
"022/undistort/images/frame_0000062.jpg.half.jpg",
"022/undistort/images/frame_0000063.jpg.half.jpg",
"022/undistort/images/frame_0000064.jpg.half.jpg",
"022/undistort/images/frame_0000065.jpg.half.jpg",
"022/undistort/images/frame_0000066.jpg.half.jpg",
"022/undistort/images/frame_0000067.jpg.half.jpg",
"022/undistort/images/frame_0000068.jpg.half.jpg",
"022/undistort/images/frame_0000069.jpg.half.jpg",
"022/undistort/images/frame_0000070.jpg.half.jpg",
"022/undistort/images/frame_0000071.jpg.half.jpg",
"022/undistort/images/frame_0000072.jpg.half.jpg",
"022/undistort/images/frame_0000073.jpg.half.jpg",
"022/undistort/images/frame_0000074.jpg.half.jpg",
"022/undistort/images/frame_0000075.jpg.half.jpg",
"022/undistort/images/frame_0000076.jpg.half.jpg",
"022/undistort/images/frame_0000077.jpg.half.jpg",
"022/undistort/images/frame_0000078.jpg.half.jpg",
"022/undistort/images/frame_0000079.jpg.half.jpg",
"022/undistort/images/frame_0000080.jpg.half.jpg",
"022/undistort/images/frame_0000081.jpg.half.jpg",
"022/undistort/images/frame_0000082.jpg.half.jpg",
"022/undistort/images/frame_0000083.jpg.half.jpg",
"022/undistort/images/frame_0000084.jpg.half.jpg",
"022/undistort/images/frame_0000085.jpg.half.jpg",
"022/undistort/images/frame_0000086.jpg.half.jpg",
"022/undistort/images/frame_0000087.jpg.half.jpg",
"022/undistort/images/frame_0000088.jpg.half.jpg",
"022/undistort/images/frame_0000089.jpg.half.jpg",
"022/undistort/images/frame_0000090.jpg.half.jpg",
"022/undistort/images/frame_0000091.jpg.half.jpg",
"022/undistort/images/frame_0000092.jpg.half.jpg",
"022/undistort/images/frame_0000093.jpg.half.jpg",
"022/undistort/images/frame_0000094.jpg.half.jpg",
"022/undistort/images/frame_0000095.jpg.half.jpg",
"022/undistort/images/frame_0000096.jpg.half.jpg",
"022/undistort/images/frame_0000097.jpg.half.jpg",
"022/undistort/images/frame_0000098.jpg.half.jpg",
"022/undistort/images/frame_0000099.jpg.half.jpg",
"022/undistort/images/frame_0000101.jpg.half.jpg",
"022/undistort/images/frame_0000104.jpg.half.jpg",
"022/undistort/images/frame_0000105.jpg.half.jpg",
"022/undistort/images/frame_0000106.jpg.half.jpg",
"022/undistort/images/frame_0000107.jpg.half.jpg",
"022/undistort/images/frame_0000108.jpg.half.jpg",
"022/undistort/images/frame_0000109.jpg.half.jpg",
"022/undistort/images/frame_0000110.jpg.half.jpg",
"022/undistort/images/frame_0000111.jpg.half.jpg",
"022/undistort/images/frame_0000112.jpg.half.jpg",
"022/undistort/images/frame_0000113.jpg.half.jpg",
"022/undistort/images/frame_0000114.jpg.half.jpg",
"022/undistort/images/frame_0000115.jpg.half.jpg",
"022/undistort/images/frame_0000116.jpg.half.jpg",
"022/undistort/images/frame_0000117.jpg.half.jpg",
"022/undistort/images/frame_0000118.jpg.half.jpg",
"022/undistort/images/frame_0000119.jpg.half.jpg",
"022/undistort/images/frame_0000120.jpg.half.jpg",
"022/undistort/images/frame_0000121.jpg.half.jpg",
"022/undistort/images/frame_0000122.jpg.half.jpg",
"022/undistort/images/frame_0000123.jpg.half.jpg",
"022/undistort/images/frame_0000124.jpg.half.jpg",
"022/undistort/images/frame_0000125.jpg.half.jpg",
"022/undistort/images/frame_0000126.jpg.half.jpg",
"022/undistort/images/frame_0000127.jpg.half.jpg",
"022/undistort/images/frame_0000128.jpg.half.jpg",
"022/undistort/images/frame_0000129.jpg.half.jpg",
"022/undistort/images/frame_0000130.jpg.half.jpg",
"022/undistort/images/frame_0000131.jpg.half.jpg",
"022/undistort/images/frame_0000132.jpg.half.jpg",
"022/undistort/images/frame_0000133.jpg.half.jpg",
"022/undistort/images/frame_0000134.jpg.half.jpg",
"022/undistort/images/frame_0000135.jpg.half.jpg",
"022/undistort/images/frame_0000136.jpg.half.jpg",
"022/undistort/images/frame_0000137.jpg.half.jpg",
"022/undistort/images/frame_0000138.jpg.half.jpg",
"022/undistort/images/frame_0000139.jpg.half.jpg",
"022/undistort/images/frame_0000140.jpg.half.jpg",
"022/undistort/images/frame_0000141.jpg.half.jpg",
"022/undistort/images/frame_0000142.jpg.half.jpg",
"022/undistort/images/frame_0000143.jpg.half.jpg",
"022/undistort/images/frame_0000144.jpg.half.jpg",
"022/undistort/images/frame_0000145.jpg.half.jpg",
"022/undistort/images/frame_0000146.jpg.half.jpg",
"022/undistort/images/frame_0000147.jpg.half.jpg",
"022/undistort/images/frame_0000148.jpg.half.jpg",
"022/undistort/images/frame_0000149.jpg.half.jpg",
"022/undistort/images/frame_0000150.jpg.half.jpg",
"022/undistort/images/frame_0000151.jpg.half.jpg",
"022/undistort/images/frame_0000152.jpg.half.jpg",
"022/undistort/images/frame_0000153.jpg.half.jpg",
"022/undistort/images/frame_0000154.jpg.half.jpg",
"022/undistort/images/frame_0000155.jpg.half.jpg",
"022/undistort/images/frame_0000156.jpg.half.jpg",
"022/undistort/images/frame_0000157.jpg.half.jpg",
"022/undistort/images/frame_0000158.jpg.half.jpg",
"022/undistort/images/frame_0000159.jpg.half.jpg",
"022/undistort/images/frame_0000160.jpg.half.jpg",
"022/undistort/images/frame_0000161.jpg.half.jpg",
"022/undistort/images/frame_0000162.jpg.half.jpg",
"022/undistort/images/frame_0000163.jpg.half.jpg",
"022/undistort/images/frame_0000164.jpg.half.jpg",
"022/undistort/images/frame_0000165.jpg.half.jpg",
"022/undistort/images/frame_0000166.jpg.half.jpg",
"022/undistort/images/frame_0000167.jpg.half.jpg",
"022/undistort/images/frame_0000168.jpg.half.jpg",
"022/undistort/images/frame_0000169.jpg.half.jpg",
"022/undistort/images/frame_0000170.jpg.half.jpg",
"022/undistort/images/frame_0000171.jpg.half.jpg",
"022/undistort/images/frame_0000172.jpg.half.jpg",
"022/undistort/images/frame_0000173.jpg.half.jpg",
"022/undistort/images/frame_0000174.jpg.half.jpg",
"022/undistort/images/frame_0000176.jpg.half.jpg",
"022/undistort/images/frame_0000177.jpg.half.jpg",
"022/undistort/images/frame_0000178.jpg.half.jpg",
"022/undistort/images/frame_0000179.jpg.half.jpg",
"022/undistort/images/frame_0000180.jpg.half.jpg",
"022/undistort/images/frame_0000181.jpg.half.jpg",
"022/undistort/images/frame_0000182.jpg.half.jpg",
"022/undistort/images/frame_0000183.jpg.half.jpg",
"022/undistort/images/frame_0000184.jpg.half.jpg",
"022/undistort/images/frame_0000185.jpg.half.jpg",
"022/undistort/images/frame_0000186.jpg.half.jpg",
"022/undistort/images/frame_0000187.jpg.half.jpg",
"022/undistort/images/frame_0000188.jpg.half.jpg",
"022/undistort/images/frame_0000189.jpg.half.jpg",
"022/undistort/images/frame_0000190.jpg.half.jpg",
"022/undistort/images/frame_0000191.jpg.half.jpg",
"022/undistort/images/frame_0000192.jpg.half.jpg",
"022/undistort/images/frame_0000193.jpg.half.jpg",
"022/undistort/images/frame_0000194.jpg.half.jpg",
"022/undistort/images/frame_0000195.jpg.half.jpg",
"022/undistort/images/frame_0000196.jpg.half.jpg",
"022/undistort/images/frame_0000197.jpg.half.jpg",
"022/undistort/images/frame_0000198.jpg.half.jpg",
"022/undistort/images/frame_0000199.jpg.half.jpg",
"022/undistort/images/frame_0000200.jpg.half.jpg",
"022/undistort/images/frame_0000201.jpg.half.jpg",
"022/undistort/images/frame_0000202.jpg.half.jpg",
"022/undistort/images/frame_0000203.jpg.half.jpg",
"022/undistort/images/frame_0000204.jpg.half.jpg",
"022/undistort/images/frame_0000205.jpg.half.jpg",
"022/undistort/images/frame_0000206.jpg.half.jpg",
"022/undistort/images/frame_0000207.jpg.half.jpg",
"022/undistort/images/frame_0000208.jpg.half.jpg",
"022/undistort/images/frame_0000209.jpg.half.jpg",
"022/undistort/images/frame_0000210.jpg.half.jpg",
"022/undistort/images/frame_0000211.jpg.half.jpg",
"022/undistort/images/frame_0000212.jpg.half.jpg",
"022/undistort/images/frame_0000213.jpg.half.jpg",
"022/undistort/images/frame_0000214.jpg.half.jpg",
"022/undistort/images/frame_0000215.jpg.half.jpg",
"022/undistort/images/frame_0000216.jpg.half.jpg",
"022/undistort/images/frame_0000217.jpg.half.jpg",
"022/undistort/images/frame_0000218.jpg.half.jpg",
"022/undistort/images/frame_0000219.jpg.half.jpg",
"022/undistort/images/frame_0000220.jpg.half.jpg",
"022/undistort/images/frame_0000221.jpg.half.jpg",
"022/undistort/images/frame_0000222.jpg.half.jpg",
"022/undistort/images/frame_0000223.jpg.half.jpg",
"022/undistort/images/frame_0000224.jpg.half.jpg",
"022/undistort/images/frame_0000225.jpg.half.jpg",
"022/undistort/images/frame_0000226.jpg.half.jpg",
"022/undistort/images/frame_0000227.jpg.half.jpg",
"022/undistort/images/frame_0000228.jpg.half.jpg",
"022/undistort/images/frame_0000229.jpg.half.jpg",
"022/undistort/images/frame_0000230.jpg.half.jpg",
"022/undistort/images/frame_0000231.jpg.half.jpg",
"022/undistort/images/frame_0000232.jpg.half.jpg",
"022/undistort/images/frame_0000233.jpg.half.jpg",
"022/undistort/images/frame_0000234.jpg.half.jpg",
"022/undistort/images/frame_0000235.jpg.half.jpg",
"022/undistort/images/frame_0000236.jpg.half.jpg",
"022/undistort/images/frame_0000237.jpg.half.jpg",
"022/undistort/images/frame_0000238.jpg.half.jpg",
"022/undistort/images/frame_0000239.jpg.half.jpg",
"022/undistort/images/frame_0000240.jpg.half.jpg",
"022/undistort/images/frame_0000241.jpg.half.jpg",
"022/undistort/images/frame_0000242.jpg.half.jpg",
"022/undistort/images/frame_0000243.jpg.half.jpg",
"022/undistort/images/frame_0000244.jpg.half.jpg",
"022/undistort/images/frame_0000245.jpg.half.jpg",
"022/undistort/images/frame_0000246.jpg.half.jpg",
"022/undistort/images/frame_0000247.jpg.half.jpg",
"022/undistort/images/frame_0000248.jpg.half.jpg",
"022/undistort/images/frame_0000249.jpg.half.jpg",
"022/undistort/images/frame_0000250.jpg.half.jpg",
"022/undistort/images/frame_0000251.jpg.half.jpg",
"022/undistort/images/frame_0000252.jpg.half.jpg",
"022/undistort/images/frame_0000253.jpg.half.jpg",
"022/undistort/images/frame_0000254.jpg.half.jpg",
"022/undistort/images/frame_0000255.jpg.half.jpg",
"022/undistort/images/frame_0000256.jpg.half.jpg",
"022/undistort/images/frame_0000257.jpg.half.jpg",
"022/undistort/images/frame_0000258.jpg.half.jpg",
"022/undistort/images/frame_0000259.jpg.half.jpg",
"022/undistort/images/frame_0000260.jpg.half.jpg",
"022/undistort/images/frame_0000261.jpg.half.jpg",
"022/undistort/images/frame_0000262.jpg.half.jpg",
"022/undistort/images/frame_0000263.jpg.half.jpg",
"022/undistort/images/frame_0000264.jpg.half.jpg",
"022/undistort/images/frame_0000265.jpg.half.jpg",
"022/undistort/images/frame_0000266.jpg.half.jpg",
"022/undistort/images/frame_0000267.jpg.half.jpg",
"022/undistort/images/frame_0000268.jpg.half.jpg",
"022/undistort/images/frame_0000269.jpg.half.jpg",
"022/undistort/images/frame_0000270.jpg.half.jpg",
"022/undistort/images/frame_0000271.jpg.half.jpg",
"022/undistort/images/frame_0000272.jpg.half.jpg",
"022/undistort/images/frame_0000273.jpg.half.jpg",
"022/undistort/images/frame_0000274.jpg.half.jpg",
"022/undistort/images/frame_0000275.jpg.half.jpg",
"022/undistort/images/frame_0000276.jpg.half.jpg",
"022/undistort/images/frame_0000277.jpg.half.jpg",
"022/undistort/images/frame_0000278.jpg.half.jpg",
"022/undistort/images/frame_0000279.jpg.half.jpg",
"022/undistort/images/frame_0000280.jpg.half.jpg",
"022/undistort/images/frame_0000281.jpg.half.jpg",
"022/undistort/images/frame_0000283.jpg.half.jpg",
"022/undistort/images/frame_0000284.jpg.half.jpg",
"022/undistort/images/frame_0000285.jpg.half.jpg",
"022/undistort/images/frame_0000286.jpg.half.jpg",
"022/undistort/images/frame_0000287.jpg.half.jpg",
"022/undistort/images/frame_0000288.jpg.half.jpg",
"022/undistort/images/frame_0000289.jpg.half.jpg",
"022/undistort/images/frame_0000290.jpg.half.jpg",
"022/undistort/images/frame_0000291.jpg.half.jpg",
"022/undistort/images/frame_0000292.jpg.half.jpg",
"022/undistort/images/frame_0000293.jpg.half.jpg",
"022/undistort/images/frame_0000294.jpg.half.jpg",
"022/undistort/images/frame_0000295.jpg.half.jpg",
"022/undistort/images/frame_0000296.jpg.half.jpg",
"022/undistort/images/frame_0000297.jpg.half.jpg",
"022/undistort/images/frame_0000298.jpg.half.jpg",
"022/undistort/images/frame_0000299.jpg.half.jpg",
"022/undistort/images/frame_0000300.jpg.half.jpg",
"022/undistort/images/frame_0000301.jpg.half.jpg",
"022/undistort/images/frame_0000302.jpg.half.jpg",
"022/undistort/images/frame_0000303.jpg.half.jpg",
"022/undistort/images/frame_0000304.jpg.half.jpg",
"022/undistort/images/frame_0000305.jpg.half.jpg",
"022/undistort/images/frame_0000306.jpg.half.jpg",
"022/undistort/images/frame_0000307.jpg.half.jpg",
"022/undistort/images/frame_0000308.jpg.half.jpg",
"022/undistort/images/frame_0000309.jpg.half.jpg",
"022/undistort/images/frame_0000310.jpg.half.jpg",
"022/undistort/images/frame_0000311.jpg.half.jpg",
"022/undistort/images/frame_0000312.jpg.half.jpg",
"022/undistort/images/frame_0000313.jpg.half.jpg",
"022/undistort/images/frame_0000314.jpg.half.jpg",
"022/undistort/images/frame_0000315.jpg.half.jpg",
"022/undistort/images/frame_0000316.jpg.half.jpg",
"022/undistort/images/frame_0000317.jpg.half.jpg",
"022/undistort/images/frame_0000318.jpg.half.jpg",
"022/undistort/images/frame_0000319.jpg.half.jpg",
"022/undistort/images/frame_0000320.jpg.half.jpg",
"022/undistort/images/frame_0000321.jpg.half.jpg",
"022/undistort/images/frame_0000322.jpg.half.jpg",
"022/undistort/images/frame_0000323.jpg.half.jpg",
"022/undistort/images/frame_0000324.jpg.half.jpg",
"022/undistort/images/frame_0000325.jpg.half.jpg",
"022/undistort/images/frame_0000326.jpg.half.jpg",
"022/undistort/images/frame_0000327.jpg.half.jpg",
"022/undistort/images/frame_0000328.jpg.half.jpg",
"022/undistort/images/frame_0000329.jpg.half.jpg",
"022/undistort/images/frame_0000330.jpg.half.jpg",
"022/undistort/images/frame_0000331.jpg.half.jpg",
"022/undistort/images/frame_0000332.jpg.half.jpg",
"022/undistort/images/frame_0000333.jpg.half.jpg",
"022/undistort/images/frame_0000334.jpg.half.jpg",
"022/undistort/images/frame_0000335.jpg.half.jpg",
"022/undistort/images/frame_0000336.jpg.half.jpg",
"022/undistort/images/frame_0000337.jpg.half.jpg",
"022/undistort/images/frame_0000338.jpg.half.jpg",
"022/undistort/images/frame_0000339.jpg.half.jpg",
"022/undistort/images/frame_0000340.jpg.half.jpg",
"022/undistort/images/frame_0000341.jpg.half.jpg",
"022/undistort/images/frame_0000342.jpg.half.jpg",
"022/undistort/images/frame_0000343.jpg.half.jpg",
"022/undistort/images/frame_0000344.jpg.half.jpg",
"022/undistort/images/frame_0000345.jpg.half.jpg",
"022/undistort/images/frame_0000346.jpg.half.jpg",
"022/undistort/images/frame_0000347.jpg.half.jpg",
"022/undistort/images/frame_0000348.jpg.half.jpg",
"022/undistort/images/frame_0000349.jpg.half.jpg",
"022/undistort/images/frame_0000350.jpg.half.jpg",
"022/undistort/images/frame_0000351.jpg.half.jpg",
"022/undistort/images/frame_0000352.jpg.half.jpg",
"022/undistort/images/frame_0000353.jpg.half.jpg",
"034/undistort/images/frame_0000001.jpg.half.jpg",
"034/undistort/images/frame_0000002.jpg.half.jpg",
"034/undistort/images/frame_0000003.jpg.half.jpg",
"034/undistort/images/frame_0000004.jpg.half.jpg",
"034/undistort/images/frame_0000005.jpg.half.jpg",
"034/undistort/images/frame_0000006.jpg.half.jpg",
"034/undistort/images/frame_0000007.jpg.half.jpg",
"034/undistort/images/frame_0000008.jpg.half.jpg",
"034/undistort/images/frame_0000009.jpg.half.jpg",
"034/undistort/images/frame_0000010.jpg.half.jpg",
"034/undistort/images/frame_0000011.jpg.half.jpg",
"034/undistort/images/frame_0000013.jpg.half.jpg",
"034/undistort/images/frame_0000014.jpg.half.jpg",
"034/undistort/images/frame_0000015.jpg.half.jpg",
"034/undistort/images/frame_0000016.jpg.half.jpg",
"034/undistort/images/frame_0000017.jpg.half.jpg",
"034/undistort/images/frame_0000018.jpg.half.jpg",
"034/undistort/images/frame_0000019.jpg.half.jpg",
"034/undistort/images/frame_0000020.jpg.half.jpg",
"034/undistort/images/frame_0000021.jpg.half.jpg",
"034/undistort/images/frame_0000022.jpg.half.jpg",
"034/undistort/images/frame_0000023.jpg.half.jpg",
"034/undistort/images/frame_0000024.jpg.half.jpg",
"034/undistort/images/frame_0000025.jpg.half.jpg",
"034/undistort/images/frame_0000027.jpg.half.jpg",
"034/undistort/images/frame_0000028.jpg.half.jpg",
"034/undistort/images/frame_0000029.jpg.half.jpg",
"034/undistort/images/frame_0000031.jpg.half.jpg",
"034/undistort/images/frame_0000032.jpg.half.jpg",
"034/undistort/images/frame_0000033.jpg.half.jpg",
"034/undistort/images/frame_0000036.jpg.half.jpg",
"034/undistort/images/frame_0000037.jpg.half.jpg",
"034/undistort/images/frame_0000038.jpg.half.jpg",
"034/undistort/images/frame_0000039.jpg.half.jpg",
"034/undistort/images/frame_0000040.jpg.half.jpg",
"034/undistort/images/frame_0000041.jpg.half.jpg",
"034/undistort/images/frame_0000043.jpg.half.jpg",
"034/undistort/images/frame_0000044.jpg.half.jpg",
"034/undistort/images/frame_0000045.jpg.half.jpg",
"034/undistort/images/frame_0000049.jpg.half.jpg",
"034/undistort/images/frame_0000106.jpg.half.jpg",
"034/undistort/images/frame_0000107.jpg.half.jpg",
"034/undistort/images/frame_0000108.jpg.half.jpg",
"034/undistort/images/frame_0000109.jpg.half.jpg",
"034/undistort/images/frame_0000110.jpg.half.jpg",
"034/undistort/images/frame_0000111.jpg.half.jpg",
"034/undistort/images/frame_0000112.jpg.half.jpg",
"034/undistort/images/frame_0000113.jpg.half.jpg",
"034/undistort/images/frame_0000114.jpg.half.jpg",
"034/undistort/images/frame_0000115.jpg.half.jpg",
"034/undistort/images/frame_0000116.jpg.half.jpg",
"034/undistort/images/frame_0000117.jpg.half.jpg",
"034/undistort/images/frame_0000118.jpg.half.jpg",
"034/undistort/images/frame_0000119.jpg.half.jpg",
"034/undistort/images/frame_0000120.jpg.half.jpg",
"034/undistort/images/frame_0000121.jpg.half.jpg",
"034/undistort/images/frame_0000122.jpg.half.jpg",
"034/undistort/images/frame_0000123.jpg.half.jpg",
"034/undistort/images/frame_0000124.jpg.half.jpg",
"034/undistort/images/frame_0000125.jpg.half.jpg",
"034/undistort/images/frame_0000126.jpg.half.jpg",
"034/undistort/images/frame_0000127.jpg.half.jpg",
"034/undistort/images/frame_0000128.jpg.half.jpg",
"034/undistort/images/frame_0000129.jpg.half.jpg",
"034/undistort/images/frame_0000130.jpg.half.jpg",
"034/undistort/images/frame_0000131.jpg.half.jpg",
"034/undistort/images/frame_0000132.jpg.half.jpg",
"034/undistort/images/frame_0000133.jpg.half.jpg",
"034/undistort/images/frame_0000134.jpg.half.jpg",
"034/undistort/images/frame_0000135.jpg.half.jpg",
"034/undistort/images/frame_0000136.jpg.half.jpg",
"034/undistort/images/frame_0000137.jpg.half.jpg",
"034/undistort/images/frame_0000138.jpg.half.jpg",
"034/undistort/images/frame_0000139.jpg.half.jpg",
"034/undistort/images/frame_0000140.jpg.half.jpg",
"034/undistort/images/frame_0000141.jpg.half.jpg",
"034/undistort/images/frame_0000142.jpg.half.jpg",
"034/undistort/images/frame_0000143.jpg.half.jpg",
"034/undistort/images/frame_0000144.jpg.half.jpg",
"034/undistort/images/frame_0000145.jpg.half.jpg",
"034/undistort/images/frame_0000146.jpg.half.jpg",
"034/undistort/images/frame_0000147.jpg.half.jpg",
"034/undistort/images/frame_0000148.jpg.half.jpg",
"034/undistort/images/frame_0000149.jpg.half.jpg",
"034/undistort/images/frame_0000150.jpg.half.jpg",
"034/undistort/images/frame_0000151.jpg.half.jpg",
"034/undistort/images/frame_0000152.jpg.half.jpg",
"034/undistort/images/frame_0000153.jpg.half.jpg",
"034/undistort/images/frame_0000154.jpg.half.jpg",
"034/undistort/images/frame_0000155.jpg.half.jpg",
"034/undistort/images/frame_0000156.jpg.half.jpg",
"034/undistort/images/frame_0000157.jpg.half.jpg",
"034/undistort/images/frame_0000158.jpg.half.jpg",
"034/undistort/images/frame_0000159.jpg.half.jpg",
"034/undistort/images/frame_0000160.jpg.half.jpg",
"034/undistort/images/frame_0000161.jpg.half.jpg",
"034/undistort/images/frame_0000162.jpg.half.jpg",
"034/undistort/images/frame_0000163.jpg.half.jpg",
"034/undistort/images/frame_0000164.jpg.half.jpg",
"034/undistort/images/frame_0000165.jpg.half.jpg",
"034/undistort/images/frame_0000166.jpg.half.jpg",
"034/undistort/images/frame_0000167.jpg.half.jpg",
"034/undistort/images/frame_0000168.jpg.half.jpg",
"034/undistort/images/frame_0000169.jpg.half.jpg",
"034/undistort/images/frame_0000170.jpg.half.jpg",
"034/undistort/images/frame_0000171.jpg.half.jpg",
"034/undistort/images/frame_0000172.jpg.half.jpg",
"034/undistort/images/frame_0000173.jpg.half.jpg",
"034/undistort/images/frame_0000174.jpg.half.jpg",
"034/undistort/images/frame_0000175.jpg.half.jpg",
"034/undistort/images/frame_0000176.jpg.half.jpg",
"034/undistort/images/frame_0000177.jpg.half.jpg",
"034/undistort/images/frame_0000178.jpg.half.jpg",
"034/undistort/images/frame_0000179.jpg.half.jpg",
"034/undistort/images/frame_0000180.jpg.half.jpg",
"034/undistort/images/frame_0000181.jpg.half.jpg",
"034/undistort/images/frame_0000182.jpg.half.jpg",
"034/undistort/images/frame_0000184.jpg.half.jpg",
"034/undistort/images/frame_0000185.jpg.half.jpg",
"034/undistort/images/frame_0000186.jpg.half.jpg",
"034/undistort/images/frame_0000187.jpg.half.jpg",
"034/undistort/images/frame_0000188.jpg.half.jpg",
"034/undistort/images/frame_0000189.jpg.half.jpg",
"034/undistort/images/frame_0000190.jpg.half.jpg",
"034/undistort/images/frame_0000191.jpg.half.jpg",
"034/undistort/images/frame_0000192.jpg.half.jpg",
"034/undistort/images/frame_0000193.jpg.half.jpg",
"034/undistort/images/frame_0000194.jpg.half.jpg",
"034/undistort/images/frame_0000195.jpg.half.jpg",
"034/undistort/images/frame_0000196.jpg.half.jpg",
"034/undistort/images/frame_0000197.jpg.half.jpg",
"034/undistort/images/frame_0000198.jpg.half.jpg",
"034/undistort/images/frame_0000199.jpg.half.jpg",
"034/undistort/images/frame_0000200.jpg.half.jpg",
"034/undistort/images/frame_0000201.jpg.half.jpg",
"034/undistort/images/frame_0000202.jpg.half.jpg",
"034/undistort/images/frame_0000203.jpg.half.jpg",
"034/undistort/images/frame_0000204.jpg.half.jpg",
"034/undistort/images/frame_0000205.jpg.half.jpg",
"034/undistort/images/frame_0000206.jpg.half.jpg",
"034/undistort/images/frame_0000207.jpg.half.jpg",
"034/undistort/images/frame_0000208.jpg.half.jpg",
"034/undistort/images/frame_0000209.jpg.half.jpg",
"034/undistort/images/frame_0000210.jpg.half.jpg",
"034/undistort/images/frame_0000211.jpg.half.jpg",
"034/undistort/images/frame_0000213.jpg.half.jpg",
"034/undistort/images/frame_0000214.jpg.half.jpg",
"034/undistort/images/frame_0000215.jpg.half.jpg",
"034/undistort/images/frame_0000216.jpg.half.jpg",
"034/undistort/images/frame_0000218.jpg.half.jpg",
"034/undistort/images/frame_0000219.jpg.half.jpg",
"034/undistort/images/frame_0000220.jpg.half.jpg",
"034/undistort/images/frame_0000221.jpg.half.jpg",
"034/undistort/images/frame_0000222.jpg.half.jpg",
"034/undistort/images/frame_0000223.jpg.half.jpg",
"034/undistort/images/frame_0000224.jpg.half.jpg",
"034/undistort/images/frame_0000225.jpg.half.jpg",
"034/undistort/images/frame_0000226.jpg.half.jpg",
"034/undistort/images/frame_0000227.jpg.half.jpg",
"034/undistort/images/frame_0000228.jpg.half.jpg",
"034/undistort/images/frame_0000229.jpg.half.jpg",
"034/undistort/images/frame_0000232.jpg.half.jpg",
"034/undistort/images/frame_0000233.jpg.half.jpg",
"034/undistort/images/frame_0000234.jpg.half.jpg",
"034/undistort/images/frame_0000236.jpg.half.jpg",
"034/undistort/images/frame_0000237.jpg.half.jpg",
"034/undistort/images/frame_0000239.jpg.half.jpg",
"034/undistort/images/frame_0000240.jpg.half.jpg",
"034/undistort/images/frame_0000241.jpg.half.jpg",
"034/undistort/images/frame_0000242.jpg.half.jpg",
"034/undistort/images/frame_0000243.jpg.half.jpg",
"034/undistort/images/frame_0000247.jpg.half.jpg",
"034/undistort/images/frame_0000248.jpg.half.jpg",
"034/undistort/images/frame_0000249.jpg.half.jpg",
"034/undistort/images/frame_0000250.jpg.half.jpg",
"034/undistort/images/frame_0000254.jpg.half.jpg",
"034/undistort/images/frame_0000255.jpg.half.jpg",
"034/undistort/images/frame_0000256.jpg.half.jpg",
"034/undistort/images/frame_0000257.jpg.half.jpg",
"034/undistort/images/frame_0000259.jpg.half.jpg",
"034/undistort/images/frame_0000260.jpg.half.jpg",
"034/undistort/images/frame_0000261.jpg.half.jpg",
"034/undistort/images/frame_0000262.jpg.half.jpg",
"034/undistort/images/frame_0000263.jpg.half.jpg",
"034/undistort/images/frame_0000264.jpg.half.jpg",
"034/undistort/images/frame_0000265.jpg.half.jpg",
"034/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000001.jpg.half.jpg",
"036/undistort/images/frame_0000002.jpg.half.jpg",
"036/undistort/images/frame_0000003.jpg.half.jpg",
"036/undistort/images/frame_0000004.jpg.half.jpg",
"036/undistort/images/frame_0000005.jpg.half.jpg",
"036/undistort/images/frame_0000006.jpg.half.jpg",
"036/undistort/images/frame_0000007.jpg.half.jpg",
"036/undistort/images/frame_0000008.jpg.half.jpg",
"036/undistort/images/frame_0000009.jpg.half.jpg",
"036/undistort/images/frame_0000010.jpg.half.jpg",
"036/undistort/images/frame_0000011.jpg.half.jpg",
"036/undistort/images/frame_0000012.jpg.half.jpg",
"036/undistort/images/frame_0000013.jpg.half.jpg",
"036/undistort/images/frame_0000014.jpg.half.jpg",
"036/undistort/images/frame_0000015.jpg.half.jpg",
"036/undistort/images/frame_0000016.jpg.half.jpg",
"036/undistort/images/frame_0000017.jpg.half.jpg",
"036/undistort/images/frame_0000018.jpg.half.jpg",
"036/undistort/images/frame_0000019.jpg.half.jpg",
"036/undistort/images/frame_0000020.jpg.half.jpg",
"036/undistort/images/frame_0000021.jpg.half.jpg",
"036/undistort/images/frame_0000022.jpg.half.jpg",
"036/undistort/images/frame_0000023.jpg.half.jpg",
"036/undistort/images/frame_0000024.jpg.half.jpg",
"036/undistort/images/frame_0000025.jpg.half.jpg",
"036/undistort/images/frame_0000026.jpg.half.jpg",
"036/undistort/images/frame_0000027.jpg.half.jpg",
"036/undistort/images/frame_0000028.jpg.half.jpg",
"036/undistort/images/frame_0000029.jpg.half.jpg",
"036/undistort/images/frame_0000030.jpg.half.jpg",
"036/undistort/images/frame_0000031.jpg.half.jpg",
"036/undistort/images/frame_0000032.jpg.half.jpg",
"036/undistort/images/frame_0000033.jpg.half.jpg",
"036/undistort/images/frame_0000034.jpg.half.jpg",
"036/undistort/images/frame_0000035.jpg.half.jpg",
"036/undistort/images/frame_0000036.jpg.half.jpg",
"036/undistort/images/frame_0000037.jpg.half.jpg",
"036/undistort/images/frame_0000038.jpg.half.jpg",
"036/undistort/images/frame_0000039.jpg.half.jpg",
"036/undistort/images/frame_0000041.jpg.half.jpg",
"036/undistort/images/frame_0000042.jpg.half.jpg",
"036/undistort/images/frame_0000043.jpg.half.jpg",
"036/undistort/images/frame_0000044.jpg.half.jpg",
"036/undistort/images/frame_0000045.jpg.half.jpg",
"036/undistort/images/frame_0000046.jpg.half.jpg",
"036/undistort/images/frame_0000047.jpg.half.jpg",
"036/undistort/images/frame_0000048.jpg.half.jpg",
"036/undistort/images/frame_0000049.jpg.half.jpg",
"036/undistort/images/frame_0000050.jpg.half.jpg",
"036/undistort/images/frame_0000051.jpg.half.jpg",
"036/undistort/images/frame_0000052.jpg.half.jpg",
"036/undistort/images/frame_0000053.jpg.half.jpg",
"036/undistort/images/frame_0000054.jpg.half.jpg",
"036/undistort/images/frame_0000055.jpg.half.jpg",
"036/undistort/images/frame_0000056.jpg.half.jpg",
"036/undistort/images/frame_0000057.jpg.half.jpg",
"036/undistort/images/frame_0000058.jpg.half.jpg",
"036/undistort/images/frame_0000059.jpg.half.jpg",
"036/undistort/images/frame_0000060.jpg.half.jpg",
"036/undistort/images/frame_0000061.jpg.half.jpg",
"036/undistort/images/frame_0000062.jpg.half.jpg",
"036/undistort/images/frame_0000063.jpg.half.jpg",
"036/undistort/images/frame_0000064.jpg.half.jpg",
"036/undistort/images/frame_0000065.jpg.half.jpg",
"036/undistort/images/frame_0000066.jpg.half.jpg",
"036/undistort/images/frame_0000067.jpg.half.jpg",
"036/undistort/images/frame_0000068.jpg.half.jpg",
"036/undistort/images/frame_0000069.jpg.half.jpg",
"036/undistort/images/frame_0000070.jpg.half.jpg",
"036/undistort/images/frame_0000071.jpg.half.jpg",
"036/undistort/images/frame_0000072.jpg.half.jpg",
"036/undistort/images/frame_0000073.jpg.half.jpg",
"036/undistort/images/frame_0000074.jpg.half.jpg",
"036/undistort/images/frame_0000075.jpg.half.jpg",
"036/undistort/images/frame_0000076.jpg.half.jpg",
"036/undistort/images/frame_0000077.jpg.half.jpg",
"036/undistort/images/frame_0000078.jpg.half.jpg",
"036/undistort/images/frame_0000079.jpg.half.jpg",
"036/undistort/images/frame_0000080.jpg.half.jpg",
"036/undistort/images/frame_0000081.jpg.half.jpg",
"036/undistort/images/frame_0000082.jpg.half.jpg",
"036/undistort/images/frame_0000083.jpg.half.jpg",
"036/undistort/images/frame_0000084.jpg.half.jpg",
"036/undistort/images/frame_0000085.jpg.half.jpg",
"036/undistort/images/frame_0000086.jpg.half.jpg",
"036/undistort/images/frame_0000087.jpg.half.jpg",
"036/undistort/images/frame_0000088.jpg.half.jpg",
"036/undistort/images/frame_0000089.jpg.half.jpg",
"036/undistort/images/frame_0000090.jpg.half.jpg",
"036/undistort/images/frame_0000091.jpg.half.jpg",
"036/undistort/images/frame_0000092.jpg.half.jpg",
"036/undistort/images/frame_0000093.jpg.half.jpg",
"036/undistort/images/frame_0000095.jpg.half.jpg",
"036/undistort/images/frame_0000096.jpg.half.jpg",
"036/undistort/images/frame_0000097.jpg.half.jpg",
"036/undistort/images/frame_0000098.jpg.half.jpg",
"036/undistort/images/frame_0000099.jpg.half.jpg",
"036/undistort/images/frame_0000100.jpg.half.jpg",
"036/undistort/images/frame_0000101.jpg.half.jpg",
"036/undistort/images/frame_0000102.jpg.half.jpg",
"036/undistort/images/frame_0000103.jpg.half.jpg",
"036/undistort/images/frame_0000104.jpg.half.jpg",
"036/undistort/images/frame_0000105.jpg.half.jpg",
"036/undistort/images/frame_0000106.jpg.half.jpg",
"036/undistort/images/frame_0000107.jpg.half.jpg",
"036/undistort/images/frame_0000108.jpg.half.jpg",
"036/undistort/images/frame_0000109.jpg.half.jpg",
"036/undistort/images/frame_0000110.jpg.half.jpg",
"036/undistort/images/frame_0000111.jpg.half.jpg",
"036/undistort/images/frame_0000112.jpg.half.jpg",
"036/undistort/images/frame_0000113.jpg.half.jpg",
"036/undistort/images/frame_0000114.jpg.half.jpg",
"036/undistort/images/frame_0000115.jpg.half.jpg",
"036/undistort/images/frame_0000116.jpg.half.jpg",
"036/undistort/images/frame_0000117.jpg.half.jpg",
"036/undistort/images/frame_0000118.jpg.half.jpg",
"036/undistort/images/frame_0000121.jpg.half.jpg",
"036/undistort/images/frame_0000122.jpg.half.jpg",
"036/undistort/images/frame_0000123.jpg.half.jpg",
"036/undistort/images/frame_0000124.jpg.half.jpg",
"036/undistort/images/frame_0000125.jpg.half.jpg",
"036/undistort/images/frame_0000126.jpg.half.jpg",
"036/undistort/images/frame_0000127.jpg.half.jpg",
"036/undistort/images/frame_0000128.jpg.half.jpg",
"036/undistort/images/frame_0000129.jpg.half.jpg",
"036/undistort/images/frame_0000130.jpg.half.jpg",
"036/undistort/images/frame_0000131.jpg.half.jpg",
"036/undistort/images/frame_0000132.jpg.half.jpg",
"036/undistort/images/frame_0000133.jpg.half.jpg",
"036/undistort/images/frame_0000134.jpg.half.jpg",
"036/undistort/images/frame_0000135.jpg.half.jpg",
"036/undistort/images/frame_0000136.jpg.half.jpg",
"036/undistort/images/frame_0000137.jpg.half.jpg",
"036/undistort/images/frame_0000138.jpg.half.jpg",
"036/undistort/images/frame_0000139.jpg.half.jpg",
"036/undistort/images/frame_0000140.jpg.half.jpg",
"036/undistort/images/frame_0000141.jpg.half.jpg",
"036/undistort/images/frame_0000142.jpg.half.jpg",
"036/undistort/images/frame_0000143.jpg.half.jpg",
"036/undistort/images/frame_0000144.jpg.half.jpg",
"036/undistort/images/frame_0000145.jpg.half.jpg",
"036/undistort/images/frame_0000146.jpg.half.jpg",
"036/undistort/images/frame_0000147.jpg.half.jpg",
"036/undistort/images/frame_0000148.jpg.half.jpg",
"036/undistort/images/frame_0000149.jpg.half.jpg",
"036/undistort/images/frame_0000150.jpg.half.jpg",
"036/undistort/images/frame_0000151.jpg.half.jpg",
"036/undistort/images/frame_0000152.jpg.half.jpg",
"036/undistort/images/frame_0000153.jpg.half.jpg",
"036/undistort/images/frame_0000154.jpg.half.jpg",
"036/undistort/images/frame_0000155.jpg.half.jpg",
"036/undistort/images/frame_0000156.jpg.half.jpg",
"036/undistort/images/frame_0000157.jpg.half.jpg",
"036/undistort/images/frame_0000158.jpg.half.jpg",
"036/undistort/images/frame_0000159.jpg.half.jpg",
"036/undistort/images/frame_0000160.jpg.half.jpg",
"036/undistort/images/frame_0000161.jpg.half.jpg",
"036/undistort/images/frame_0000162.jpg.half.jpg",
"036/undistort/images/frame_0000163.jpg.half.jpg",
"036/undistort/images/frame_0000164.jpg.half.jpg",
"036/undistort/images/frame_0000165.jpg.half.jpg",
"036/undistort/images/frame_0000166.jpg.half.jpg",
"036/undistort/images/frame_0000167.jpg.half.jpg",
"036/undistort/images/frame_0000168.jpg.half.jpg",
"036/undistort/images/frame_0000169.jpg.half.jpg",
"036/undistort/images/frame_0000170.jpg.half.jpg",
"036/undistort/images/frame_0000171.jpg.half.jpg",
"036/undistort/images/frame_0000172.jpg.half.jpg",
"036/undistort/images/frame_0000173.jpg.half.jpg",
"036/undistort/images/frame_0000174.jpg.half.jpg",
"036/undistort/images/frame_0000175.jpg.half.jpg",
"036/undistort/images/frame_0000176.jpg.half.jpg",
"036/undistort/images/frame_0000177.jpg.half.jpg",
"036/undistort/images/frame_0000178.jpg.half.jpg",
"036/undistort/images/frame_0000179.jpg.half.jpg",
"036/undistort/images/frame_0000180.jpg.half.jpg",
"036/undistort/images/frame_0000181.jpg.half.jpg",
"036/undistort/images/frame_0000182.jpg.half.jpg",
"036/undistort/images/frame_0000183.jpg.half.jpg",
"036/undistort/images/frame_0000184.jpg.half.jpg",
"036/undistort/images/frame_0000185.jpg.half.jpg",
"036/undistort/images/frame_0000186.jpg.half.jpg",
"036/undistort/images/frame_0000187.jpg.half.jpg",
"036/undistort/images/frame_0000188.jpg.half.jpg",
"036/undistort/images/frame_0000189.jpg.half.jpg",
"036/undistort/images/frame_0000190.jpg.half.jpg",
"036/undistort/images/frame_0000191.jpg.half.jpg",
"036/undistort/images/frame_0000192.jpg.half.jpg",
"036/undistort/images/frame_0000193.jpg.half.jpg",
"036/undistort/images/frame_0000194.jpg.half.jpg",
"036/undistort/images/frame_0000195.jpg.half.jpg",
"036/undistort/images/frame_0000196.jpg.half.jpg",
"036/undistort/images/frame_0000197.jpg.half.jpg",
"036/undistort/images/frame_0000198.jpg.half.jpg",
"036/undistort/images/frame_0000199.jpg.half.jpg",
"036/undistort/images/frame_0000200.jpg.half.jpg",
"036/undistort/images/frame_0000201.jpg.half.jpg",
"036/undistort/images/frame_0000202.jpg.half.jpg",
"036/undistort/images/frame_0000203.jpg.half.jpg",
"036/undistort/images/frame_0000204.jpg.half.jpg",
"036/undistort/images/frame_0000205.jpg.half.jpg",
"036/undistort/images/frame_0000206.jpg.half.jpg",
"036/undistort/images/frame_0000207.jpg.half.jpg",
"036/undistort/images/frame_0000208.jpg.half.jpg",
"036/undistort/images/frame_0000209.jpg.half.jpg",
"036/undistort/images/frame_0000210.jpg.half.jpg",
"036/undistort/images/frame_0000211.jpg.half.jpg",
"036/undistort/images/frame_0000212.jpg.half.jpg",
"036/undistort/images/frame_0000213.jpg.half.jpg",
"036/undistort/images/frame_0000214.jpg.half.jpg",
"036/undistort/images/frame_0000215.jpg.half.jpg",
"036/undistort/images/frame_0000216.jpg.half.jpg",
"036/undistort/images/frame_0000217.jpg.half.jpg",
"036/undistort/images/frame_0000218.jpg.half.jpg",
"036/undistort/images/frame_0000219.jpg.half.jpg",
"036/undistort/images/frame_0000220.jpg.half.jpg",
"036/undistort/images/frame_0000221.jpg.half.jpg",
"036/undistort/images/frame_0000222.jpg.half.jpg",
"036/undistort/images/frame_0000223.jpg.half.jpg",
"036/undistort/images/frame_0000224.jpg.half.jpg",
"036/undistort/images/frame_0000225.jpg.half.jpg",
"036/undistort/images/frame_0000226.jpg.half.jpg",
"036/undistort/images/frame_0000227.jpg.half.jpg",
"036/undistort/images/frame_0000228.jpg.half.jpg",
"036/undistort/images/frame_0000229.jpg.half.jpg",
"036/undistort/images/frame_0000230.jpg.half.jpg",
"036/undistort/images/frame_0000231.jpg.half.jpg",
"036/undistort/images/frame_0000232.jpg.half.jpg",
"036/undistort/images/frame_0000233.jpg.half.jpg",
"036/undistort/images/frame_0000234.jpg.half.jpg",
"036/undistort/images/frame_0000235.jpg.half.jpg",
"036/undistort/images/frame_0000236.jpg.half.jpg",
"036/undistort/images/frame_0000237.jpg.half.jpg",
"036/undistort/images/frame_0000238.jpg.half.jpg",
"036/undistort/images/frame_0000239.jpg.half.jpg",
"036/undistort/images/frame_0000240.jpg.half.jpg",
"036/undistort/images/frame_0000241.jpg.half.jpg",
"036/undistort/images/frame_0000242.jpg.half.jpg",
"036/undistort/images/frame_0000243.jpg.half.jpg",
"036/undistort/images/frame_0000244.jpg.half.jpg",
"036/undistort/images/frame_0000245.jpg.half.jpg",
"036/undistort/images/frame_0000246.jpg.half.jpg",
"036/undistort/images/frame_0000247.jpg.half.jpg",
"036/undistort/images/frame_0000248.jpg.half.jpg",
"036/undistort/images/frame_0000249.jpg.half.jpg",
"036/undistort/images/frame_0000250.jpg.half.jpg",
"036/undistort/images/frame_0000251.jpg.half.jpg",
"036/undistort/images/frame_0000252.jpg.half.jpg",
"036/undistort/images/frame_0000253.jpg.half.jpg",
"036/undistort/images/frame_0000254.jpg.half.jpg",
"036/undistort/images/frame_0000255.jpg.half.jpg",
"036/undistort/images/frame_0000256.jpg.half.jpg",
"036/undistort/images/frame_0000257.jpg.half.jpg",
"036/undistort/images/frame_0000258.jpg.half.jpg",
"036/undistort/images/frame_0000259.jpg.half.jpg",
"036/undistort/images/frame_0000260.jpg.half.jpg",
"036/undistort/images/frame_0000261.jpg.half.jpg",
"036/undistort/images/frame_0000262.jpg.half.jpg",
"036/undistort/images/frame_0000263.jpg.half.jpg",
"036/undistort/images/frame_0000264.jpg.half.jpg",
"036/undistort/images/frame_0000265.jpg.half.jpg",
"036/undistort/images/frame_0000266.jpg.half.jpg",
"036/undistort/images/frame_0000267.jpg.half.jpg",
"036/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000269.jpg.half.jpg",
"036/undistort/images/frame_0000270.jpg.half.jpg",
"036/undistort/images/frame_0000271.jpg.half.jpg",
"036/undistort/images/frame_0000272.jpg.half.jpg",
"036/undistort/images/frame_0000273.jpg.half.jpg",
"036/undistort/images/frame_0000274.jpg.half.jpg",
"036/undistort/images/frame_0000275.jpg.half.jpg",
"036/undistort/images/frame_0000276.jpg.half.jpg",
"036/undistort/images/frame_0000277.jpg.half.jpg",
"036/undistort/images/frame_0000278.jpg.half.jpg",
"036/undistort/images/frame_0000279.jpg.half.jpg",
"036/undistort/images/frame_0000280.jpg.half.jpg",
"036/undistort/images/frame_0000281.jpg.half.jpg",
"036/undistort/images/frame_0000282.jpg.half.jpg",
"036/undistort/images/frame_0000283.jpg.half.jpg",
"036/undistort/images/frame_0000284.jpg.half.jpg",
"036/undistort/images/frame_0000285.jpg.half.jpg",
"036/undistort/images/frame_0000286.jpg.half.jpg",
"036/undistort/images/frame_0000287.jpg.half.jpg",
"036/undistort/images/frame_0000288.jpg.half.jpg",
"036/undistort/images/frame_0000289.jpg.half.jpg",
"036/undistort/images/frame_0000290.jpg.half.jpg",
"036/undistort/images/frame_0000291.jpg.half.jpg",
"036/undistort/images/frame_0000292.jpg.half.jpg",
"036/undistort/images/frame_0000293.jpg.half.jpg",
"036/undistort/images/frame_0000294.jpg.half.jpg",
"036/undistort/images/frame_0000295.jpg.half.jpg",
"036/undistort/images/frame_0000296.jpg.half.jpg",
"036/undistort/images/frame_0000297.jpg.half.jpg",
"036/undistort/images/frame_0000298.jpg.half.jpg",
"036/undistort/images/frame_0000299.jpg.half.jpg",
"036/undistort/images/frame_0000300.jpg.half.jpg",
"036/undistort/images/frame_0000301.jpg.half.jpg",
"036/undistort/images/frame_0000302.jpg.half.jpg",
"036/undistort/images/frame_0000303.jpg.half.jpg",
"036/undistort/images/frame_0000304.jpg.half.jpg",
"036/undistort/images/frame_0000305.jpg.half.jpg",
"036/undistort/images/frame_0000306.jpg.half.jpg",
"036/undistort/images/frame_0000307.jpg.half.jpg",
"036/undistort/images/frame_0000308.jpg.half.jpg",
"036/undistort/images/frame_0000309.jpg.half.jpg",
"036/undistort/images/frame_0000310.jpg.half.jpg",
"036/undistort/images/frame_0000311.jpg.half.jpg",
"036/undistort/images/frame_0000312.jpg.half.jpg",
"036/undistort/images/frame_0000313.jpg.half.jpg",
"036/undistort/images/frame_0000314.jpg.half.jpg",
"036/undistort/images/frame_0000315.jpg.half.jpg",
"036/undistort/images/frame_0000316.jpg.half.jpg",
"036/undistort/images/frame_0000317.jpg.half.jpg",
"036/undistort/images/frame_0000318.jpg.half.jpg",
"036/undistort/images/frame_0000319.jpg.half.jpg",
"036/undistort/images/frame_0000320.jpg.half.jpg",
"036/undistort/images/frame_0000321.jpg.half.jpg",
"036/undistort/images/frame_0000322.jpg.half.jpg",
"036/undistort/images/frame_0000323.jpg.half.jpg",
"036/undistort/images/frame_0000324.jpg.half.jpg",
"036/undistort/images/frame_0000325.jpg.half.jpg",
"036/undistort/images/frame_0000326.jpg.half.jpg",
"036/undistort/images/frame_0000327.jpg.half.jpg",
"036/undistort/images/frame_0000328.jpg.half.jpg",
"036/undistort/images/frame_0000329.jpg.half.jpg",
"036/undistort/images/frame_0000330.jpg.half.jpg",
"036/undistort/images/frame_0000331.jpg.half.jpg",
"036/undistort/images/frame_0000332.jpg.half.jpg",
"036/undistort/images/frame_0000334.jpg.half.jpg",
"036/undistort/images/frame_0000335.jpg.half.jpg",
"036/undistort/images/frame_0000336.jpg.half.jpg",
"036/undistort/images/frame_0000337.jpg.half.jpg",
"036/undistort/images/frame_0000338.jpg.half.jpg",
"036/undistort/images/frame_0000339.jpg.half.jpg",
"036/undistort/images/frame_0000340.jpg.half.jpg",
"036/undistort/images/frame_0000341.jpg.half.jpg",
"036/undistort/images/frame_0000342.jpg.half.jpg",
"036/undistort/images/frame_0000343.jpg.half.jpg",
"036/undistort/images/frame_0000344.jpg.half.jpg",
"036/undistort/images/frame_0000345.jpg.half.jpg",
"036/undistort/images/frame_0000346.jpg.half.jpg",
"036/undistort/images/frame_0000347.jpg.half.jpg",
"036/undistort/images/frame_0000348.jpg.half.jpg",
"036/undistort/images/frame_0000349.jpg.half.jpg",
"036/undistort/images/frame_0000350.jpg.half.jpg",
"036/undistort/images/frame_0000351.jpg.half.jpg",
"036/undistort/images/frame_0000352.jpg.half.jpg",
"036/undistort/images/frame_0000353.jpg.half.jpg",
"036/undistort/images/frame_0000354.jpg.half.jpg",
"036/undistort/images/frame_0000355.jpg.half.jpg",
"036/undistort/images/frame_0000356.jpg.half.jpg",
"036/undistort/images/frame_0000357.jpg.half.jpg",
"036/undistort/images/frame_0000358.jpg.half.jpg",
"036/undistort/images/frame_0000359.jpg.half.jpg",
"036/undistort/images/frame_0000360.jpg.half.jpg",
"036/undistort/images/frame_0000361.jpg.half.jpg",
"036/undistort/images/frame_0000362.jpg.half.jpg",
"036/undistort/images/frame_0000363.jpg.half.jpg",
"036/undistort/images/frame_0000364.jpg.half.jpg",
"036/undistort/images/frame_0000365.jpg.half.jpg",
"036/undistort/images/frame_0000366.jpg.half.jpg",
"036/undistort/images/frame_0000367.jpg.half.jpg",
"036/undistort/images/frame_0000368.jpg.half.jpg",
"036/undistort/images/frame_0000369.jpg.half.jpg",
"036/undistort/images/frame_0000370.jpg.half.jpg",
"036/undistort/images/frame_0000371.jpg.half.jpg",
"036/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000001.jpg.half.jpg",
"037/undistort/images/frame_0000002.jpg.half.jpg",
"037/undistort/images/frame_0000003.jpg.half.jpg",
"037/undistort/images/frame_0000004.jpg.half.jpg",
"037/undistort/images/frame_0000005.jpg.half.jpg",
"037/undistort/images/frame_0000006.jpg.half.jpg",
"037/undistort/images/frame_0000007.jpg.half.jpg",
"037/undistort/images/frame_0000008.jpg.half.jpg",
"037/undistort/images/frame_0000009.jpg.half.jpg",
"037/undistort/images/frame_0000010.jpg.half.jpg",
"037/undistort/images/frame_0000011.jpg.half.jpg",
"037/undistort/images/frame_0000012.jpg.half.jpg",
"037/undistort/images/frame_0000013.jpg.half.jpg",
"037/undistort/images/frame_0000014.jpg.half.jpg",
"037/undistort/images/frame_0000015.jpg.half.jpg",
"037/undistort/images/frame_0000016.jpg.half.jpg",
"037/undistort/images/frame_0000017.jpg.half.jpg",
"037/undistort/images/frame_0000018.jpg.half.jpg",
"037/undistort/images/frame_0000019.jpg.half.jpg",
"037/undistort/images/frame_0000020.jpg.half.jpg",
"037/undistort/images/frame_0000021.jpg.half.jpg",
"037/undistort/images/frame_0000022.jpg.half.jpg",
"037/undistort/images/frame_0000023.jpg.half.jpg",
"037/undistort/images/frame_0000024.jpg.half.jpg",
"037/undistort/images/frame_0000025.jpg.half.jpg",
"037/undistort/images/frame_0000026.jpg.half.jpg",
"037/undistort/images/frame_0000027.jpg.half.jpg",
"037/undistort/images/frame_0000028.jpg.half.jpg",
"037/undistort/images/frame_0000029.jpg.half.jpg",
"037/undistort/images/frame_0000030.jpg.half.jpg",
"037/undistort/images/frame_0000031.jpg.half.jpg",
"037/undistort/images/frame_0000032.jpg.half.jpg",
"037/undistort/images/frame_0000033.jpg.half.jpg",
"037/undistort/images/frame_0000034.jpg.half.jpg",
"037/undistort/images/frame_0000035.jpg.half.jpg",
"037/undistort/images/frame_0000036.jpg.half.jpg",
"037/undistort/images/frame_0000037.jpg.half.jpg",
"037/undistort/images/frame_0000038.jpg.half.jpg",
"037/undistort/images/frame_0000039.jpg.half.jpg",
"037/undistort/images/frame_0000040.jpg.half.jpg",
"037/undistort/images/frame_0000041.jpg.half.jpg",
"037/undistort/images/frame_0000042.jpg.half.jpg",
"037/undistort/images/frame_0000043.jpg.half.jpg",
"037/undistort/images/frame_0000044.jpg.half.jpg",
"037/undistort/images/frame_0000045.jpg.half.jpg",
"037/undistort/images/frame_0000046.jpg.half.jpg",
"037/undistort/images/frame_0000047.jpg.half.jpg",
"037/undistort/images/frame_0000048.jpg.half.jpg",
"037/undistort/images/frame_0000049.jpg.half.jpg",
"037/undistort/images/frame_0000050.jpg.half.jpg",
"037/undistort/images/frame_0000051.jpg.half.jpg",
"037/undistort/images/frame_0000052.jpg.half.jpg",
"037/undistort/images/frame_0000053.jpg.half.jpg",
"037/undistort/images/frame_0000054.jpg.half.jpg",
"037/undistort/images/frame_0000055.jpg.half.jpg",
"037/undistort/images/frame_0000056.jpg.half.jpg",
"037/undistort/images/frame_0000057.jpg.half.jpg",
"037/undistort/images/frame_0000058.jpg.half.jpg",
"037/undistort/images/frame_0000059.jpg.half.jpg",
"037/undistort/images/frame_0000060.jpg.half.jpg",
"037/undistort/images/frame_0000061.jpg.half.jpg",
"037/undistort/images/frame_0000062.jpg.half.jpg",
"037/undistort/images/frame_0000063.jpg.half.jpg",
"037/undistort/images/frame_0000064.jpg.half.jpg",
"037/undistort/images/frame_0000065.jpg.half.jpg",
"037/undistort/images/frame_0000066.jpg.half.jpg",
"037/undistort/images/frame_0000067.jpg.half.jpg",
"037/undistort/images/frame_0000068.jpg.half.jpg",
"037/undistort/images/frame_0000069.jpg.half.jpg",
"037/undistort/images/frame_0000070.jpg.half.jpg",
"037/undistort/images/frame_0000071.jpg.half.jpg",
"037/undistort/images/frame_0000072.jpg.half.jpg",
"037/undistort/images/frame_0000073.jpg.half.jpg",
"037/undistort/images/frame_0000074.jpg.half.jpg",
"037/undistort/images/frame_0000075.jpg.half.jpg",
"037/undistort/images/frame_0000076.jpg.half.jpg",
"037/undistort/images/frame_0000077.jpg.half.jpg",
"037/undistort/images/frame_0000078.jpg.half.jpg",
"037/undistort/images/frame_0000079.jpg.half.jpg",
"037/undistort/images/frame_0000080.jpg.half.jpg",
"037/undistort/images/frame_0000081.jpg.half.jpg",
"037/undistort/images/frame_0000082.jpg.half.jpg",
"037/undistort/images/frame_0000083.jpg.half.jpg",
"037/undistort/images/frame_0000084.jpg.half.jpg",
"037/undistort/images/frame_0000085.jpg.half.jpg",
"037/undistort/images/frame_0000086.jpg.half.jpg",
"037/undistort/images/frame_0000087.jpg.half.jpg",
"037/undistort/images/frame_0000088.jpg.half.jpg",
"037/undistort/images/frame_0000089.jpg.half.jpg",
"037/undistort/images/frame_0000090.jpg.half.jpg",
"037/undistort/images/frame_0000091.jpg.half.jpg",
"037/undistort/images/frame_0000092.jpg.half.jpg",
"037/undistort/images/frame_0000093.jpg.half.jpg",
"037/undistort/images/frame_0000094.jpg.half.jpg",
"037/undistort/images/frame_0000095.jpg.half.jpg",
"037/undistort/images/frame_0000096.jpg.half.jpg",
"037/undistort/images/frame_0000097.jpg.half.jpg",
"037/undistort/images/frame_0000098.jpg.half.jpg",
"037/undistort/images/frame_0000099.jpg.half.jpg",
"037/undistort/images/frame_0000100.jpg.half.jpg",
"037/undistort/images/frame_0000101.jpg.half.jpg",
"037/undistort/images/frame_0000102.jpg.half.jpg",
"037/undistort/images/frame_0000103.jpg.half.jpg",
"037/undistort/images/frame_0000104.jpg.half.jpg",
"037/undistort/images/frame_0000105.jpg.half.jpg",
"037/undistort/images/frame_0000106.jpg.half.jpg",
"037/undistort/images/frame_0000107.jpg.half.jpg",
"037/undistort/images/frame_0000108.jpg.half.jpg",
"037/undistort/images/frame_0000109.jpg.half.jpg",
"037/undistort/images/frame_0000110.jpg.half.jpg",
"037/undistort/images/frame_0000111.jpg.half.jpg",
"037/undistort/images/frame_0000112.jpg.half.jpg",
"037/undistort/images/frame_0000113.jpg.half.jpg",
"037/undistort/images/frame_0000114.jpg.half.jpg",
"037/undistort/images/frame_0000115.jpg.half.jpg",
"037/undistort/images/frame_0000116.jpg.half.jpg",
"037/undistort/images/frame_0000117.jpg.half.jpg",
"037/undistort/images/frame_0000118.jpg.half.jpg",
"037/undistort/images/frame_0000119.jpg.half.jpg",
"037/undistort/images/frame_0000120.jpg.half.jpg",
"037/undistort/images/frame_0000121.jpg.half.jpg",
"037/undistort/images/frame_0000122.jpg.half.jpg",
"037/undistort/images/frame_0000123.jpg.half.jpg",
"037/undistort/images/frame_0000124.jpg.half.jpg",
"037/undistort/images/frame_0000125.jpg.half.jpg",
"037/undistort/images/frame_0000126.jpg.half.jpg",
"037/undistort/images/frame_0000127.jpg.half.jpg",
"037/undistort/images/frame_0000128.jpg.half.jpg",
"037/undistort/images/frame_0000129.jpg.half.jpg",
"037/undistort/images/frame_0000130.jpg.half.jpg",
"037/undistort/images/frame_0000131.jpg.half.jpg",
"037/undistort/images/frame_0000132.jpg.half.jpg",
"037/undistort/images/frame_0000133.jpg.half.jpg",
"037/undistort/images/frame_0000134.jpg.half.jpg",
"037/undistort/images/frame_0000135.jpg.half.jpg",
"037/undistort/images/frame_0000136.jpg.half.jpg",
"037/undistort/images/frame_0000137.jpg.half.jpg",
"037/undistort/images/frame_0000138.jpg.half.jpg",
"037/undistort/images/frame_0000139.jpg.half.jpg",
"037/undistort/images/frame_0000140.jpg.half.jpg",
"037/undistort/images/frame_0000141.jpg.half.jpg",
"037/undistort/images/frame_0000142.jpg.half.jpg",
"037/undistort/images/frame_0000143.jpg.half.jpg",
"037/undistort/images/frame_0000144.jpg.half.jpg",
"037/undistort/images/frame_0000145.jpg.half.jpg",
"037/undistort/images/frame_0000146.jpg.half.jpg",
"037/undistort/images/frame_0000147.jpg.half.jpg",
"037/undistort/images/frame_0000148.jpg.half.jpg",
"037/undistort/images/frame_0000149.jpg.half.jpg",
"037/undistort/images/frame_0000150.jpg.half.jpg",
"037/undistort/images/frame_0000151.jpg.half.jpg",
"037/undistort/images/frame_0000152.jpg.half.jpg",
"037/undistort/images/frame_0000153.jpg.half.jpg",
"037/undistort/images/frame_0000154.jpg.half.jpg",
"037/undistort/images/frame_0000155.jpg.half.jpg",
"037/undistort/images/frame_0000156.jpg.half.jpg",
"037/undistort/images/frame_0000157.jpg.half.jpg",
"037/undistort/images/frame_0000158.jpg.half.jpg",
"037/undistort/images/frame_0000159.jpg.half.jpg",
"037/undistort/images/frame_0000160.jpg.half.jpg",
"037/undistort/images/frame_0000161.jpg.half.jpg",
"037/undistort/images/frame_0000162.jpg.half.jpg",
"037/undistort/images/frame_0000163.jpg.half.jpg",
"037/undistort/images/frame_0000164.jpg.half.jpg",
"037/undistort/images/frame_0000165.jpg.half.jpg",
"037/undistort/images/frame_0000166.jpg.half.jpg",
"037/undistort/images/frame_0000167.jpg.half.jpg",
"037/undistort/images/frame_0000168.jpg.half.jpg",
"037/undistort/images/frame_0000169.jpg.half.jpg",
"037/undistort/images/frame_0000170.jpg.half.jpg",
"037/undistort/images/frame_0000171.jpg.half.jpg",
"037/undistort/images/frame_0000172.jpg.half.jpg",
"037/undistort/images/frame_0000173.jpg.half.jpg",
"037/undistort/images/frame_0000174.jpg.half.jpg",
"037/undistort/images/frame_0000175.jpg.half.jpg",
"037/undistort/images/frame_0000176.jpg.half.jpg",
"037/undistort/images/frame_0000177.jpg.half.jpg",
"037/undistort/images/frame_0000178.jpg.half.jpg",
"037/undistort/images/frame_0000179.jpg.half.jpg",
"037/undistort/images/frame_0000180.jpg.half.jpg",
"037/undistort/images/frame_0000181.jpg.half.jpg",
"037/undistort/images/frame_0000182.jpg.half.jpg",
"037/undistort/images/frame_0000183.jpg.half.jpg",
"037/undistort/images/frame_0000184.jpg.half.jpg",
"037/undistort/images/frame_0000185.jpg.half.jpg",
"037/undistort/images/frame_0000186.jpg.half.jpg",
"037/undistort/images/frame_0000187.jpg.half.jpg",
"037/undistort/images/frame_0000188.jpg.half.jpg",
"037/undistort/images/frame_0000189.jpg.half.jpg",
"037/undistort/images/frame_0000190.jpg.half.jpg",
"037/undistort/images/frame_0000191.jpg.half.jpg",
"037/undistort/images/frame_0000192.jpg.half.jpg",
"037/undistort/images/frame_0000193.jpg.half.jpg",
"037/undistort/images/frame_0000194.jpg.half.jpg",
"037/undistort/images/frame_0000195.jpg.half.jpg",
"037/undistort/images/frame_0000196.jpg.half.jpg",
"037/undistort/images/frame_0000197.jpg.half.jpg",
"037/undistort/images/frame_0000198.jpg.half.jpg",
"037/undistort/images/frame_0000199.jpg.half.jpg",
"037/undistort/images/frame_0000200.jpg.half.jpg",
"037/undistort/images/frame_0000201.jpg.half.jpg",
"037/undistort/images/frame_0000202.jpg.half.jpg",
"037/undistort/images/frame_0000203.jpg.half.jpg",
"037/undistort/images/frame_0000204.jpg.half.jpg",
"037/undistort/images/frame_0000205.jpg.half.jpg",
"037/undistort/images/frame_0000206.jpg.half.jpg",
"037/undistort/images/frame_0000207.jpg.half.jpg",
"037/undistort/images/frame_0000208.jpg.half.jpg",
"037/undistort/images/frame_0000209.jpg.half.jpg",
"037/undistort/images/frame_0000210.jpg.half.jpg",
"037/undistort/images/frame_0000211.jpg.half.jpg",
"037/undistort/images/frame_0000212.jpg.half.jpg",
"037/undistort/images/frame_0000213.jpg.half.jpg",
"037/undistort/images/frame_0000214.jpg.half.jpg",
"037/undistort/images/frame_0000215.jpg.half.jpg",
"037/undistort/images/frame_0000216.jpg.half.jpg",
"037/undistort/images/frame_0000217.jpg.half.jpg",
"037/undistort/images/frame_0000218.jpg.half.jpg",
"037/undistort/images/frame_0000219.jpg.half.jpg",
"037/undistort/images/frame_0000220.jpg.half.jpg",
"037/undistort/images/frame_0000221.jpg.half.jpg",
"037/undistort/images/frame_0000222.jpg.half.jpg",
"037/undistort/images/frame_0000223.jpg.half.jpg",
"037/undistort/images/frame_0000224.jpg.half.jpg",
"037/undistort/images/frame_0000225.jpg.half.jpg",
"037/undistort/images/frame_0000226.jpg.half.jpg",
"037/undistort/images/frame_0000227.jpg.half.jpg",
"037/undistort/images/frame_0000228.jpg.half.jpg",
"037/undistort/images/frame_0000229.jpg.half.jpg",
"037/undistort/images/frame_0000230.jpg.half.jpg",
"037/undistort/images/frame_0000231.jpg.half.jpg",
"037/undistort/images/frame_0000232.jpg.half.jpg",
"037/undistort/images/frame_0000233.jpg.half.jpg",
"037/undistort/images/frame_0000234.jpg.half.jpg",
"037/undistort/images/frame_0000235.jpg.half.jpg",
"037/undistort/images/frame_0000236.jpg.half.jpg",
"037/undistort/images/frame_0000237.jpg.half.jpg",
"037/undistort/images/frame_0000238.jpg.half.jpg",
"037/undistort/images/frame_0000239.jpg.half.jpg",
"037/undistort/images/frame_0000240.jpg.half.jpg",
"037/undistort/images/frame_0000241.jpg.half.jpg",
"037/undistort/images/frame_0000242.jpg.half.jpg",
"037/undistort/images/frame_0000243.jpg.half.jpg",
"037/undistort/images/frame_0000244.jpg.half.jpg",
"037/undistort/images/frame_0000245.jpg.half.jpg",
"037/undistort/images/frame_0000246.jpg.half.jpg",
"037/undistort/images/frame_0000247.jpg.half.jpg",
"037/undistort/images/frame_0000248.jpg.half.jpg",
"037/undistort/images/frame_0000249.jpg.half.jpg",
"037/undistort/images/frame_0000250.jpg.half.jpg",
"037/undistort/images/frame_0000252.jpg.half.jpg",
"037/undistort/images/frame_0000253.jpg.half.jpg",
"037/undistort/images/frame_0000254.jpg.half.jpg",
"037/undistort/images/frame_0000255.jpg.half.jpg",
"037/undistort/images/frame_0000257.jpg.half.jpg",
"037/undistort/images/frame_0000260.jpg.half.jpg",
"037/undistort/images/frame_0000261.jpg.half.jpg",
"037/undistort/images/frame_0000262.jpg.half.jpg",
"037/undistort/images/frame_0000263.jpg.half.jpg",
"037/undistort/images/frame_0000264.jpg.half.jpg",
"037/undistort/images/frame_0000265.jpg.half.jpg",
"037/undistort/images/frame_0000266.jpg.half.jpg",
"037/undistort/images/frame_0000267.jpg.half.jpg",
"037/undistort/images/frame_0000268.jpg.half.jpg",
"037/undistort/images/frame_0000269.jpg.half.jpg",
"037/undistort/images/frame_0000270.jpg.half.jpg",
"037/undistort/images/frame_0000271.jpg.half.jpg",
"037/undistort/images/frame_0000272.jpg.half.jpg",
"037/undistort/images/frame_0000273.jpg.half.jpg",
"037/undistort/images/frame_0000274.jpg.half.jpg",
"037/undistort/images/frame_0000275.jpg.half.jpg",
"037/undistort/images/frame_0000276.jpg.half.jpg",
"037/undistort/images/frame_0000277.jpg.half.jpg",
"037/undistort/images/frame_0000278.jpg.half.jpg",
"037/undistort/images/frame_0000279.jpg.half.jpg",
"037/undistort/images/frame_0000280.jpg.half.jpg",
"037/undistort/images/frame_0000281.jpg.half.jpg",
"037/undistort/images/frame_0000282.jpg.half.jpg",
"037/undistort/images/frame_0000283.jpg.half.jpg",
"037/undistort/images/frame_0000284.jpg.half.jpg",
"037/undistort/images/frame_0000285.jpg.half.jpg",
"037/undistort/images/frame_0000286.jpg.half.jpg",
"037/undistort/images/frame_0000287.jpg.half.jpg",
"037/undistort/images/frame_0000288.jpg.half.jpg",
"037/undistort/images/frame_0000289.jpg.half.jpg",
"037/undistort/images/frame_0000290.jpg.half.jpg",
"037/undistort/images/frame_0000291.jpg.half.jpg",
"037/undistort/images/frame_0000292.jpg.half.jpg",
"037/undistort/images/frame_0000293.jpg.half.jpg",
"037/undistort/images/frame_0000294.jpg.half.jpg",
"037/undistort/images/frame_0000295.jpg.half.jpg",
"037/undistort/images/frame_0000296.jpg.half.jpg",
"037/undistort/images/frame_0000297.jpg.half.jpg",
"037/undistort/images/frame_0000298.jpg.half.jpg",
"037/undistort/images/frame_0000299.jpg.half.jpg",
"037/undistort/images/frame_0000300.jpg.half.jpg",
"037/undistort/images/frame_0000301.jpg.half.jpg",
"037/undistort/images/frame_0000302.jpg.half.jpg",
"037/undistort/images/frame_0000303.jpg.half.jpg",
"037/undistort/images/frame_0000304.jpg.half.jpg",
"037/undistort/images/frame_0000305.jpg.half.jpg",
"037/undistort/images/frame_0000306.jpg.half.jpg",
"037/undistort/images/frame_0000307.jpg.half.jpg",
"037/undistort/images/frame_0000308.jpg.half.jpg",
"037/undistort/images/frame_0000309.jpg.half.jpg",
"037/undistort/images/frame_0000310.jpg.half.jpg",
"037/undistort/images/frame_0000311.jpg.half.jpg",
"037/undistort/images/frame_0000312.jpg.half.jpg",
"037/undistort/images/frame_0000313.jpg.half.jpg",
"037/undistort/images/frame_0000314.jpg.half.jpg",
"037/undistort/images/frame_0000315.jpg.half.jpg",
"037/undistort/images/frame_0000316.jpg.half.jpg",
"037/undistort/images/frame_0000317.jpg.half.jpg",
"037/undistort/images/frame_0000318.jpg.half.jpg",
"037/undistort/images/frame_0000319.jpg.half.jpg",
"037/undistort/images/frame_0000320.jpg.half.jpg",
"037/undistort/images/frame_0000321.jpg.half.jpg",
"037/undistort/images/frame_0000322.jpg.half.jpg",
"037/undistort/images/frame_0000323.jpg.half.jpg",
"037/undistort/images/frame_0000324.jpg.half.jpg",
"037/undistort/images/frame_0000325.jpg.half.jpg",
"037/undistort/images/frame_0000326.jpg.half.jpg",
"037/undistort/images/frame_0000327.jpg.half.jpg",
"037/undistort/images/frame_0000328.jpg.half.jpg",
"037/undistort/images/frame_0000329.jpg.half.jpg",
"037/undistort/images/frame_0000330.jpg.half.jpg",
"037/undistort/images/frame_0000331.jpg.half.jpg",
"037/undistort/images/frame_0000332.jpg.half.jpg",
"037/undistort/images/frame_0000333.jpg.half.jpg",
"037/undistort/images/frame_0000334.jpg.half.jpg",
"037/undistort/images/frame_0000335.jpg.half.jpg",
"037/undistort/images/frame_0000336.jpg.half.jpg",
"037/undistort/images/frame_0000337.jpg.half.jpg",
"037/undistort/images/frame_0000338.jpg.half.jpg",
"037/undistort/images/frame_0000339.jpg.half.jpg",
"037/undistort/images/frame_0000340.jpg.half.jpg",
"037/undistort/images/frame_0000341.jpg.half.jpg",
"037/undistort/images/frame_0000342.jpg.half.jpg",
"037/undistort/images/frame_0000343.jpg.half.jpg",
"037/undistort/images/frame_0000344.jpg.half.jpg",
"037/undistort/images/frame_0000345.jpg.half.jpg",
"037/undistort/images/frame_0000346.jpg.half.jpg",
"037/undistort/images/frame_0000347.jpg.half.jpg",
"037/undistort/images/frame_0000348.jpg.half.jpg",
"037/undistort/images/frame_0000349.jpg.half.jpg",
"037/undistort/images/frame_0000350.jpg.half.jpg",
"037/undistort/images/frame_0000351.jpg.half.jpg",
"037/undistort/images/frame_0000352.jpg.half.jpg",
"037/undistort/images/frame_0000353.jpg.half.jpg",
"037/undistort/images/frame_0000354.jpg.half.jpg",
"037/undistort/images/frame_0000355.jpg.half.jpg",
"037/undistort/images/frame_0000356.jpg.half.jpg",
"037/undistort/images/frame_0000357.jpg.half.jpg",
"037/undistort/images/frame_0000358.jpg.half.jpg",
"037/undistort/images/frame_0000359.jpg.half.jpg",
"037/undistort/images/frame_0000360.jpg.half.jpg",
"037/undistort/images/frame_0000361.jpg.half.jpg",
"037/undistort/images/frame_0000362.jpg.half.jpg",
"037/undistort/images/frame_0000363.jpg.half.jpg",
"037/undistort/images/frame_0000364.jpg.half.jpg",
"037/undistort/images/frame_0000365.jpg.half.jpg",
"037/undistort/images/frame_0000366.jpg.half.jpg",
"037/undistort/images/frame_0000367.jpg.half.jpg",
"037/undistort/images/frame_0000368.jpg.half.jpg",
"037/undistort/images/frame_0000369.jpg.half.jpg",
"037/undistort/images/frame_0000370.jpg.half.jpg",
"037/undistort/images/frame_0000371.jpg.half.jpg",
"037/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000373.jpg.half.jpg",
"037/undistort/images/frame_0000374.jpg.half.jpg",
"037/undistort/images/frame_0000375.jpg.half.jpg",
"037/undistort/images/frame_0000376.jpg.half.jpg",
"037/undistort/images/frame_0000377.jpg.half.jpg",
"037/undistort/images/frame_0000378.jpg.half.jpg",
"037/undistort/images/frame_0000379.jpg.half.jpg",
"037/undistort/images/frame_0000380.jpg.half.jpg",
"037/undistort/images/frame_0000381.jpg.half.jpg",
"037/undistort/images/frame_0000382.jpg.half.jpg",
"037/undistort/images/frame_0000383.jpg.half.jpg",
"037/undistort/images/frame_0000384.jpg.half.jpg",
"037/undistort/images/frame_0000385.jpg.half.jpg",
"037/undistort/images/frame_0000386.jpg.half.jpg",
"042/undistort/images/frame_0000001.jpg.half.jpg",
"042/undistort/images/frame_0000002.jpg.half.jpg",
"042/undistort/images/frame_0000003.jpg.half.jpg",
"042/undistort/images/frame_0000004.jpg.half.jpg",
"042/undistort/images/frame_0000005.jpg.half.jpg",
"042/undistort/images/frame_0000006.jpg.half.jpg",
"042/undistort/images/frame_0000008.jpg.half.jpg",
"042/undistort/images/frame_0000009.jpg.half.jpg",
"042/undistort/images/frame_0000010.jpg.half.jpg",
"042/undistort/images/frame_0000011.jpg.half.jpg",
"042/undistort/images/frame_0000013.jpg.half.jpg",
"042/undistort/images/frame_0000014.jpg.half.jpg",
"042/undistort/images/frame_0000015.jpg.half.jpg",
"042/undistort/images/frame_0000016.jpg.half.jpg",
"042/undistort/images/frame_0000017.jpg.half.jpg",
"042/undistort/images/frame_0000018.jpg.half.jpg",
"042/undistort/images/frame_0000019.jpg.half.jpg",
"042/undistort/images/frame_0000020.jpg.half.jpg",
"042/undistort/images/frame_0000021.jpg.half.jpg",
"042/undistort/images/frame_0000022.jpg.half.jpg",
"042/undistort/images/frame_0000023.jpg.half.jpg",
"042/undistort/images/frame_0000024.jpg.half.jpg",
"042/undistort/images/frame_0000025.jpg.half.jpg",
"042/undistort/images/frame_0000026.jpg.half.jpg",
"042/undistort/images/frame_0000027.jpg.half.jpg",
"042/undistort/images/frame_0000029.jpg.half.jpg",
"042/undistort/images/frame_0000031.jpg.half.jpg",
"042/undistort/images/frame_0000032.jpg.half.jpg",
"042/undistort/images/frame_0000033.jpg.half.jpg",
"042/undistort/images/frame_0000034.jpg.half.jpg",
"042/undistort/images/frame_0000035.jpg.half.jpg",
"042/undistort/images/frame_0000037.jpg.half.jpg",
"042/undistort/images/frame_0000040.jpg.half.jpg",
"042/undistort/images/frame_0000042.jpg.half.jpg",
"042/undistort/images/frame_0000043.jpg.half.jpg",
"042/undistort/images/frame_0000045.jpg.half.jpg",
"042/undistort/images/frame_0000046.jpg.half.jpg",
"042/undistort/images/frame_0000047.jpg.half.jpg",
"042/undistort/images/frame_0000048.jpg.half.jpg",
"042/undistort/images/frame_0000050.jpg.half.jpg",
"042/undistort/images/frame_0000051.jpg.half.jpg",
"042/undistort/images/frame_0000052.jpg.half.jpg",
"042/undistort/images/frame_0000053.jpg.half.jpg",
"042/undistort/images/frame_0000054.jpg.half.jpg",
"042/undistort/images/frame_0000056.jpg.half.jpg",
"042/undistort/images/frame_0000057.jpg.half.jpg",
"042/undistort/images/frame_0000058.jpg.half.jpg",
"042/undistort/images/frame_0000061.jpg.half.jpg",
"042/undistort/images/frame_0000126.jpg.half.jpg",
"042/undistort/images/frame_0000127.jpg.half.jpg",
"042/undistort/images/frame_0000129.jpg.half.jpg",
"042/undistort/images/frame_0000133.jpg.half.jpg",
"042/undistort/images/frame_0000134.jpg.half.jpg",
"042/undistort/images/frame_0000135.jpg.half.jpg",
"042/undistort/images/frame_0000136.jpg.half.jpg",
"042/undistort/images/frame_0000137.jpg.half.jpg",
"042/undistort/images/frame_0000138.jpg.half.jpg",
"042/undistort/images/frame_0000139.jpg.half.jpg",
"042/undistort/images/frame_0000140.jpg.half.jpg",
"042/undistort/images/frame_0000141.jpg.half.jpg",
"042/undistort/images/frame_0000143.jpg.half.jpg",
"042/undistort/images/frame_0000144.jpg.half.jpg",
"042/undistort/images/frame_0000146.jpg.half.jpg",
"042/undistort/images/frame_0000147.jpg.half.jpg",
"042/undistort/images/frame_0000148.jpg.half.jpg",
"042/undistort/images/frame_0000150.jpg.half.jpg",
"042/undistort/images/frame_0000151.jpg.half.jpg",
"042/undistort/images/frame_0000152.jpg.half.jpg",
"042/undistort/images/frame_0000153.jpg.half.jpg",
"042/undistort/images/frame_0000154.jpg.half.jpg",
"042/undistort/images/frame_0000158.jpg.half.jpg",
"042/undistort/images/frame_0000159.jpg.half.jpg",
"042/undistort/images/frame_0000161.jpg.half.jpg",
"042/undistort/images/frame_0000163.jpg.half.jpg",
"042/undistort/images/frame_0000164.jpg.half.jpg",
"042/undistort/images/frame_0000165.jpg.half.jpg",
"042/undistort/images/frame_0000166.jpg.half.jpg",
"042/undistort/images/frame_0000167.jpg.half.jpg",
"042/undistort/images/frame_0000168.jpg.half.jpg",
"042/undistort/images/frame_0000169.jpg.half.jpg",
"042/undistort/images/frame_0000170.jpg.half.jpg",
"042/undistort/images/frame_0000172.jpg.half.jpg",
"042/undistort/images/frame_0000173.jpg.half.jpg",
"042/undistort/images/frame_0000174.jpg.half.jpg",
"042/undistort/images/frame_0000175.jpg.half.jpg",
"042/undistort/images/frame_0000176.jpg.half.jpg",
"042/undistort/images/frame_0000177.jpg.half.jpg",
"042/undistort/images/frame_0000178.jpg.half.jpg",
"042/undistort/images/frame_0000179.jpg.half.jpg",
"042/undistort/images/frame_0000180.jpg.half.jpg",
"042/undistort/images/frame_0000181.jpg.half.jpg",
"042/undistort/images/frame_0000182.jpg.half.jpg",
"042/undistort/images/frame_0000183.jpg.half.jpg",
"042/undistort/images/frame_0000184.jpg.half.jpg",
"042/undistort/images/frame_0000185.jpg.half.jpg",
"042/undistort/images/frame_0000186.jpg.half.jpg",
"042/undistort/images/frame_0000187.jpg.half.jpg",
"042/undistort/images/frame_0000188.jpg.half.jpg",
"042/undistort/images/frame_0000189.jpg.half.jpg",
"042/undistort/images/frame_0000190.jpg.half.jpg",
"042/undistort/images/frame_0000191.jpg.half.jpg",
"042/undistort/images/frame_0000192.jpg.half.jpg",
"042/undistort/images/frame_0000193.jpg.half.jpg",
"042/undistort/images/frame_0000194.jpg.half.jpg",
"042/undistort/images/frame_0000195.jpg.half.jpg",
"042/undistort/images/frame_0000196.jpg.half.jpg",
"042/undistort/images/frame_0000197.jpg.half.jpg",
"042/undistort/images/frame_0000198.jpg.half.jpg",
"042/undistort/images/frame_0000199.jpg.half.jpg",
"042/undistort/images/frame_0000200.jpg.half.jpg",
"042/undistort/images/frame_0000201.jpg.half.jpg",
"042/undistort/images/frame_0000202.jpg.half.jpg",
"042/undistort/images/frame_0000203.jpg.half.jpg",
"042/undistort/images/frame_0000204.jpg.half.jpg",
"042/undistort/images/frame_0000205.jpg.half.jpg",
"042/undistort/images/frame_0000207.jpg.half.jpg",
"042/undistort/images/frame_0000208.jpg.half.jpg",
"042/undistort/images/frame_0000209.jpg.half.jpg",
"042/undistort/images/frame_0000210.jpg.half.jpg",
"042/undistort/images/frame_0000211.jpg.half.jpg",
"042/undistort/images/frame_0000214.jpg.half.jpg",
"042/undistort/images/frame_0000225.jpg.half.jpg",
"042/undistort/images/frame_0000231.jpg.half.jpg",
"042/undistort/images/frame_0000232.jpg.half.jpg",
"042/undistort/images/frame_0000233.jpg.half.jpg",
"042/undistort/images/frame_0000234.jpg.half.jpg",
"042/undistort/images/frame_0000235.jpg.half.jpg",
"042/undistort/images/frame_0000237.jpg.half.jpg",
"042/undistort/images/frame_0000238.jpg.half.jpg",
"042/undistort/images/frame_0000239.jpg.half.jpg",
"042/undistort/images/frame_0000241.jpg.half.jpg",
"042/undistort/images/frame_0000242.jpg.half.jpg",
"042/undistort/images/frame_0000243.jpg.half.jpg",
"042/undistort/images/frame_0000244.jpg.half.jpg",
"042/undistort/images/frame_0000245.jpg.half.jpg",
"042/undistort/images/frame_0000246.jpg.half.jpg",
"042/undistort/images/frame_0000247.jpg.half.jpg",
"042/undistort/images/frame_0000248.jpg.half.jpg",
"042/undistort/images/frame_0000251.jpg.half.jpg",
"042/undistort/images/frame_0000252.jpg.half.jpg",
"042/undistort/images/frame_0000253.jpg.half.jpg",
"042/undistort/images/frame_0000254.jpg.half.jpg",
"042/undistort/images/frame_0000255.jpg.half.jpg",
"042/undistort/images/frame_0000256.jpg.half.jpg",
"042/undistort/images/frame_0000257.jpg.half.jpg",]
|
c3dm-main
|
c3dm/dataset/dataset_configs.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from setuptools import find_packages, setup

install_requires = [
    "numpy",
    "pandas",
    "Pillow",
    "pytorch-lightning",
    "pyyaml",
    "scipy",
    "torch",
    "torchvision",
    "tqdm",
]

setup(
    name="covidprognosis",
    author="Facebook AI Research",
    author_email="mmuckley@fb.com",
    version="0.1",
    packages=find_packages(exclude=["tests", "cp_examples", "configs"]),
    setup_requires=["wheel"],
    install_requires=install_requires,
)
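# Typical local development install (illustrative command, not part of the
# original file):
#   pip install -e .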
|
CovidPrognosis-main
|
setup.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from pathlib import Path

import numpy as np
import pytest
import yaml
from covidprognosis.data import (
    CheXpertDataset,
    CombinedXrayDataset,
    MimicCxrJpgDataset,
    NIHChestDataset,
)
from PIL import Image

DATA_CONFIG = "configs/data.yaml"
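# The tests read dataset locations from configs/data.yaml. A minimal sketch of
# the structure expected by fetch_dataset below (paths are illustrative
# placeholders, not the real dataset locations):
#
#   paths:
#     chexpert: /data/CheXpert-v1.0
#     nih: /data/nih-chest-xrays
#     mimic: /data/mimic-cxr-jpg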
def create_input(shape, label_count=12):
    # Deterministic dummy image plus a random label vector; NaN marks a missing
    # label, while -1 / 0 / 1 mimic uncertain / negative / positive findings.
    image = np.arange(np.prod(shape)).reshape(shape).astype(np.uint8)
    image = Image.fromarray(image)

    labels = []
    for _ in range(label_count):
        if np.random.normal() < 0.1:
            labels.append(np.nan)
        elif np.random.normal() < 0.2:
            labels.append(-1)
        elif np.random.normal() < 0.6:
            labels.append(0)
        else:
            labels.append(1)
    labels = np.array(labels)

    return {"image": image, "labels": labels, "metadata": {}}
def fetch_dataset(dataset_name, transform):
    dataset_name, split = dataset_name.split("_")

    with open(DATA_CONFIG, "r") as f:
        paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]

    if dataset_name == "combined":
        data_path = [paths["chexpert"], paths["nih"], paths["mimic"]]
        split = [split, split, split]
        transform = [transform, transform, transform]
    else:
        data_path = paths[dataset_name]

    # Skip (combined) or return None (single dataset) when the data is not
    # present on the test machine; callers treat None as a skip.
    if dataset_name == "combined":
        for path in data_path:
            if not Path(path).exists():
                pytest.skip()
    elif not Path(data_path).exists():
        return None

    if dataset_name == "nih":
        dataset = NIHChestDataset(directory=data_path, split=split, transform=transform)
    elif dataset_name == "chexpert":
        dataset = CheXpertDataset(directory=data_path, split=split, transform=transform)
    elif dataset_name == "mimic":
        dataset = MimicCxrJpgDataset(
            directory=data_path, split=split, transform=transform
        )
    elif dataset_name == "combined":
        dataset = CombinedXrayDataset(
            directory_list=data_path,
            dataset_list=["chexpert_v1", "nih-chest-xrays", "mimic-cxr"],
            split_list=split,
            transform_list=transform,
        )

    return dataset
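# Usage sketch (assumed; mirrors the tests in tests/test_xray_datasets.py):
#
#   transform = Compose([tvt.ToTensor()])
#   dataset = fetch_dataset("nih_train", transform)  # None if the data is absent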
@pytest.fixture
def dataset_length_dict():
    datalengths = {
        "nih_train": 112120,
        "nih_all": 112120,
        "chexpert_train": 223414,
        "chexpert_val": 234,
        "chexpert_all": 223648,
        "mimic_train": 368960,
        "mimic_val": 2991,
        "mimic_test": 5159,
        "mimic_all": 377110,
        "combined_train": 704494,
        "combined_val": 3225,
        "combined_test": 5159,
        "combined_all": 712878,
    }

    return datalengths
|
CovidPrognosis-main
|
tests/conftest.py
|
CovidPrognosis-main
|
tests/__init__.py
|
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
import torchvision.transforms as tvt
from covidprognosis.data.transforms import Compose
from .conftest import fetch_dataset
@pytest.mark.parametrize(
    "dataset_name",
    [
        "nih_train",
        "nih_all",
        "chexpert_train",
        "chexpert_val",
        "chexpert_all",
        "mimic_train",
        "mimic_val",
        "mimic_test",
        "mimic_all",
        "combined_train",
        "combined_val",
        "combined_test",
        "combined_all",
    ],
)
def test_dataset_lengths(dataset_name, dataset_length_dict):
    transform = Compose([tvt.ToTensor()])
    dataset = fetch_dataset(dataset_name, transform)

    if dataset is None:
        pytest.skip()
    else:
        assert len(dataset) == dataset_length_dict[dataset_name]
@pytest.mark.parametrize(
"dataset_name",
[
"nih_train",
"nih_all",
"chexpert_train",
"chexpert_val",
"chexpert_all",
"mimic_train",
"mimic_val",
"mimic_test",
"mimic_all",
"combined_train",
"combined_val",
"combined_test",
"combined_all",
],
)
def test_dataset_getitem(dataset_name):
transform = Compose([tvt.ToTensor()])
dataset = fetch_dataset(dataset_name, transform)
if dataset is None:
pytest.skip()
else:
item1 = dataset[0]
item2 = dataset[-1]
assert item1 is not None
assert item2 is not None
def test_combined_loader():
transform = Compose([tvt.ToTensor()])
dataset = fetch_dataset("combined_all", transform=transform)
sample = dataset[0]
assert "CheXpert" in str(sample["metadata"]["filename"])
sample = dataset[300000]
assert "nih-chest-xrays" in str(sample["metadata"]["filename"])
sample = dataset[600000]
assert "mimic-cxr-jpg" in str(sample["metadata"]["filename"])
|
CovidPrognosis-main
|
tests/test_xray_datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import covidprognosis.data.transforms as cpt
import numpy as np
import pytest
import torch
import torchvision.transforms as tvt
from scipy.ndimage import gaussian_filter
from .conftest import create_input
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_compose(shape):
sample = create_input(shape)
transform = cpt.Compose(
[tvt.RandomHorizontalFlip(), tvt.ToTensor(), cpt.RandomGaussianBlur()]
)
sample = transform(sample)
assert sample["image"] is not None
@pytest.mark.parametrize("shape, label_idx", [[[32, 32, 3], 0], [[45, 16, 3], 5]])
def test_nan_to_int(shape, label_idx):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.NanToInt(5)])
sample["labels"][label_idx] = np.nan
sample = transform(sample)
assert sample["labels"][label_idx] == 5
@pytest.mark.parametrize(
"shape, label_idx, start_label, end_label",
[[[32, 32, 3], 2, -1, 0], [[45, 16, 3], 10, 1, 0]],
)
def test_remap_label(shape, label_idx, start_label, end_label):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RemapLabel(start_label, end_label)])
sample["labels"][label_idx] = start_label
sample = transform(sample)
assert sample["labels"][label_idx] == end_label
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_histnorm(shape):
"""Test this to guard against an implementation change."""
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.HistogramNormalize()])
image = np.transpose(
torch.tensor(np.array(sample["image"]), dtype=torch.float).numpy(), (2, 0, 1)
)
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), transform.transforms[1].number_bins, density=True
)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
image_equalized.reshape(image.shape)
image = torch.tensor(image_equalized.reshape(image.shape)).to(torch.float)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_rand_gauss_blur(shape):
"""Test this to guard against an implementation change."""
seed = 123
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RandomGaussianBlur(p=1)])
# run the custom blur
np.random.seed(seed)
image = tvt.functional.to_tensor(sample["image"]) * 1
sigma = np.random.uniform(
transform.transforms[1].sigma_range[0], transform.transforms[1].sigma_range[1]
)
image = torch.tensor(gaussian_filter(image.numpy(), sigma), dtype=image.dtype,)
# transform blur
transform = cpt.Compose(
[tvt.ToTensor(), cpt.RandomGaussianBlur(p=1, sigma_range=(sigma, sigma))]
)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
# retest for 0 probability
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.RandomGaussianBlur(p=-0.1)])
# run the custom blur
image = tvt.functional.to_tensor(sample["image"]) * 1
# transform blur
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_add_noise(shape):
"""Test this to guard against an implementation change."""
seed = 456
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.AddGaussianNoise(p=1)])
# run the custom noise
np.random.seed(seed)
image = tvt.functional.to_tensor(sample["image"]) * 1
np.random.uniform()
snr_level = np.random.uniform(
low=transform.transforms[1].snr_range[0],
high=transform.transforms[1].snr_range[1],
)
signal_level = np.mean(image.numpy())
image = image + (signal_level / snr_level) * torch.tensor(
np.random.normal(size=tuple(image.shape)), dtype=image.dtype,
)
# transform blur
np.random.seed(seed)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
# retest for 0 probability
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.AddGaussianNoise(p=-0.1)])
# run the custom blur
image = tvt.functional.to_tensor(sample["image"]) * 1
# transform blur
sample = transform(sample)
assert torch.allclose(sample["image"], image)
@pytest.mark.parametrize("shape", [[32, 32, 3], [45, 16, 3]])
def test_tensor_to_rgb(shape):
sample = create_input(shape)
transform = cpt.Compose([tvt.ToTensor(), cpt.TensorToRGB()])
image = tvt.functional.to_tensor(sample["image"]) * 1
expands = list()
for i in range(image.ndim):
if i == 0:
expands.append(3)
else:
expands.append(-1)
image = image.expand(*expands)
sample = transform(sample)
assert torch.allclose(sample["image"], image)
|
CovidPrognosis-main
|
tests/test_transforms.py
|
import covidprognosis.data
import covidprognosis.models
import covidprognosis.plmodules
|
CovidPrognosis-main
|
covidprognosis/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Code adapted from https://github.com/facebookresearch/moco
from typing import Tuple
import torch
import torch.nn as nn
from torch import Tensor
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(
self,
encoder_q: nn.Module,
encoder_k: nn.Module,
dim: int = 128,
K: int = 65536,
m: float = 0.999,
T: float = 0.07,
mlp: bool = False,
):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super().__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = encoder_q
self.encoder_k = encoder_k
if mlp: # hack: brute-force replacement
if hasattr(self.encoder_q, "fc"): # ResNet models
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc
)
self.encoder_k.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc
)
elif hasattr(self.encoder_q, "classifier"): # Densenet models
dim_mlp = self.encoder_q.classifier.weight.shape[1]
self.encoder_q.classifier = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.classifier
)
self.encoder_k.classifier = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.classifier
)
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer(
"queue", nn.functional.normalize(torch.randn(dim, K), dim=0)
)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys: Tensor):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
assert isinstance(self.queue_ptr, Tensor)
ptr = int(self.queue_ptr)
assert (
self.K % batch_size == 0
), f"batch_size={batch_size}, K={self.K}" # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x: Tensor) -> Tuple[Tensor, Tensor]:
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q: Tensor, im_k: Tensor) -> Tuple[Tensor, Tensor]:
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits, labels
# utils
@torch.no_grad()
def concat_all_gather(tensor: Tensor) -> Tensor:
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
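# --- Usage sketch (editor's addition, not part of the original module) ---
# Illustrates how the wrapper is assembled from two torchvision encoders; the
# hyperparameters below are just the documented defaults. The forward pass is
# not exercised here because _batch_shuffle_ddp/concat_all_gather assume an
# initialized DistributedDataParallel process group.
if __name__ == "__main__":
    import torchvision.models as tv_models

    encoder_q = tv_models.resnet50(num_classes=128)
    encoder_k = tv_models.resnet50(num_classes=128)
    moco = MoCo(encoder_q, encoder_k, dim=128, K=65536, m=0.999, T=0.07, mlp=True)
    # queue holds K negative keys of dimension dim, plus a write pointer
    print(moco.queue.shape, int(moco.queue_ptr))  # torch.Size([128, 65536]) 0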
|
CovidPrognosis-main
|
covidprognosis/models/moco_model.py
|
from .moco_model import MoCo
|
CovidPrognosis-main
|
covidprognosis/models/__init__.py
|
from .xray_datamodule import XrayDataModule
|
CovidPrognosis-main
|
covidprognosis/plmodules/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from argparse import ArgumentParser
from typing import Callable, List, Optional, Union
import covidprognosis as cp
import numpy as np
import pytorch_lightning as pl
import torch
class TwoImageDataset(torch.utils.data.Dataset):
"""
Wrapper for returning two augmentations of the same image.
Args:
dataset: Pre-initialized data set to return multiple samples from.
"""
def __init__(self, dataset: cp.data.BaseDataset):
assert isinstance(dataset, cp.data.BaseDataset)
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# randomness handled via the transform objects
# this requires the transforms to sample randomness from the process
# generator
item0 = self.dataset[idx]
item1 = self.dataset[idx]
sample = {
"image0": item0["image"],
"image1": item1["image"],
"label": item0["labels"],
}
return sample
def fetch_dataset(
dataset_name: str,
dataset_dir: Union[List[Union[str, os.PathLike]], Union[str, os.PathLike]],
split: str,
transform: Optional[Callable],
two_image: bool = False,
label_list="all",
):
"""Dataset fetcher for config handling."""
assert split in ("train", "val", "test")
dataset: Union[cp.data.BaseDataset, TwoImageDataset]
# determine the dataset
if dataset_name == "nih":
assert not isinstance(dataset_dir, list)
dataset = cp.data.NIHChestDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
resplit=True,
)
if dataset_name == "mimic":
assert not isinstance(dataset_dir, list)
dataset = cp.data.MimicCxrJpgDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
)
elif dataset_name == "chexpert":
assert not isinstance(dataset_dir, list)
dataset = cp.data.CheXpertDataset(
directory=dataset_dir,
split=split,
transform=transform,
label_list=label_list,
)
elif dataset_name == "mimic-chexpert":
assert isinstance(dataset_dir, list)
dataset = cp.data.CombinedXrayDataset(
dataset_list=["chexpert_v1", "mimic-cxr"],
directory_list=dataset_dir,
transform_list=[transform, transform],
label_list=[label_list, label_list],
split_list=[split, split],
)
else:
raise ValueError(f"dataset {dataset_name} not recognized")
if two_image is True:
dataset = TwoImageDataset(dataset)
return dataset
def worker_init_fn(worker_id):
"""Handle random seeding."""
worker_info = torch.utils.data.get_worker_info()
seed = worker_info.seed % (2 ** 32 - 1) # pylint: disable=no-member
np.random.seed(seed)
class XrayDataModule(pl.LightningDataModule):
"""
X-ray data module for training models with PyTorch Lightning.
Args:
dataset_name: Name of the dataset.
dataset_dir: Location of the data.
label_list: Labels to load for training.
batch_size: Training batch size.
num_workers: Number of workers for dataloaders.
use_two_images: Whether to return two augmentations of same image from
dataset (for MoCo pretraining).
train_transform: Transform for training loop.
val_transform: Transform for validation loop.
test_transform: Transform for test loop.
"""
def __init__(
self,
dataset_name: str,
dataset_dir: Union[List[Union[str, os.PathLike]], Union[str, os.PathLike]],
label_list: Union[str, List[str]] = "all",
batch_size: int = 1,
num_workers: int = 4,
use_two_images: bool = False,
train_transform: Optional[Callable] = None,
val_transform: Optional[Callable] = None,
test_transform: Optional[Callable] = None,
):
super().__init__()
self.dataset_name = dataset_name
self.dataset_dir = dataset_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.train_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"train",
train_transform,
label_list=label_list,
two_image=use_two_images,
)
self.val_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"val",
val_transform,
label_list=label_list,
two_image=use_two_images,
)
self.test_dataset = fetch_dataset(
self.dataset_name,
self.dataset_dir,
"test",
test_transform,
label_list=label_list,
two_image=use_two_images,
)
if isinstance(self.train_dataset, TwoImageDataset):
self.label_list = None
else:
self.label_list = self.train_dataset.label_list
def __dataloader(self, split: str) -> torch.utils.data.DataLoader:
assert split in ("train", "val", "test")
shuffle = False
if split == "train":
dataset = self.train_dataset
shuffle = True
elif split == "val":
dataset = self.val_dataset
else:
dataset = self.test_dataset
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
drop_last=True,
shuffle=shuffle,
worker_init_fn=worker_init_fn,
)
return loader
def train_dataloader(self):
return self.__dataloader(split="train")
def val_dataloader(self):
return self.__dataloader(split="val")
def test_dataloader(self):
return self.__dataloader(split="test")
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--dataset_name", default="mimic", type=str)
parser.add_argument("--dataset_dir", default=None, type=str)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--num_workers", default=4, type=int)
return parser
|
CovidPrognosis-main
|
covidprognosis/plmodules/xray_datamodule.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Callable, Dict, List, Tuple, Union
import numpy as np
import torch
from scipy.ndimage import gaussian_filter
class XRayTransform:
"""XRayTransform base class."""
def __repr__(self):
return "XRayTransform: {}".format(self.__class__.__name__)
class Compose(XRayTransform):
"""
Compose a list of transforms into one.
Args:
transforms: The list of transforms.
"""
def __init__(self, transforms: List[Callable]):
self.transforms = transforms
def __call__(self, sample: Dict) -> Dict:
for t in self.transforms:
if isinstance(t, XRayTransform):
sample = t(sample)
else:
# assume torchvision transform
sample["image"] = t(sample["image"])
return sample
class NanToInt(XRayTransform):
"""
Convert an np.nan label to an integer.
Args:
num: Integer to convert to.
"""
def __init__(self, num: int = -100):
self.num = num
def __call__(self, sample: Dict) -> Dict:
sample["labels"][np.isnan(sample["labels"])] = self.num
return sample
class RemapLabel(XRayTransform):
"""
Convert a label from one value to another.
Args:
input_val: Value to convert from.
output_val: Value to convert to.
"""
def __init__(self, input_val: Union[float, int], output_val: Union[float, int]):
self.input_val = input_val
self.output_val = output_val
def __call__(self, sample: Dict) -> Dict:
labels = np.copy(sample["labels"])
labels[labels == self.input_val] = self.output_val
sample["labels"] = labels
return sample
class HistogramNormalize(XRayTransform):
"""
Apply histogram normalization.
Args:
number_bins: Number of bins to use in histogram.
"""
def __init__(self, number_bins: int = 256):
self.number_bins = number_bins
def __call__(self, sample: Dict) -> Dict:
image = sample["image"].numpy()
# get image histogram
image_histogram, bins = np.histogram(
image.flatten(), self.number_bins, density=True
)
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
image_equalized.reshape(image.shape)
sample["image"] = torch.tensor(image_equalized.reshape(image.shape)).to(
sample["image"]
)
return sample
class RandomGaussianBlur(XRayTransform):
"""
Random Gaussian blur transform.
Args:
p: Probability to apply transform.
sigma_range: Range of sigma values for Gaussian kernel.
"""
def __init__(self, p: float = 0.5, sigma_range: Tuple[float, float] = (0.1, 2.0)):
self.p = p
self.sigma_range = sigma_range
def __call__(self, sample: Dict) -> Dict:
if np.random.uniform() <= self.p:
sigma = np.random.uniform(self.sigma_range[0], self.sigma_range[1])
sample["image"] = torch.tensor(
gaussian_filter(sample["image"].numpy(), sigma),
dtype=sample["image"].dtype,
)
return sample
class AddGaussianNoise(XRayTransform):
"""
Gaussian noise transform.
Args:
p: Probability of adding Gaussian noise.
snr_range: SNR range for Gaussian noise addition.
"""
def __init__(self, p: float = 0.5, snr_range: Tuple[float, float] = (2.0, 8.0)):
self.p = p
self.snr_range = snr_range
def __call__(self, sample: Dict) -> Dict:
if np.random.uniform() <= self.p:
snr_level = np.random.uniform(low=self.snr_range[0], high=self.snr_range[1])
signal_level = np.mean(sample["image"].numpy())
# use numpy to keep things consistent on numpy random seed
sample["image"] = sample["image"] + (
signal_level / snr_level
) * torch.tensor(
np.random.normal(size=tuple(sample["image"].shape)),
dtype=sample["image"].dtype,
)
return sample
class TensorToRGB(XRayTransform):
"""
Convert Tensor to RGB by replicating channels.
Args:
num_output_channels: Number of output channels (3 for RGB).
"""
def __init__(self, num_output_channels: int = 3):
self.num_output_channels = num_output_channels
def __call__(self, sample: Dict) -> Dict:
expands = list()
for i in range(sample["image"].ndim):
if i == 0:
expands.append(self.num_output_channels)
else:
expands.append(-1)
sample["image"] = sample["image"].expand(*expands)
return sample
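# --- Usage sketch (editor's addition, not part of the original module) ---
# Shows how XRayTransform subclasses and plain torchvision transforms mix in
# Compose: torchvision callables only see sample["image"], while the classes
# above receive the whole sample dict and may also edit the labels. The dummy
# sample below is made up purely for illustration.
if __name__ == "__main__":
    import torchvision.transforms as tvt
    from PIL import Image

    dummy = {
        "image": Image.fromarray(
            (np.arange(64 * 64 * 3) % 256).astype(np.uint8).reshape(64, 64, 3)
        ),
        "labels": np.array([np.nan, -1.0, 0.0, 1.0]),
        "metadata": {},
    }
    pipeline = Compose(
        [tvt.ToTensor(), NanToInt(5), RemapLabel(-1, 0), HistogramNormalize(), TensorToRGB()]
    )
    out = pipeline(dummy)
    print(out["image"].shape)  # torch.Size([3, 64, 64])
    print(out["labels"])       # [5. 0. 0. 1.]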
|
CovidPrognosis-main
|
covidprognosis/data/transforms.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset
class BaseDataset(Dataset):
"""
Root class for X-ray data sets.
    The base data set logs parameters as attributes, reducing code duplication
across the various public X-ray data loaders.
Args:
dataset_name: Name of the dataset.
directory: Location of the data.
split: One of ('train', 'val', 'test', 'all').
label_list: A list of labels for the data loader to extract.
subselect: Argument to pass to `pandas` subselect.
transform: A set of data transforms.
"""
def __init__(
self,
dataset_name: str,
directory: Union[str, os.PathLike],
split: str,
label_list: Union[str, List[str]],
subselect: Optional[str],
transform: Optional[Callable],
):
self.dataset_name = dataset_name
split_list = ["train", "val", "test", "all"]
if split not in split_list:
raise ValueError("split {} not a valid split".format(split))
self.directory = Path(directory)
self.csv = None
self.split = split
self.label_list = label_list
self.subselect = subselect
self.transform = transform
self.metadata_keys: List[str] = []
def preproc_csv(self, csv: pd.DataFrame, subselect: str) -> pd.DataFrame:
if subselect is not None:
csv = csv.query(subselect)
return csv
def open_image(self, path: Union[str, os.PathLike]) -> Image:
with open(path, "rb") as f:
with Image.open(f) as img:
return img.convert("F")
def __len__(self) -> int:
return 0
@property
def calc_pos_weights(self) -> float:
if self.csv is None:
return 0.0
pos = (self.csv[self.label_list] == 1).sum()
neg = (self.csv[self.label_list] == 0).sum()
        neg_pos_ratio = (neg / np.maximum(pos, 1)).values.astype(float)
return neg_pos_ratio
def retrieve_metadata(
self, idx: int, filename: Union[str, os.PathLike], exam: pd.Series
) -> Dict:
metadata = {}
metadata["dataset_name"] = self.dataset_name
metadata["dataloader class"] = self.__class__.__name__
metadata["idx"] = idx # type: ignore
for key in self.metadata_keys:
# cast to string due to typing issues with dataloader
metadata[key] = str(exam[key])
metadata["filename"] = str(filename)
metadata["label_list"] = self.label_list # type: ignore
return metadata
def __repr__(self):
return self.__class__.__name__ + " num_samples={}".format(len(self))
@property
def labels(self) -> Union[str, List[str]]:
return self.label_list
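# --- Subclassing sketch (editor's addition, not part of the original module) ---
# A minimal illustration of the contract a new loader fulfils: populate
# self.csv, then implement __len__ and __getitem__ on top of the shared
# retrieve_metadata helper. The in-memory frame and blank image are invented
# for illustration only; real subclasses read files from self.directory.
if __name__ == "__main__":
    class ToyXrayDataset(BaseDataset):
        def __init__(self, transform: Optional[Callable] = None):
            super().__init__("toy", ".", "train", ["Finding"], None, transform)
            self.csv = pd.DataFrame({"Finding": [0, 1], "file": ["a.png", "b.png"]})
            self.metadata_keys = ["file"]

        def __len__(self) -> int:
            return len(self.csv)

        def __getitem__(self, idx: int) -> Dict:
            exam = self.csv.iloc[idx]
            image = Image.new("F", (32, 32))  # stands in for open_image(path)
            labels = np.array([exam["Finding"]], dtype=float)
            sample = {
                "image": image,
                "labels": labels,
                "metadata": self.retrieve_metadata(idx, exam["file"], exam),
            }
            return sample if self.transform is None else self.transform(sample)

    ds = ToyXrayDataset()
    print(len(ds), ds[0]["metadata"]["dataset_name"])  # 2 toy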
|
CovidPrognosis-main
|
covidprognosis/data/base_dataset.py
|
from .base_dataset import BaseDataset
from .chexpert import CheXpertDataset
from .combined_datasets import CombinedXrayDataset
from .mimic_cxr import MimicCxrJpgDataset
from .nih_chest_xrays import NIHChestDataset
|
CovidPrognosis-main
|
covidprognosis/data/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class CheXpertDataset(BaseDataset):
"""
Data loader for CheXpert data set.
Args:
directory: Base directory for data set with subdirectory
'CheXpert-v1.0'.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
        transform: A composable transform list to be applied to the data.
Irvin, Jeremy, et al. "Chexpert: A large chest radiograph dataset with
uncertainty labels and expert comparison." Proceedings of the AAAI
Conference on Artificial Intelligence. Vol. 33. 2019.
Dataset website here:
https://stanfordmlgroup.github.io/competitions/chexpert/
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
):
super().__init__(
"chexpert_v1", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"Patient ID",
"Path",
"Sex",
"Age",
"Frontal/Lateral",
"AP/PA",
]
if self.split == "train":
            self.csv_path = self.directory / "CheXpert-v1.0" / "train.csv"
            self.csv = pd.read_csv(self.csv_path)
        elif self.split == "val":
            self.csv_path = self.directory / "CheXpert-v1.0" / "valid.csv"
            self.csv = pd.read_csv(self.csv_path)
        elif self.split == "all":
            self.csv_path = self.directory / "CheXpert-v1.0" / "train.csv"
self.csv = pd.concat(
[
pd.read_csv(self.directory / "CheXpert-v1.0" / "train.csv"),
pd.read_csv(self.directory / "CheXpert-v1.0" / "valid.csv"),
]
)
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Opacity",
"Lung Lesion",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
csv["Patient ID"] = csv["Path"].str.extract(pat="(patient\\d+)")
csv["view"] = csv["Frontal/Lateral"].str.lower()
if subselect is not None:
csv = csv.query(subselect)
return csv
def __len__(self) -> int:
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
filename = self.directory / exam["Path"]
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# retrieve labels while handling missing ones for combined data loader
        labels = np.array(exam.reindex(self.label_list)[self.label_list]).astype(float)
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
|
CovidPrognosis-main
|
covidprognosis/data/chexpert.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from typing import Callable, List, Optional, Union
from .base_dataset import BaseDataset
from .chexpert import CheXpertDataset
from .mimic_cxr import MimicCxrJpgDataset
from .nih_chest_xrays import NIHChestDataset
class CombinedXrayDataset(BaseDataset):
"""
Combine several x-ray datasets into one.
Args:
directory_list: List of paths for directories for each dataset.
dataset_list: List of datasets to load. Current options include:
'all': Include all datasets.
'chexpert': Include CheXpert dataset (223,414 in 'train').
'nih-chest-xrays': Include NIH Chest x-rays (112,120 images in
'train').
split_list: List of strings specifying split. If a string is passed
            (e.g., 'train'), that split will be broadcast to all
sub-dataloaders.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels from all datasets.
transform_list: A list of composed transforms. If a single composed
transform is passed, it will be broadcast to all sub-dataloaders.
"""
def __init__(
self,
directory_list: List[Union[str, os.PathLike]],
dataset_list: Union[str, List[str]] = "all",
split_list: Union[str, List[str]] = "train",
label_list: Union[str, List[str]] = "all",
subselect_list: Optional[List[str]] = None,
transform_list: Optional[List[Optional[Callable]]] = None,
):
self.dataset_name = "combined-xray-dataset"
if dataset_list == "all":
dataset_list = ["chexpert_v1", "nih-chest-xrays", "mimic-cxr"]
self.dataset_list = dataset_list
elif isinstance(dataset_list, str):
raise RuntimeError("Unrecognized dataset list string.")
else:
self.dataset_list = dataset_list # type:ignore
self.directory_list = directory_list = self.to_list(directory_list)
self.split_list = split_list = self.to_list(split_list)
self.subselect_list = self.to_list(subselect_list)
self.transform_list = transform_list = self.to_list(transform_list)
# find all possible labels if using 'all'
if label_list == "all":
self.label_list = self.fetch_label_list(self.dataset_list)
else:
if isinstance(label_list, str):
raise ValueError(
"If inputting label_list, label_list must not be a string"
)
self.label_list = label_list
self.datasets = []
for (dataset_name, directory, split, subselect, transform) in zip(
self.dataset_list,
self.directory_list,
self.split_list,
self.subselect_list,
self.transform_list,
):
self.datasets.append(
self.fetch_dataset(
dataset_name,
directory,
split,
self.label_list,
subselect,
transform,
)
)
def to_list(self, item):
if not isinstance(item, list):
item = [item] * len(self.dataset_list)
assert len(item) == len(self.dataset_list)
return item
def fetch_label_list(self, dataset_name_list: List[str]) -> List[str]:
label_list: List[str] = []
for dataset_name in dataset_name_list:
if dataset_name == "chexpert_v1":
label_list = label_list + CheXpertDataset.default_labels()
elif dataset_name == "nih-chest-xrays":
label_list = label_list + NIHChestDataset.default_labels()
elif dataset_name == "mimic-cxr":
label_list = label_list + MimicCxrJpgDataset.default_labels()
# remove duplicates
label_list = list(set(label_list))
return label_list
def fetch_dataset(
self,
dataset_name: str,
directory: Union[str, os.PathLike],
split: str,
label_list: Union[str, List[str]],
subselect: str,
transform: Callable,
) -> BaseDataset:
dataset: BaseDataset
if dataset_name == "chexpert_v1":
dataset = CheXpertDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
elif dataset_name == "nih-chest-xrays":
dataset = NIHChestDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
elif dataset_name == "mimic-cxr":
dataset = MimicCxrJpgDataset(
directory=directory,
split=split,
label_list=label_list,
subselect=subselect,
transform=transform,
)
else:
raise RuntimeError(f"Data set {dataset_name} not found.")
return dataset
def __len__(self) -> int:
count = 0
for dataset in self.datasets:
count = count + len(dataset)
return count
def __getitem__(self, idx: int):
if idx < 0:
idx = len(self) + idx
for dataset in self.datasets:
if idx < len(dataset):
return dataset[idx]
else:
idx = idx - len(dataset)
|
CovidPrognosis-main
|
covidprognosis/data/combined_datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class NIHChestDataset(BaseDataset):
"""
Data loader for NIH data set.
Args:
directory: Base directory for data set.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
        transform: A composable transform list to be applied to the data.
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
resplit: bool = False,
resplit_seed: int = 2019,
resplit_ratios: List[float] = [0.7, 0.2, 0.1],
):
super().__init__(
"nih-chest-xrays", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"Image Index",
"Follow-up #",
"Patient ID",
"Patient Age",
"Patient Gender",
"View Position",
]
if resplit:
rg = np.random.default_rng(resplit_seed)
self.csv_path = self.directory / "Data_Entry_2017.csv"
csv = pd.read_csv(self.csv_path)
patient_list = csv["Patient ID"].unique()
rand_inds = rg.permutation(len(patient_list))
train_count = int(np.round(resplit_ratios[0] * len(patient_list)))
val_count = int(np.round(resplit_ratios[1] * len(patient_list)))
grouped = csv.groupby("Patient ID")
if self.split == "train":
patient_list = patient_list[rand_inds[:train_count]]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
elif self.split == "val":
patient_list = patient_list[
rand_inds[train_count : train_count + val_count]
]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
elif self.split == "test":
patient_list = patient_list[rand_inds[train_count + val_count :]]
self.csv = pd.concat([grouped.get_group(pat) for pat in patient_list])
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
else:
if self.split == "train":
self.csv_path = self.directory / "Data_Entry_2017.csv"
self.csv = pd.read_csv(self.csv_path)
elif self.split == "all":
self.csv_path = self.directory / "Data_Entry_2017.csv"
self.csv = pd.read_csv(self.csv_path)
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"Atelectasis",
"Consolidation",
"Infiltration",
"Pneumothorax",
"Edema",
"Emphysema",
"Fibrosis",
"Effusion",
"Pneumonia",
"Pleural_Thickening",
"Cardiomegaly",
"Nodule",
"Mass",
"Hernia",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
def format_view(s):
return "frontal" if s in ("AP", "PA") else None
csv["view"] = csv["View Position"].apply(format_view)
if subselect is not None:
                csv = csv.query(subselect)
return csv
def __len__(self) -> int:
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
filename = self.directory / "images" / exam["Image Index"]
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# example: exam['Finding Labels'] = 'Pneumonia|Cardiomegaly'
# goal here is to see if label is a substring of
# 'Pneumonia|Cardiomegaly' for each label in self.label_list
labels = [
1 if label in exam["Finding Labels"] else 0 for label in self.label_list
]
        labels = np.array(labels).astype(float)
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
|
CovidPrognosis-main
|
covidprognosis/data/nih_chest_xrays.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class MimicCxrJpgDataset(BaseDataset):
"""
Data loader for MIMIC CXR data set.
Args:
directory: Base directory for data set.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
'val': Include validation split.
'test': Include testing split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
        transform: A composable transform list to be applied to the data.
"""
def __init__(
self,
directory: Union[str, os.PathLike],
split: str = "train",
label_list: Union[str, List[str]] = "all",
subselect: Optional[str] = None,
transform: Optional[Callable] = None,
):
super().__init__(
"mimic-cxr-jpg", directory, split, label_list, subselect, transform
)
if label_list == "all":
self.label_list = self.default_labels()
else:
self.label_list = label_list
self.metadata_keys = [
"dicom_id",
"subject_id",
"study_id",
"PerformedProcedureStepDescription",
"ViewPosition",
"Rows",
"Columns",
"StudyDate",
"StudyTime",
"ProcedureCodeSequence_CodeMeaning",
"ViewCodeSequence_CodeMeaning",
"PatientOrientationCodeSequence_CodeMeaning",
]
self.label_csv_path = (
self.directory / "2.0.0" / "mimic-cxr-2.0.0-chexpert.csv.gz"
)
self.meta_csv_path = (
self.directory / "2.0.0" / "mimic-cxr-2.0.0-metadata.csv.gz"
)
self.split_csv_path = self.directory / "2.0.0" / "mimic-cxr-2.0.0-split.csv.gz"
if self.split in ("train", "val", "test"):
split_csv = pd.read_csv(self.split_csv_path)["split"].str.contains(
self.split
)
meta_csv = pd.read_csv(self.meta_csv_path)[split_csv].set_index(
["subject_id", "study_id"]
)
label_csv = pd.read_csv(self.label_csv_path).set_index(
["subject_id", "study_id"]
)
self.csv = meta_csv.join(label_csv).reset_index()
elif self.split == "all":
meta_csv = pd.read_csv(self.meta_csv_path).set_index(
["subject_id", "study_id"]
)
label_csv = pd.read_csv(self.label_csv_path).set_index(
["subject_id", "study_id"]
)
self.csv = meta_csv.join(label_csv).reset_index()
else:
logging.warning(
"split {} not recognized for dataset {}, "
"not returning samples".format(split, self.__class__.__name__)
)
self.csv = self.preproc_csv(self.csv, self.subselect)
@staticmethod
def default_labels() -> List[str]:
return [
"No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Opacity",
"Lung Lesion",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices",
]
def preproc_csv(self, csv: pd.DataFrame, subselect: Optional[str]) -> pd.DataFrame:
if csv is not None:
def format_view(s):
if s in ("AP", "PA", "AP|PA"):
return "frontal"
elif s in ("LATERAL", "LL"):
return "lateral"
else:
return None
csv["view"] = csv.ViewPosition.apply(format_view)
if subselect is not None:
csv = csv.query(subselect)
return csv
def __len__(self):
length = 0
if self.csv is not None:
length = len(self.csv)
return length
def __getitem__(self, idx: int) -> Dict:
assert self.csv is not None
exam = self.csv.iloc[idx]
subject_id = str(exam["subject_id"])
study_id = str(exam["study_id"])
dicom_id = str(exam["dicom_id"])
filename = self.directory / "2.0.0" / "files"
filename = (
filename
/ "p{}".format(subject_id[:2])
/ "p{}".format(subject_id)
/ "s{}".format(study_id)
/ "{}.jpg".format(dicom_id)
)
image = self.open_image(filename)
metadata = self.retrieve_metadata(idx, filename, exam)
# retrieve labels while handling missing ones for combined data loader
        labels = np.array(exam.reindex(self.label_list)[self.label_list]).astype(float)
sample = {"image": image, "labels": labels, "metadata": metadata}
if self.transform is not None:
sample = self.transform(sample)
return sample
|
CovidPrognosis-main
|
covidprognosis/data/mimic_cxr.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from torch.utils.data._utils.collate import default_collate
def collate_fn(batch):
"""Collate function to handle X-ray metadata."""
metadata = []
for el in batch:
metadata.append(el["metadata"])
del el["metadata"]
batch = default_collate(batch)
batch["metadata"] = metadata
return batch
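# --- Usage sketch (editor's addition, not part of the original module) ---
# Demonstrates why metadata gets special treatment: image/label tensors are
# stacked by default_collate, while the per-sample metadata dicts (whose
# values are free-form strings) are simply kept as a Python list. The tensors
# below stand in for transformed samples.
if __name__ == "__main__":
    import torch

    batch = [
        {"image": torch.zeros(3, 4, 4), "labels": torch.zeros(2), "metadata": {"filename": "a.jpg"}},
        {"image": torch.ones(3, 4, 4), "labels": torch.ones(2), "metadata": {"filename": "b.jpg"}},
    ]
    collated = collate_fn(batch)
    print(collated["image"].shape)  # torch.Size([2, 3, 4, 4])
    print(collated["metadata"])     # [{'filename': 'a.jpg'}, {'filename': 'b.jpg'}]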
|
CovidPrognosis-main
|
covidprognosis/data/collate_fn.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
import covidprognosis as cp
import pytorch_lightning as pl
import torch
import torchvision.models as models
class MoCoModule(pl.LightningModule):
def __init__(
self,
arch,
feature_dim,
queue_size,
use_mlp=False,
learning_rate=1.0,
momentum=0.9,
weight_decay=1e-4,
epochs=1,
):
super().__init__()
self.learning_rate = learning_rate
self.momentum = momentum
self.weight_decay = weight_decay
self.epochs = epochs
# build model
self.model = cp.models.MoCo(
encoder_q=models.__dict__[arch](num_classes=feature_dim),
encoder_k=models.__dict__[arch](num_classes=feature_dim),
dim=feature_dim,
K=queue_size,
mlp=use_mlp,
)
self.loss_fn = torch.nn.CrossEntropyLoss()
self.train_acc = pl.metrics.Accuracy()
self.val_acc = pl.metrics.Accuracy()
def forward(self, image0, image1):
return self.model(image0, image1)
def training_step(self, batch, batch_idx):
image0, image1 = batch["image0"], batch["image1"]
output, target = self(image0, image1)
# metrics
loss_val = self.loss_fn(output, target)
self.train_acc(output, target)
self.log("train_metrics/loss", loss_val)
self.log("train_metrics/accuracy", self.train_acc, on_step=True, on_epoch=False)
return loss_val
def configure_optimizers(self):
optimizer = torch.optim.SGD(
self.model.parameters(),
self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--feature_dim", default=256, type=int)
parser.add_argument("--queue_size", default=65536, type=int)
parser.add_argument("--use_mlp", default=False, type=bool)
parser.add_argument("--learning_rate", default=1.0, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=1e-4, type=float)
return parser
|
CovidPrognosis-main
|
cp_examples/moco_pretrain/moco_module.py
|
CovidPrognosis-main
|
cp_examples/moco_pretrain/__init__.py
|
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from argparse import ArgumentParser
from pathlib import Path
import pytorch_lightning as pl
import yaml
from covidprognosis.data.transforms import (
AddGaussianNoise,
Compose,
HistogramNormalize,
RandomGaussianBlur,
TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from moco_module import MoCoModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
data_config = Path.cwd() / "../../configs/data.yaml"
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"max_epochs": 200,
"gpus": 2,
"num_workers": 10,
"batch_size": 128,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = MoCoModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.dataset_dir is None:
with open(data_config, "r") as f:
paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]
if args.dataset_name == "nih":
args.dataset_dir = paths["nih"]
if args.dataset_name == "mimic":
args.dataset_dir = paths["mimic"]
elif args.dataset_name == "chexpert":
args.dataset_dir = paths["chexpert"]
elif args.dataset_name == "mimic-chexpert":
args.dataset_dir = [paths["chexpert"], paths["mimic"]]
else:
raise ValueError("Unrecognized path config.")
# ------------
# checkpoints
# ------------
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
if not checkpoint_dir.exists():
checkpoint_dir.mkdir(parents=True)
elif args.resume_from_checkpoint is None:
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def cli_main(args):
# ------------
# data
# ------------
transform_list = [
transforms.RandomResizedCrop(args.im_size, scale=(0.2, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
RandomGaussianBlur(),
AddGaussianNoise(snr_range=(4, 8)),
HistogramNormalize(),
TensorToRGB(),
]
data_module = XrayDataModule(
dataset_name=args.dataset_name,
dataset_dir=args.dataset_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
use_two_images=True,
train_transform=Compose(transform_list),
val_transform=Compose(transform_list),
test_transform=Compose(transform_list),
)
# ------------
# model
# ------------
model = MoCoModule(
arch=args.arch,
feature_dim=args.feature_dim,
queue_size=args.queue_size,
use_mlp=args.use_mlp,
learning_rate=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
epochs=args.max_epochs,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/moco_pretrain/train_moco.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from pathlib import Path
import math
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tvmodels
class DenseNet(tvmodels.DenseNet):
def forward(self, x):
features = self.features(x)
return F.relu(features, inplace=True)
def filter_nans(logits, labels):
logits = logits[~torch.isnan(labels)]
labels = labels[~torch.isnan(labels)]
return logits, labels
def load_pretrained_model(arch, pretrained_file):
pretrained_dict = torch.load(pretrained_file)["state_dict"]
state_dict = {}
for k, v in pretrained_dict.items():
if k.startswith("model.encoder_q."):
k = k.replace("model.encoder_q.", "")
state_dict[k] = v
if arch.startswith("densenet"):
num_classes = pretrained_dict["model.encoder_q.classifier.weight"].shape[0]
model = DenseNet(num_classes=num_classes)
model.load_state_dict(state_dict)
feature_dim = pretrained_dict["model.encoder_q.classifier.weight"].shape[1]
del model.classifier
else:
raise ValueError(f"Model architecture {arch} is not supported.")
return model, feature_dim
class ContinuousPosEncoding(nn.Module):
def __init__(self, dim, drop=0.1, maxtime=360):
super().__init__()
self.dropout = nn.Dropout(drop)
position = torch.arange(0, maxtime, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim)
)
pe = torch.zeros(maxtime, dim)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, xs, times):
ys = xs
times = times.long()
for b in range(xs.shape[1]):
ys[:, b] += self.pe[times[b]]
return self.dropout(ys)
class MIPModel(nn.Module):
def __init__(
self,
image_model,
feature_dim,
projection_dim,
num_classes,
num_heads,
feedforward_dim,
drop_transformer,
drop_cpe,
pooling,
image_shape=(7, 7),
):
super().__init__()
self.image_shape = image_shape
self.pooling = pooling
self.image_model = image_model
self.group_norm = nn.GroupNorm(32, feature_dim)
self.projection = nn.Conv2d(feature_dim, projection_dim, (1, 1))
transformer_dim = projection_dim * image_shape[0] * image_shape[1]
self.pos_encoding = ContinuousPosEncoding(transformer_dim, drop=drop_cpe)
self.transformer = nn.TransformerEncoderLayer(
d_model=transformer_dim,
dim_feedforward=feedforward_dim,
nhead=num_heads,
dropout=drop_transformer,
)
self.classifier = nn.Linear(feature_dim + projection_dim, num_classes)
def _apply_transformer(self, image_feats: torch.Tensor, times, lens):
B, N, C, H, W = image_feats.shape
image_feats = image_feats.flatten(start_dim=2).permute(
[1, 0, 2]
) # [N, B, C * H * W]
image_feats = self.pos_encoding(image_feats, times)
image_feats = self.transformer(image_feats)
return image_feats.permute([1, 0, 2]).reshape([B, N, C, H, W])
def _pool(self, image_feats, lens):
if self.pooling == "last_timestep":
pooled_feats = []
for b, l in enumerate(lens.tolist()):
pooled_feats.append(image_feats[b, int(l) - 1])
elif self.pooling == "sum":
pooled_feats = []
for b, l in enumerate(lens.tolist()):
pooled_feats.append(image_feats[b, : int(l)].sum(0))
else:
raise ValueError(f"Unkown pooling method: {self.pooling}")
pooled_feats = torch.stack(pooled_feats)
pooled_feats = F.adaptive_avg_pool2d(pooled_feats, (1, 1))
return pooled_feats.squeeze(3).squeeze(2)
def forward(self, images, times, lens):
B, N, C, H, W = images.shape
images = images.reshape([B * N, C, H, W])
# Apply Image Model
image_feats = self.image_model(images)
image_feats = F.relu(self.group_norm(image_feats))
# Apply transformer
image_feats_proj = self.projection(image_feats).reshape(
[B, N, -1, *self.image_shape]
)
image_feats_trans = self._apply_transformer(image_feats_proj, times, lens)
# Concat and apply classifier
image_feats = image_feats.reshape([B, N, -1, *self.image_shape])
image_feats_combined = torch.cat([image_feats, image_feats_trans], dim=2)
image_feats_pooled = self._pool(image_feats_combined, lens)
return self.classifier(image_feats_pooled)
class MIPModule(pl.LightningModule):
def __init__(
self, args, label_list, pos_weights=None,
):
super().__init__()
self.args = args
self.label_list = label_list
self.val_pathology_list = args.val_pathology_list
self.learning_rate = args.learning_rate
self.epochs = args.epochs
# loss function
        if pos_weights is None:
            pos_weights = torch.ones(args.num_classes)
self.register_buffer("pos_weights", pos_weights)
# metrics
self.train_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in args.val_pathology_list]
)
self.val_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in args.val_pathology_list]
)
image_model, feature_dim = load_pretrained_model(
args.arch, args.pretrained_file
)
self.model = MIPModel(
image_model,
feature_dim,
args.projection_dim,
args.num_classes,
args.num_heads,
args.feedforward_dim,
args.drop_transformer,
args.drop_cpe,
args.pooling,
args.image_shape,
)
def forward(self, images, times, lens):
return self.model(images, times, lens)
def loss(self, output, target):
counts = 0
loss = 0
for i in range(len(output)):
pos_weights, _ = filter_nans(self.pos_weights, target[i])
loss_fn = torch.nn.BCEWithLogitsLoss(
pos_weight=pos_weights, reduction="sum"
)
bind_logits, bind_labels = filter_nans(output[i], target[i])
loss = loss + loss_fn(bind_logits, bind_labels)
counts = counts + bind_labels.numel()
counts = 1 if counts == 0 else counts
loss = loss / counts
return loss
def training_step(self, batch, batch_idx):
# forward pass
output = self(batch["images"], batch["times"], batch["lens"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
self.log("train_metrics/loss", loss_val)
for i, path in enumerate(self.val_pathology_list):
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
self.train_acc[i](logits, labels)
self.log(
f"train_metrics/accuracy_{path}",
self.train_acc[i],
on_step=True,
on_epoch=False,
)
return loss_val
def validation_step(self, batch, batch_idx):
# forward pass
output = self(batch["images"], batch["times"], batch["lens"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
result_logits = {}
result_labels = {}
self.log("val_metrics/loss", loss_val)
for path in self.val_pathology_list:
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
result_logits[path] = logits
result_labels[path] = labels
return {"logits": result_logits, "targets": result_labels}
def validation_epoch_end(self, outputs):
auc_vals = []
for i, path in enumerate(self.val_pathology_list):
logits = []
targets = []
for output in outputs:
logits.append(output["logits"][path].flatten())
targets.append(output["targets"][path].flatten())
logits = torch.cat(logits)
targets = torch.cat(targets)
print(f"path: {path}, len: {len(logits)}")
self.val_acc[i](logits, targets)
try:
auc_val = pl.metrics.functional.auroc(torch.sigmoid(logits), targets)
auc_vals.append(auc_val)
except ValueError:
auc_val = 0
print(f"path: {path}, auc_val: {auc_val}")
self.log(
f"val_metrics/accuracy_{path}",
self.val_acc[i],
on_step=False,
on_epoch=True,
)
self.log(f"val_metrics/auc_{path}", auc_val)
self.log("val_metrics/auc_mean", sum(auc_vals) / len(auc_vals))
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.parameters(), self.learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--pretrained_file", type=Path, required=True)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--num_classes", default=14, type=int)
parser.add_argument("--val_pathology_list", nargs="+")
parser.add_argument("--pos_weights", default=None, type=float)
# Training params
parser.add_argument("--learning_rate", default=1e-3, type=float)
parser.add_argument("--epochs", default=50, type=int)
# Model params
parser.add_argument("--projection_dim", type=int, default=64)
parser.add_argument("--num_heads", type=int, default=2)
parser.add_argument("--feedforward_dim", type=int, default=128)
parser.add_argument("--drop_transformer", type=float, default=0.5)
parser.add_argument("--drop_cpe", type=float, default=0.5)
parser.add_argument(
"--pooling", choices=["last_timestep", "sum"], default="last_timestep"
)
parser.add_argument("--image_shape", default=(7, 7))
return parser
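# --- Shape sketch (editor's addition, not part of the original module) ---
# Traces the tensor shapes through MIPModel with a stand-in image encoder, so
# the [B, N, C, H, W] convention (a batch of image sequences) is explicit. The
# strided conv below merely mimics a 224x224 -> 7x7 feature map; it is not the
# pretrained DenseNet used in practice, and every size here is made up.
if __name__ == "__main__":
    feature_dim, projection_dim, num_classes = 64, 8, 14
    stub_encoder = nn.Conv2d(3, feature_dim, kernel_size=32, stride=32)
    model = MIPModel(
        image_model=stub_encoder,
        feature_dim=feature_dim,
        projection_dim=projection_dim,
        num_classes=num_classes,
        num_heads=2,
        feedforward_dim=128,
        drop_transformer=0.1,
        drop_cpe=0.1,
        pooling="last_timestep",
        image_shape=(7, 7),
    )
    B, N = 2, 3  # two patients, up to three exams each
    images = torch.randn(B, N, 3, 224, 224)
    times = torch.tensor([[0, 24, 48], [0, 12, 0]])  # hours since first exam
    lens = torch.tensor([3, 2])                      # number of valid exams per patient
    print(model(images, times, lens).shape)          # torch.Size([2, 14])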
|
CovidPrognosis-main
|
cp_examples/mip_finetune/mip_model.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from warnings import warn
import numpy as np
import pytorch_lightning as pl
import torch
from covidprognosis.data.transforms import (
    Compose,
    HistogramNormalize,
    NanToInt,
    RemapLabel,
    TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from cp_examples.mip_finetune.mip_model import MIPModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"batch_size": 32,
"max_epochs": 50,
"gpus": 1,
"num_workers": 10,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser.add_argument("--uncertain_label", default=np.nan, type=float)
parser.add_argument("--nan_label", default=np.nan, type=float)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = MIPModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.pretrained_file is None:
warn("Pretrained file not specified, training from scratch.")
else:
logging.info(f"Loading pretrained file from {args.pretrained_file}")
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
checkpoint_dir.mkdir(exist_ok=True, parents=True)
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def fetch_pos_weights(csv, label_list, uncertain_label, nan_label):
pos = (csv[label_list] == 1).sum()
neg = (csv[label_list] == 0).sum()
if uncertain_label == 1:
pos = pos + (csv[label_list] == -1).sum()
elif uncertain_label == -1:
neg = neg + (csv[label_list] == -1).sum()
if nan_label == 1:
pos = pos + (csv[label_list].isna()).sum()
elif nan_label == -1:
neg = neg + (csv[label_list].isna()).sum()
pos_weights = torch.tensor((neg / np.maximum(pos, 1)).values.astype(float))
return pos_weights
def create_data_module(train_transform_list, val_transform_list):
data_module = None # TODO: Create data loader
return data_module
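# Illustrative sketch (not part of the original file): the TODO above could be
# filled in by mirroring cp_examples/sip_finetune/train_sip.py, which builds an
# XrayDataModule from the same transform lists. This assumes the relevant args
# (dataset_name, dataset_dir, batch_size, num_workers) are made available here
# and that covidprognosis.data.transforms.Compose is also imported:
#
#     data_module = XrayDataModule(
#         dataset_name=args.dataset_name,
#         dataset_dir=args.dataset_dir,
#         batch_size=args.batch_size,
#         num_workers=args.num_workers,
#         train_transform=Compose(train_transform_list),
#         val_transform=Compose(val_transform_list),
#         test_transform=Compose(val_transform_list),
#     )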
def cli_main(args):
# ------------
# data
# ------------
train_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
NanToInt(args.nan_label),
]
val_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
]
data_module = create_data_module(train_transform_list, val_transform_list)
# ------------
# model
# ------------
pos_weights = fetch_pos_weights(
csv=data_module.train_dataset.csv,
label_list=data_module.label_list,
uncertain_label=args.uncertain_label,
nan_label=args.nan_label,
)
model = MIPModule(
args,
data_module.label_list,
pos_weights,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/mip_finetune/train_mip.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from pathlib import Path
import pytorch_lightning as pl
import requests
import torch
import torchvision.models as models
from tqdm import tqdm
def filter_nans(logits, labels):
logits = logits[~torch.isnan(labels)]
labels = labels[~torch.isnan(labels)]
return logits, labels
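# Illustrative example (not in the original file), using hypothetical values:
#
#     logits = torch.tensor([0.2, 1.5, -0.3])
#     labels = torch.tensor([1.0, float("nan"), 0.0])
#     filter_nans(logits, labels)
#     # -> (tensor([0.2000, -0.3000]), tensor([1., 0.]))
#
# i.e. entries whose label is missing (NaN) are dropped from both tensors.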
def validate_pretrained_model(state_dict, pretrained_file):
# sanity check to make sure we're not altering weights
pretrained_dict = torch.load(pretrained_file, map_location="cpu")["state_dict"]
model_dict = dict()
for k, v in pretrained_dict.items():
if "model.encoder_q" in k:
model_dict[k[len("model.encoder_q.") :]] = v
for k in list(model_dict.keys()):
# only ignore fc layer
if "classifier.weight" in k or "classifier.bias" in k:
continue
if "fc.weight" in k or "fc.bias" in k:
continue
assert (
state_dict[k].cpu() == model_dict[k]
).all(), f"{k} changed in linear classifier training."
def download_model(url, fname):
response = requests.get(url, timeout=10, stream=True)
chunk_size = 8 * 1024 * 1024 # 8 MB chunks
total_size_in_bytes = int(response.headers.get("content-length", 0))
progress_bar = tqdm(
desc="Downloading state_dict",
total=total_size_in_bytes,
unit="iB",
unit_scale=True,
)
with open(fname, "wb") as fh:
for chunk in response.iter_content(chunk_size):
progress_bar.update(len(chunk))
fh.write(chunk)
class SipModule(pl.LightningModule):
def __init__(
self,
arch,
num_classes,
label_list,
val_pathology_list,
pretrained_file=None,
learning_rate=1e-3,
pos_weights=None,
epochs=5,
):
super().__init__()
# keep None as None so the "train from scratch" branch below still triggers
pretrained_file = str(pretrained_file) if pretrained_file is not None else None
self.label_list = label_list
self.val_pathology_list = val_pathology_list
self.learning_rate = learning_rate
self.epochs = epochs
self.pretrained_file = pretrained_file
# load the pretrained model
if pretrained_file is not None:
self.pretrained_file = str(self.pretrained_file)
# download the model if given a url
if "https://" in pretrained_file:
url = self.pretrained_file
self.pretrained_file = Path.cwd() / pretrained_file.split("/")[-1]
download_model(url, self.pretrained_file)
pretrained_dict = torch.load(self.pretrained_file)["state_dict"]
state_dict = {}
for k, v in pretrained_dict.items():
if k.startswith("model.encoder_q."):
k = k.replace("model.encoder_q.", "")
state_dict[k] = v
if "model.encoder_q.classifier.weight" in pretrained_dict.keys():
feature_dim = pretrained_dict[
"model.encoder_q.classifier.weight"
].shape[0]
in_features = pretrained_dict[
"model.encoder_q.classifier.weight"
].shape[1]
self.model = models.__dict__[arch](num_classes=feature_dim)
self.model.load_state_dict(state_dict)
del self.model.classifier
self.model.add_module(
"classifier", torch.nn.Linear(in_features, num_classes)
)
elif "model.encoder_q.fc.weight" in pretrained_dict.keys():
feature_dim = pretrained_dict["model.encoder_q.fc.weight"].shape[0]
in_features = pretrained_dict["model.encoder_q.fc.weight"].shape[1]
self.model = models.__dict__[arch](num_classes=feature_dim)
self.model.load_state_dict(state_dict)
del self.model.fc
self.model.add_module("fc", torch.nn.Linear(in_features, num_classes))
else:
raise RuntimeError("Unrecognized classifier.")
else:
self.model = models.__dict__[arch](num_classes=num_classes)
# loss function
if pos_weights is None:
pos_weights = torch.ones(num_classes)
self.register_buffer("pos_weights", pos_weights)
print(self.pos_weights)
# metrics
self.train_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in val_pathology_list]
)
self.val_acc = torch.nn.ModuleList(
[pl.metrics.Accuracy() for _ in val_pathology_list]
)
def on_epoch_start(self):
if self.pretrained_file is not None:
self.model.eval()
def forward(self, image):
return self.model(image)
def loss(self, output, target):
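# Note (added for clarity): this computes a masked, positive-weighted BCE.
# For each sample in the batch, labels that are NaN (unobserved) are dropped
# together with the matching pos_weight entries, the per-label losses are
# summed, and the total is divided by the number of observed labels across
# the whole batch.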
counts = 0
loss = 0
for i in range(len(output)):
pos_weights, _ = filter_nans(self.pos_weights, target[i])
loss_fn = torch.nn.BCEWithLogitsLoss(
pos_weight=pos_weights, reduction="sum"
)
bind_logits, bind_labels = filter_nans(output[i], target[i])
loss = loss + loss_fn(bind_logits, bind_labels)
counts = counts + bind_labels.numel()
counts = 1 if counts == 0 else counts
loss = loss / counts
return loss
def training_step(self, batch, batch_idx):
# forward pass
output = self(batch["image"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
self.log("train_metrics/loss", loss_val)
for i, path in enumerate(self.val_pathology_list):
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
self.train_acc[i](logits, labels)
self.log(
f"train_metrics/accuracy_{path}",
self.train_acc[i],
on_step=True,
on_epoch=False,
)
return loss_val
def validation_step(self, batch, batch_idx):
# forward pass
output = self(batch["image"])
target = batch["labels"]
# calculate loss
loss_val = self.loss(output, target)
# metrics
result_logits = {}
result_labels = {}
self.log("val_metrics/loss", loss_val)
for path in self.val_pathology_list:
j = self.label_list.index(path)
logits, labels = filter_nans(output[:, j], target[:, j])
result_logits[path] = logits
result_labels[path] = labels
return {"logits": result_logits, "targets": result_labels}
def validation_epoch_end(self, outputs):
# make sure we didn't change the pretrained weights
if self.pretrained_file is not None:
validate_pretrained_model(self.model.state_dict(), self.pretrained_file)
auc_vals = []
for i, path in enumerate(self.val_pathology_list):
logits = []
targets = []
for output in outputs:
logits.append(output["logits"][path].flatten())
targets.append(output["targets"][path].flatten())
logits = torch.cat(logits)
targets = torch.cat(targets)
print(f"path: {path}, len: {len(logits)}")
self.val_acc[i](logits, targets)
try:
auc_val = pl.metrics.functional.auroc(torch.sigmoid(logits), targets)
auc_vals.append(auc_val)
except ValueError:
auc_val = 0
print(f"path: {path}, auc_val: {auc_val}")
self.log(
f"val_metrics/accuracy_{path}",
self.val_acc[i],
on_step=False,
on_epoch=True,
)
self.log(f"val_metrics/auc_{path}", auc_val)
self.log("val_metrics/auc_mean", sum(auc_vals) / len(auc_vals))
def configure_optimizers(self):
if self.pretrained_file is None:
model = self.model
else:
if hasattr(self.model, "classifier"):
model = self.model.classifier
elif hasattr(self.model, "fc"):
model = self.model.fc
else:
raise RuntimeError("Unrecognized classifier.")
optimizer = torch.optim.Adam(model.parameters(), self.learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.epochs)
return [optimizer], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--arch", default="densenet121", type=str)
parser.add_argument("--num_classes", default=14, type=int)
parser.add_argument("--pretrained_file", default=None, type=str)
parser.add_argument("--val_pathology_list", nargs="+")
parser.add_argument("--learning_rate", default=1e-2, type=float)
parser.add_argument("--pos_weights", default=None, type=float)
return parser
|
CovidPrognosis-main
|
cp_examples/sip_finetune/sip_finetune.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from warnings import warn
import numpy as np
import pytorch_lightning as pl
import torch
import yaml
from covidprognosis.data.transforms import (
Compose,
HistogramNormalize,
NanToInt,
RemapLabel,
TensorToRGB,
)
from covidprognosis.plmodules import XrayDataModule
from torchvision import transforms
from sip_finetune import SipModule
def build_args(arg_defaults=None):
pl.seed_everything(1234)
data_config = Path.cwd() / "../../configs/data.yaml"
tmp = arg_defaults
arg_defaults = {
"accelerator": "ddp",
"batch_size": 32,
"max_epochs": 5,
"gpus": 1,
"num_workers": 10,
"callbacks": [],
}
if tmp is not None:
arg_defaults.update(tmp)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument("--im_size", default=224, type=int)
parser.add_argument("--uncertain_label", default=np.nan, type=float)
parser.add_argument("--nan_label", default=np.nan, type=float)
parser = pl.Trainer.add_argparse_args(parser)
parser = XrayDataModule.add_model_specific_args(parser)
parser = SipModule.add_model_specific_args(parser)
parser.set_defaults(**arg_defaults)
args = parser.parse_args()
if args.default_root_dir is None:
args.default_root_dir = Path.cwd()
if args.pretrained_file is None:
warn("Pretrained file not specified, training from scratch.")
else:
logging.info(f"Loading pretrained file from {args.pretrained_file}")
if args.dataset_dir is None:
with open(data_config, "r") as f:
paths = yaml.load(f, Loader=yaml.SafeLoader)["paths"]
if args.dataset_name == "nih":
args.dataset_dir = paths["nih"]
if args.dataset_name == "mimic":
args.dataset_dir = paths["mimic"]
elif args.dataset_name == "chexpert":
args.dataset_dir = paths["chexpert"]
elif args.dataset_name == "mimic-chexpert":
args.dataset_dir = [paths["chexpert"], paths["mimic"]]
else:
raise ValueError("Unrecognized path config.")
if args.dataset_name in ("chexpert", "mimic", "mimic-chexpert"):
args.val_pathology_list = [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Pleural Effusion",
]
elif args.dataset_name == "nih":
args.val_pathology_list = [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Effusion",
]
else:
raise ValueError("Unrecognized dataset.")
# ------------
# checkpoints
# ------------
checkpoint_dir = Path(args.default_root_dir) / "checkpoints"
if not checkpoint_dir.exists():
checkpoint_dir.mkdir(parents=True)
elif args.resume_from_checkpoint is None:
ckpt_list = sorted(checkpoint_dir.glob("*.ckpt"), key=os.path.getmtime)
if ckpt_list:
args.resume_from_checkpoint = str(ckpt_list[-1])
args.callbacks.append(
pl.callbacks.ModelCheckpoint(dirpath=checkpoint_dir, verbose=True)
)
return args
def fetch_pos_weights(dataset_name, csv, label_list, uncertain_label, nan_label):
if dataset_name == "nih":
pos = [(csv["Finding Labels"].str.contains(lab)).sum() for lab in label_list]
neg = [(~csv["Finding Labels"].str.contains(lab)).sum() for lab in label_list]
pos_weights = torch.tensor((neg / np.maximum(pos, 1)).astype(float))
else:
pos = (csv[label_list] == 1).sum()
neg = (csv[label_list] == 0).sum()
if uncertain_label == 1:
pos = pos + (csv[label_list] == -1).sum()
elif uncertain_label == -1:
neg = neg + (csv[label_list] == -1).sum()
if nan_label == 1:
pos = pos + (csv[label_list].isna()).sum()
elif nan_label == -1:
neg = neg + (csv[label_list].isna()).sum()
pos_weights = torch.tensor((neg / np.maximum(pos, 1)).values.astype(float))
return pos_weights
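# Worked example (hypothetical data, not in the original file): for a
# CheXpert-style CSV in which a label column has 1 positive and 3 negative
# rows, the returned pos_weight for that column is 3 / 1 = 3.0, which
# re-balances BCEWithLogitsLoss toward the rarer positive class. For the NIH
# branch the counts come from substring matches on the "Finding Labels"
# column instead of the per-label columns.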
def cli_main(args):
# ------------
# data
# ------------
train_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
NanToInt(args.nan_label),
]
val_transform_list = [
transforms.Resize(args.im_size),
transforms.CenterCrop(args.im_size),
transforms.ToTensor(),
HistogramNormalize(),
TensorToRGB(),
RemapLabel(-1, args.uncertain_label),
]
data_module = XrayDataModule(
dataset_name=args.dataset_name,
dataset_dir=args.dataset_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
train_transform=Compose(train_transform_list),
val_transform=Compose(val_transform_list),
test_transform=Compose(val_transform_list),
)
# ------------
# model
# ------------
pos_weights = fetch_pos_weights(
dataset_name=args.dataset_name,
csv=data_module.train_dataset.csv,
label_list=data_module.label_list,
uncertain_label=args.uncertain_label,
nan_label=args.nan_label,
)
model = SipModule(
arch=args.arch,
num_classes=len(data_module.label_list),
pretrained_file=args.pretrained_file,
label_list=data_module.label_list,
val_pathology_list=args.val_pathology_list,
learning_rate=args.learning_rate,
pos_weights=pos_weights,
epochs=args.max_epochs,
)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=data_module)
if __name__ == "__main__":
args = build_args()
cli_main(args)
|
CovidPrognosis-main
|
cp_examples/sip_finetune/train_sip.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import json
import os
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import timm
assert timm.__version__ == "0.3.2" # version check
import timm.optim.optim_factory as optim_factory
from engine_pretrain import train_one_epoch
import models.fcmae as fcmae
import utils
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import str2bool
def get_args_parser():
parser = argparse.ArgumentParser('FCMAE pre-training', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=800, type=int)
parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
help='epochs to warmup LR')
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation step')
# Model parameters
parser.add_argument('--model', default='convnextv2_base', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--mask_ratio', default=0.6, type=float,
help='Masking ratio (percentage of removed patches).')
parser.add_argument('--norm_pix_loss', action='store_true',
help='Use (per-patch) normalized pixels as targets for computing loss')
parser.set_defaults(norm_pix_loss=True)
parser.add_argument('--decoder_depth', type=int, default=1)
parser.add_argument('--decoder_embed_dim', type=int, default=512)
# Optimizer parameters
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=1.5e-4, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# simple augmentation
transform_train = transforms.Compose([
transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), # 3 is bicubic
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
print(dataset_train)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
# log_writer = SummaryWriter(log_dir=args.log_dir)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
# define the model
model = fcmae.__dict__[args.model](
mask_ratio=args.mask_ratio,
decoder_depth=args.decoder_depth,
decoder_embed_dim=args.decoder_embed_dim,
norm_pix_loss=args.norm_pix_loss
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
eff_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // eff_batch_size
if args.lr is None:
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.update_freq)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
print(optimizer)
loss_scaler = NativeScaler()
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, data_loader_train,
optimizer, device, epoch, loss_scaler,
log_writer=log_writer,
args=args
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
args = get_args_parser()
args = args.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
ConvNeXt-V2-main
|
main_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main_finetune as trainer
import submitit
def parse_args():
trainer_parser = trainer.get_args_parser()
parser = argparse.ArgumentParser("Submitit for finetune", parents=[trainer_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_finetune as trainer
self._setup_gpu_args()
trainer.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="finetune")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
ConvNeXt-V2-main
|
submitit_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import create_transform
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 100
elif args.data_set == 'IMNET':
print("reading from datapath", args.data_path)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
print("Number of the class = %d" % nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
# warping (no cropping) when evaluated at 384 or larger
if args.input_size >= 384:
t.append(
transforms.Resize((args.input_size, args.input_size),
interpolation=transforms.InterpolationMode.BICUBIC),
)
print(f"Warping {args.input_size} size input images...")
else:
if args.crop_pct is None:
args.crop_pct = 224 / 256
size = int(args.input_size / args.crop_pct)
t.append(
# to maintain same ratio w.r.t. 224 images
transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
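# Worked example of the eval resize logic above (illustrative): with
# input_size=224 and crop_pct left at None, crop_pct becomes 224/256, so the
# image is resized to int(224 / (224/256)) = 256 on the short side and then
# center-cropped to 224, matching the standard ImageNet evaluation pipeline.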
|
ConvNeXt-V2-main
|
datasets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
from utils import adjust_learning_rate
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
log_writer=None, args=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
update_freq = args.update_freq
use_amp = args.use_amp
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % update_freq == 0:
adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if use_amp:
with torch.cuda.amp.autocast():
output = model(samples)
loss = criterion(output, targets)
else: # full precision
output = model(samples)
loss = criterion(output, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
assert math.isfinite(loss_value)
if use_amp:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
else: # full precision
loss /= update_freq
loss.backward()
if (data_iter_step + 1) % update_freq == 0:
optimizer.step()
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
if use_amp:
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
if use_amp:
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, use_amp=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if use_amp:
with torch.cuda.amp.autocast():
output = model(images)
if isinstance(output, dict):
output = output['logits']
loss = criterion(output, target)
else:
output = model(images)
if isinstance(output, dict):
output = output['logits']
loss = criterion(output, target)
torch.cuda.synchronize()
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
ConvNeXt-V2-main
|
engine_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Iterable
import torch
import utils
def train_one_epoch(model: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler,
log_writer=None,
args=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
update_freq = args.update_freq
optimizer.zero_grad()
for data_iter_step, (samples, labels) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % update_freq == 0:
utils.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
if not isinstance(samples, list):
samples = samples.to(device, non_blocking=True)
labels = labels.to(device, non_blocking=True)
loss, _, _ = model(samples, labels, mask_ratio=args.mask_ratio)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= update_freq
loss_scaler(loss, optimizer, parameters=model.parameters(),
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
torch.cuda.empty_cache() # clear the GPU cache at a regular interval for training ME network
metric_logger.update(loss=loss_value)
lr = optimizer.param_groups[0]["lr"]
metric_logger.update(lr=lr)
loss_value_reduce = utils.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % update_freq == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.update(train_loss=loss_value_reduce, head="loss", step=epoch_1000x)
log_writer.update(lr=lr, head="opt", step=epoch_1000x)
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
ConvNeXt-V2-main
|
engine_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import math
import time
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from tensorboardX import SummaryWriter
from collections import OrderedDict
def str2bool(v):
"""
Converts string to bool type; enables command line
arguments in the format of '--arg1 true --arg2 false'
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
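# Usage example (mirrors how str2bool is used elsewhere in this repo):
#
#     parser.add_argument('--model_ema', type=str2bool, default=False)
#     # accepts --model_ema true / false / yes / no / 1 / 0 on the command line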
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
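# Illustrative usage (hypothetical values, not in the original file):
#
#     meter = SmoothedValue(window_size=20)
#     for v in (1.0, 0.5, 0.25):
#         meter.update(v)
#     meter.global_avg   # (1.0 + 0.5 + 0.25) / 3 ~= 0.583
#     meter.value        # 0.25, the most recent value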
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
class WandbLogger(object):
def __init__(self, args):
self.args = args
try:
import wandb
self._wandb = wandb
except ImportError:
raise ImportError(
"To use the Weights and Biases Logger please install wandb."
"Run `pip install wandb` to install it."
)
# Initialize a W&B run
if self._wandb.run is None:
self._wandb.init(
project=args.project,
config=args
)
def log_epoch_metrics(self, metrics, commit=True):
"""
Log train/test metrics onto W&B.
"""
# Log number of model parameters as W&B summary
self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None)
metrics.pop('n_parameters', None)
# Log current epoch
self._wandb.log({'epoch': metrics.get('epoch')}, commit=False)
metrics.pop('epoch')
for k, v in metrics.items():
if 'train' in k:
self._wandb.log({f'Global Train/{k}': v}, commit=False)
elif 'test' in k:
self._wandb.log({f'Global Test/{k}': v}, commit=False)
self._wandb.log({})
def log_checkpoints(self):
output_dir = self.args.output_dir
model_artifact = self._wandb.Artifact(
self._wandb.run.id + "_model", type="model"
)
model_artifact.add_dir(output_dir)
self._wandb.log_artifact(model_artifact, aliases=["latest", "best"])
def set_steps(self):
# Set global training step
self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step')
# Set epoch-wise step
self._wandb.define_metric('Global Train/*', step_metric='epoch')
self._wandb.define_metric('Global Test/*', step_metric='epoch')
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def all_reduce_mean(x):
world_size = get_world_size()
if world_size > 1:
x_reduce = torch.tensor(x).cuda()
dist.all_reduce(x_reduce)
x_reduce /= world_size
return x_reduce.item()
else:
return x
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
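# Typical usage sketch (mirrors engine_finetune.train_one_epoch; names such as
# model, criterion, optimizer, samples, targets, step and update_freq are
# assumed to come from the caller):
#
#     scaler = NativeScalerWithGradNormCount()
#     with torch.cuda.amp.autocast():
#         loss = criterion(model(samples), targets)
#     grad_norm = scaler(loss / update_freq, optimizer,
#                        parameters=model.parameters(),
#                        update_grad=(step + 1) % update_freq == 0)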
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
if is_main_process() and isinstance(epoch, int):
to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq
old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del)
if os.path.exists(old_ckpt):
os.remove(old_ckpt)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema'
args.start_epoch = checkpoint['epoch'] + 1
else:
assert args.eval, 'Does not support resuming with checkpoint-best'
if hasattr(args, 'model_ema') and args.model_ema:
if 'model_ema' in checkpoint.keys():
model_ema.ema.load_state_dict(checkpoint['model_ema'])
else:
model_ema.ema.load_state_dict(checkpoint['model'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
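# Illustrative check (hypothetical numbers, not in the original file): with
# base_value=1e-3, final_value=1e-5, epochs=10, niter_per_ep=5 and
# warmup_epochs=2, the returned schedule has 10 * 5 = 50 entries; the first
# 2 * 5 = 10 ramp linearly from 0 to 1e-3 and the remaining 40 follow a
# half-cosine decay from 1e-3 toward 1e-5.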
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate with half-cycle cosine after warmup"""
if epoch < args.warmup_epochs:
lr = args.lr * epoch / args.warmup_epochs
else:
lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
(1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
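# Worked example (illustrative settings): with args.lr=1e-3, args.min_lr=0,
# args.warmup_epochs=5 and args.epochs=25:
#   epoch 2.5 -> 1e-3 * 2.5 / 5 = 5e-4            (linear warmup)
#   epoch 5   -> 1e-3                             (end of warmup)
#   epoch 15  -> 1e-3 * 0.5 * (1 + cos(pi/2)) = 5e-4
#   epoch 25  -> 0                                (end of the half-cosine decay)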
def remap_checkpoint_keys(ckpt):
new_ckpt = OrderedDict()
for k, v in ckpt.items():
if k.startswith('encoder'):
k = '.'.join(k.split('.')[1:]) # remove encoder in the name
if k.endswith('kernel'):
k = '.'.join(k.split('.')[:-1]) # remove kernel in the name
new_k = k + '.weight'
if len(v.shape) == 3: # reshape standard convolution
kv, in_dim, out_dim = v.shape
ks = int(math.sqrt(kv))
new_ckpt[new_k] = v.permute(2, 1, 0).\
reshape(out_dim, in_dim, ks, ks).transpose(3, 2)
elif len(v.shape) == 2: # reshape depthwise convolution
kv, dim = v.shape
ks = int(math.sqrt(kv))
new_ckpt[new_k] = v.permute(1, 0).\
reshape(dim, 1, ks, ks).transpose(3, 2)
continue
elif 'ln' in k or 'linear' in k:
k = k.split('.')
k.pop(-2) # remove ln and linear in the name
new_k = '.'.join(k)
else:
new_k = k
new_ckpt[new_k] = v
# reshape grn affine parameters and biases
for k, v in new_ckpt.items():
if k.endswith('bias') and len(v.shape) != 1:
new_ckpt[k] = v.reshape(-1)
elif 'grn' in k:
new_ckpt[k] = v.unsqueeze(0).unsqueeze(1)
return new_ckpt
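# Shape example for the kernel remapping above (illustrative): a saved
# depthwise kernel of shape (49, 128) (kv=49 -> ks=7, dim=128) becomes a
# (128, 1, 7, 7) Conv2d weight, while a standard kernel of shape
# (49, in_dim, out_dim) becomes (out_dim, in_dim, 7, 7).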
|
ConvNeXt-V2-main
|
utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import uuid
from pathlib import Path
import main_pretrain as trainer
import submitit
def parse_args():
trainer_parser = trainer.get_args_parser()
parser = argparse.ArgumentParser("Submitit for pretrain", parents=[trainer_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_pretrain as trainer
self._setup_gpu_args()
trainer.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="pretrain")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
ConvNeXt-V2-main
|
submitit_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import numpy as np
import time
import json
import os
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, LayerDecayValueAssigner
from datasets import build_dataset
from engine_finetune import train_one_epoch, evaluate
import utils
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import str2bool, remap_checkpoint_keys
import models.convnextv2 as convnextv2
def get_args_parser():
parser = argparse.ArgumentParser('FCMAE fine-tuning', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
help='Per GPU batch size')
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--update_freq', default=1, type=int,
help='gradient accumulation steps')
# Model parameters
parser.add_argument('--model', default='convnextv2_base', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='image input size')
parser.add_argument('--drop_path', type=float, default=0., metavar='PCT',
help='Drop path rate (default: 0.0)')
parser.add_argument('--layer_decay_type', type=str, choices=['single', 'group'], default='single',
help="""Layer decay strategies. The single strategy assigns a distinct decaying value for each layer,
whereas the group strategy assigns the same decaying value for three consecutive layers""")
# EMA related parameters
parser.add_argument('--model_ema', type=str2bool, default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False, help='')
parser.add_argument('--model_ema_eval', type=str2bool, default=False, help='Using ema to eval during training.')
# Optimization parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=5e-4, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=1.0)
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy, e.g. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', type=str2bool, default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0.,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--head_init_scale', default=0.001, type=float,
help='classifier head initial scale, typically adjusted in fine-tuning')
parser.add_argument('--model_key', default='model|module', type=str,
help='which key to load from saved state dict, usually model or model_ema')
parser.add_argument('--model_prefix', default='', type=str)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
type=str, help='ImageNet dataset path')
parser.add_argument('--auto_resume', type=str2bool, default=True)
parser.add_argument('--save_ckpt', type=str2bool, default=True)
parser.add_argument('--save_ckpt_freq', default=1, type=int)
parser.add_argument('--save_ckpt_num', default=3, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', type=str2bool, default=False,
help='Perform evaluation only')
parser.add_argument('--dist_eval', type=str2bool, default=True,
help='Enabling distributed evaluation')
parser.add_argument('--disable_eval', type=str2bool, default=False,
help='Disabling evaluation during training')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', type=str2bool, default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', type=str2bool, default=False)
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--use_amp', type=str2bool, default=False,
help="Use apex AMP (Automatic Mixed Precision) or not")
return parser
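# Example invocation (a sketch, not a command taken from the repo docs): every flag below is
# defined in get_args_parser() above, while the torchrun launcher, GPU count and the paths are
# assumptions that depend on the local setup.
#   torchrun --nproc_per_node=8 main_finetune.py \
#       --model convnextv2_base --finetune /path/to/fcmae_checkpoint.pt \
#       --batch_size 64 --update_freq 1 --epochs 100 \
#       --data_path /path/to/imagenet --output_dir ./ft_output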
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval:
args.dist_eval = False
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = convnextv2.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
head_init_scale=args.head_init_scale,
)
if args.finetune:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# remove decoder weights
checkpoint_model_keys = list(checkpoint_model.keys())
for k in checkpoint_model_keys:
if 'decoder' in k or 'mask_token' in k or \
'proj' in k or 'pred' in k:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
checkpoint_model = remap_checkpoint_keys(checkpoint_model)
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
torch.nn.init.constant_(model.head.bias, 0.)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
eff_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // eff_batch_size
if args.lr is None:
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.update_freq)
print("effective batch size: %d" % eff_batch_size)
if args.layer_decay != 1.0:
assert args.layer_decay_type in ['single', 'group']
if args.layer_decay_type == 'group': # applies for Base and Large models
num_layers = 12
else:
num_layers = sum(model_without_ddp.depths)
assigner = LayerDecayValueAssigner(
list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)),
depths=model_without_ddp.depths, layer_decay_type=args.layer_decay_type)
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=None,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
print(f"Eval only mode")
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
return
max_accuracy = 0.0
if args.model_ema and args.model_ema_eval:
max_accuracy_ema = 0.0
print("Start training for %d epochs" % args.epochs)
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer,
args=args
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
# repeat testing routines for EMA, if ema eval is turned on
if args.model_ema and args.model_ema_eval:
test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
if max_accuracy_ema < test_stats_ema["acc1"]:
max_accuracy_ema = test_stats_ema["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema)
print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('FCMAE fine-tuning', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
ConvNeXt-V2-main
|
main_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_convnext_single(var_name, depths):
"""
Each layer is assigned a distinct layer id
"""
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
layer_id = sum(depths[:stage_id]) + 1
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
layer_id = sum(depths[:stage_id]) + block_id + 1
return layer_id
else:
return sum(depths) + 1
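# Worked example for depths = [3, 3, 27, 3]: "downsample_layers.2.*" maps to 3+3+1 = 7,
# "stages.2.5.*" maps to 3+3+5+1 = 12, and any remaining parameter (norm, head) maps to
# sum(depths) + 1 = 37.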
def get_num_layer_for_convnext(var_name):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if var_name.startswith("downsample_layers"):
stage_id = int(var_name.split('.')[1])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif var_name.startswith("stages"):
stage_id = int(var_name.split('.')[1])
block_id = int(var_name.split('.')[2])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
return num_max_layer + 1
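# Worked example of the 12-group mapping above for depths = [3, 3, 27, 3]:
#   downsample_layers.0 (stem) -> 0; stages.0.* -> 1; downsample_layers.1, stages.1.* -> 2;
#   downsample_layers.2 -> 3; stages.2.{0..2} -> 3, stages.2.{3..5} -> 4, ..., stages.2.{24..26} -> 11;
#   downsample_layers.3, stages.3.* -> 12; everything else (final norm, head) -> 13.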
class LayerDecayValueAssigner(object):
def __init__(self, values, depths=[3,3,27,3], layer_decay_type='single'):
self.values = values
self.depths = depths
self.layer_decay_type = layer_decay_type
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
if self.layer_decay_type == 'single':
return get_num_layer_for_convnext_single(var_name, self.depths)
else:
return get_num_layer_for_convnext(var_name)
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list or \
name.endswith(".gamma") or name.endswith(".beta"):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
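# Usage sketch (hedged): the returned groups plug directly into a torch optimizer, e.g.
# torch.optim.AdamW(get_parameter_groups(model, weight_decay=0.05), lr=1e-3); each group keeps
# its own "weight_decay" plus an extra "lr_scale" entry that the training code can later use
# to rescale the learning rate of that layer group.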
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
# if weight_decay and filter_bias_and_bn:
if filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False, "Invalid optimizer"
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
|
ConvNeXt-V2-main
|
optim_factory.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from .utils import LayerNorm, GRN
class Block(nn.Module):
""" ConvNeXtV2 Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
"""
def __init__(self, dim, drop_path=0.):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.grn = GRN(4 * dim)
self.pwconv2 = nn.Linear(4 * dim, dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward_features(self, x):
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def convnextv2_atto(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
return model
def convnextv2_femto(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
return model
def convnextv2_pico(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
return model
def convnextv2_nano(**kwargs):
model = ConvNeXtV2(depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
return model
def convnextv2_tiny(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
return model
def convnextv2_base(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
return model
def convnextv2_large(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
return model
def convnextv2_huge(**kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], **kwargs)
return model
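# Quick smoke test (a sketch, not part of the original file):
#   import torch
#   model = convnextv2_tiny(num_classes=1000)
#   logits = model(torch.randn(2, 3, 224, 224))
#   assert logits.shape == (2, 1000)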
|
ConvNeXt-V2-main
|
models/convnextv2.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from timm.models.layers import trunc_normal_
from .utils import (
LayerNorm,
MinkowskiLayerNorm,
MinkowskiGRN,
MinkowskiDropPath
)
from MinkowskiEngine import (
MinkowskiConvolution,
MinkowskiDepthwiseConvolution,
MinkowskiLinear,
MinkowskiGELU
)
from MinkowskiOps import (
to_sparse,
)
class Block(nn.Module):
""" Sparse ConvNeXtV2 Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., D=3):
super().__init__()
self.dwconv = MinkowskiDepthwiseConvolution(dim, kernel_size=7, bias=True, dimension=D)
self.norm = MinkowskiLayerNorm(dim, 1e-6)
self.pwconv1 = MinkowskiLinear(dim, 4 * dim)
self.act = MinkowskiGELU()
self.pwconv2 = MinkowskiLinear(4 * dim, dim)
self.grn = MinkowskiGRN(4 * dim)
self.drop_path = MinkowskiDropPath(drop_path)
def forward(self, x):
input = x
x = self.dwconv(x)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = input + self.drop_path(x)
return x
class SparseConvNeXtV2(nn.Module):
""" Sparse ConvNeXtV2.
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.,
D=3):
super().__init__()
self.depths = depths
self.num_classes = num_classes
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
MinkowskiLayerNorm(dims[i], eps=1e-6),
MinkowskiConvolution(dims[i], dims[i+1], kernel_size=2, stride=2, bias=True, dimension=D)
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j], D=D) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, MinkowskiConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiDepthwiseConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiLinear):
trunc_normal_(m.linear.weight, std=.02)
nn.init.constant_(m.linear.bias, 0)
def upsample_mask(self, mask, scale):
assert len(mask.shape) == 2
p = int(mask.shape[1] ** .5)
return mask.reshape(-1, p, p).\
repeat_interleave(scale, dim=1).\
repeat_interleave(scale, dim=2)
def forward(self, x, mask):
num_stages = len(self.stages)
mask = self.upsample_mask(mask, 2**(num_stages-1))
mask = mask.unsqueeze(1).type_as(x)
# patch embedding
x = self.downsample_layers[0](x)
x *= (1.-mask)
# sparse encoding
x = to_sparse(x)
for i in range(4):
x = self.downsample_layers[i](x) if i > 0 else x
x = self.stages[i](x)
# densify
x = x.dense()[0]
return x
|
ConvNeXt-V2-main
|
models/convnextv2_sparse.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from MinkowskiEngine import (
MinkowskiConvolution,
MinkowskiDepthwiseConvolution,
MinkowskiLinear,
)
from timm.models.layers import trunc_normal_
from .convnextv2_sparse import SparseConvNeXtV2
from .convnextv2 import Block
class FCMAE(nn.Module):
""" Fully Convolutional Masked Autoencoder with ConvNeXtV2 backbone
"""
def __init__(
self,
img_size=224,
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
decoder_depth=1,
decoder_embed_dim=512,
patch_size=32,
mask_ratio=0.6,
norm_pix_loss=False):
super().__init__()
# configs
self.img_size = img_size
self.depths = depths
self.dims = dims
self.patch_size = patch_size
self.mask_ratio = mask_ratio
self.num_patches = (img_size // patch_size) ** 2
self.decoder_embed_dim = decoder_embed_dim
self.decoder_depth = decoder_depth
self.norm_pix_loss = norm_pix_loss
# encoder
self.encoder = SparseConvNeXtV2(
in_chans=in_chans, depths=depths, dims=dims, D=2)
# decoder
self.proj = nn.Conv2d(
in_channels=dims[-1],
out_channels=decoder_embed_dim,
kernel_size=1)
# mask tokens
self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim, 1, 1))
decoder = [Block(
dim=decoder_embed_dim,
drop_path=0.) for i in range(decoder_depth)]
self.decoder = nn.Sequential(*decoder)
# pred
self.pred = nn.Conv2d(
in_channels=decoder_embed_dim,
out_channels=patch_size ** 2 * in_chans,
kernel_size=1)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, MinkowskiConvolution):
trunc_normal_(m.kernel, std=.02)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiDepthwiseConvolution):
trunc_normal_(m.kernel)
nn.init.constant_(m.bias, 0)
if isinstance(m, MinkowskiLinear):
trunc_normal_(m.linear.weight)
nn.init.constant_(m.linear.bias, 0)
if isinstance(m, nn.Conv2d):
w = m.weight.data
trunc_normal_(w.view([w.shape[0], -1]))
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if hasattr(self, 'mask_token'):
torch.nn.init.normal_(self.mask_token, std=.02)
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
"""
p = self.patch_size
assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
h = w = imgs.shape[2] // p
x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
x = torch.einsum('nchpwq->nhwpqc', x)
x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
return x
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
imgs: (N, 3, H, W)
"""
p = self.patch_size
h = w = int(x.shape[1]**.5)
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
x = torch.einsum('nhwpqc->nchpwq', x)
imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
return imgs
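# Shape example for the two helpers above (with the default img_size=224, patch_size=32):
#   imgs (N, 3, 224, 224) -> patchify -> (N, 49, 3072), since 49 = (224/32)**2 and 3072 = 32*32*3,
#   and unpatchify maps that back to (N, 3, 224, 224).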
def gen_random_mask(self, x, mask_ratio):
N = x.shape[0]
L = (x.shape[2] // self.patch_size) ** 2
len_keep = int(L * (1 - mask_ratio))
noise = torch.randn(N, L, device=x.device)
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1)
ids_restore = torch.argsort(ids_shuffle, dim=1)
# generate the binary mask: 0 is keep 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return mask
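# Example: with mask_ratio=0.6 on a 224x224 input (patch_size=32) there are L = 49 patches,
# len_keep = int(49 * 0.4) = 19, so each row of the returned mask has 19 zeros (kept patches)
# and 30 ones (masked patches), shuffled independently per sample.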
def upsample_mask(self, mask, scale):
assert len(mask.shape) == 2
p = int(mask.shape[1] ** .5)
return mask.reshape(-1, p, p).\
repeat_interleave(scale, dim=1).\
repeat_interleave(scale, dim=2)
def forward_encoder(self, imgs, mask_ratio):
# generate random masks
mask = self.gen_random_mask(imgs, mask_ratio)
# encoding
x = self.encoder(imgs, mask)
return x, mask
def forward_decoder(self, x, mask):
x = self.proj(x)
# append mask token
n, c, h, w = x.shape
mask = mask.reshape(-1, h, w).unsqueeze(1).type_as(x)
mask_token = self.mask_token.repeat(x.shape[0], 1, x.shape[2], x.shape[3])
x = x * (1. - mask) + mask_token * mask
# decoding
x = self.decoder(x)
# pred
pred = self.pred(x)
return pred
def forward_loss(self, imgs, pred, mask):
"""
imgs: [N, 3, H, W]
pred: [N, L, p*p*3]
mask: [N, L], 0 is keep, 1 is remove
"""
if len(pred.shape) == 4:
n, c, _, _ = pred.shape
pred = pred.reshape(n, c, -1)
pred = torch.einsum('ncl->nlc', pred)
target = self.patchify(imgs)
if self.norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.e-6)**.5
loss = (pred - target) ** 2
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
return loss
def forward(self, imgs, labels=None, mask_ratio=0.6):
x, mask = self.forward_encoder(imgs, mask_ratio)
pred = self.forward_decoder(x, mask)
loss = self.forward_loss(imgs, pred, mask)
return loss, pred, mask
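# Pre-training usage sketch (hedged; the sparse encoder requires MinkowskiEngine):
#   model = convnextv2_base()                       # FCMAE factory defined below
#   loss, pred, mask = model(torch.randn(2, 3, 224, 224))
#   loss.backward()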
def convnextv2_atto(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
return model
def convnextv2_femto(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
return model
def convnextv2_pico(**kwargs):
model = FCMAE(
depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
return model
def convnextv2_nano(**kwargs):
model = FCMAE(
depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
return model
def convnextv2_tiny(**kwargs):
model = FCMAE(
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
return model
def convnextv2_base(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
return model
def convnextv2_large(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
return model
def convnextv2_huge(**kwargs):
model = FCMAE(
depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], **kwargs)
return model
|
ConvNeXt-V2-main
|
models/fcmae.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy.random as random
import torch
import torch.nn as nn
import torch.nn.functional as F
from MinkowskiEngine import SparseTensor
class MinkowskiGRN(nn.Module):
""" GRN layer for sparse tensors.
"""
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, dim))
self.beta = nn.Parameter(torch.zeros(1, dim))
def forward(self, x):
cm = x.coordinate_manager
in_key = x.coordinate_map_key
Gx = torch.norm(x.F, p=2, dim=0, keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return SparseTensor(
self.gamma * (x.F * Nx) + self.beta + x.F,
coordinate_map_key=in_key,
coordinate_manager=cm)
class MinkowskiDropPath(nn.Module):
""" Drop Path for sparse tensors.
"""
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(MinkowskiDropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
if self.drop_prob == 0. or not self.training:
return x
cm = x.coordinate_manager
in_key = x.coordinate_map_key
keep_prob = 1 - self.drop_prob
mask = torch.cat([
torch.ones(len(_)) if random.uniform(0, 1) > self.drop_prob
else torch.zeros(len(_)) for _ in x.decomposed_coordinates
]).view(-1, 1).to(x.device)
if keep_prob > 0.0 and self.scale_by_keep:
mask.div_(keep_prob)
return SparseTensor(
x.F * mask,
coordinate_map_key=in_key,
coordinate_manager=cm)
class MinkowskiLayerNorm(nn.Module):
""" Channel-wise layer normalization for sparse tensors.
"""
def __init__(
self,
normalized_shape,
eps=1e-6,
):
super(MinkowskiLayerNorm, self).__init__()
self.ln = nn.LayerNorm(normalized_shape, eps=eps)
def forward(self, input):
output = self.ln(input.F)
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager)
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
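# Example (sketch): the channels_first branch normalizes over the channel dim of an NCHW tensor,
#   LayerNorm(96, data_format="channels_first")(torch.randn(2, 96, 56, 56)),
# which plain nn.LayerNorm cannot do without first permuting to channels_last.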
class GRN(nn.Module):
""" GRN (Global Response Normalization) layer
"""
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, x):
Gx = torch.norm(x, p=2, dim=(1,2), keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return self.gamma * (x * Nx) + self.beta + x
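# In other words, for a channels_last tensor x of shape (N, H, W, C), GRN computes per channel
# Gx = ||x||_2 over the spatial dims, Nx = Gx / mean_C(Gx), and returns gamma * (x * Nx) + beta + x,
# i.e. a learnable global feature re-calibration with a residual connection.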
|
ConvNeXt-V2-main
|
models/utils.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import pickle
import json
from pricefunction import PriceFunction
import pandas
class Dam(object):
def __init__(self,
instance=0,
):
self._instance = instance
self._marketpath="../marketinfo/"
if(instance not in [0,1,2,3,4]):
raise ValueError("the instance id is incorrect. it must be 0, 1, 2, 3, or 4.")
return
def getbudget(self,):
budget = numpy.loadtxt(self._marketpath+str(self._instance)+"/price/"+"/budget.txt")
return float(budget)
def getbuyerdata(self,):
path = self._marketpath+str(self._instance)+"/data_buyer/"+"/20.csv"
buydata = pandas.read_csv(path,header=None,engine="pyarrow").to_numpy()
return buydata
def getmlmodel(self,):
path = self._marketpath+str(self._instance)+"/data_buyer/"+"/mlmodel.pickle"
with open(path, 'rb') as handle:
model = pickle.load(handle)
return model
def getsellerid(self,):
path = self._marketpath+str(self._instance)+"/sellerid.txt"
ids = numpy.loadtxt(path)
return ids
def getsellerinfo(self,seller_id):
path = self._marketpath+str(self._instance)+"/summary/"+str(seller_id)+".csv.json"
f = open(path)
ids = json.load(f)
price = numpy.loadtxt(self._marketpath+str(self._instance)+"/price/"+"/price.txt",
delimiter=',',dtype=str)
price_i = price[seller_id]
MyPricing1 = PriceFunction()
#print("row number",ids['row_number'])
MyPricing1.setup(max_p = float(price_i[1]), method=price_i[0], data_size=ids['row_number'])
samples = numpy.loadtxt(self._marketpath+str(self._instance)+"/summary/"+str(seller_id)+".csvsamples.csv",
delimiter=' ',dtype=float)
return MyPricing1, ids, samples
def main():
MyDam = Dam()
budget = MyDam.getbudget() # get budget
buyer_data = MyDam.getbuyerdata() # get buyer data
mlmodel = MyDam.getmlmodel() # get ml model
sellers_id = MyDam.getsellerid()
i=0
seller_i_price, seller_i_summary, seller_i_samples = MyDam.getsellerinfo(seller_id=i)
return
if __name__ == "__main__":
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/dam.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
class Buyer(object):
def __init__(self):
return
def loaddata(self,
data=None,
datapath=None,):
if(not (data is None)):
self.data = data
return
if(datapath != None):
self.data = numpy.loadtxt(open(datapath, "rb"),
delimiter=",",
skiprows=1)
return
raise ValueError("Not implemented load data of buyer")
return
def load_stretagy(self,
stretagy=None):
return
def get_stretagy(self):
return self.stretagy
def load_mlmodel(self,
mlmodel):
self.mlmodel = mlmodel
return 0
def train_mlmodel(self,
train_data):
X = train_data[:,0:-1]
y = numpy.ravel(train_data[:,-1])
self.mlmodel.fit(X,y)
X_1 = self.data[:,0:-1]
y_1 = numpy.ravel(self.data[:,-1])
eval_acc = self.mlmodel.score(X_1, y_1)
return eval_acc
def main():
print("test of the buyer")
MyBuyer = Buyer()
MyBuyer.loaddata(data=numpy.asmatrix([[0,1,1,1],[1,0,1,0]]))
mlmodel1 = LogisticRegression(random_state=0)
MyBuyer.load_mlmodel(mlmodel1)
train_data = numpy.asmatrix([[0,1,1,1],[1,0,1,0],[1,1,1,1]])
eval1 = MyBuyer.train_mlmodel(train_data)
print("eval acc",eval1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/buyer.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
class MarketEngine(object):
def __init__(self):
return
def setup_market(self,
seller_data=None,
seller_prices=None,
buyer_data=None,
buyer_budget=None,
mlmodel=None):
sellers = list()
for i in range(len(seller_data)):
MySeller = Seller()
MySeller.loaddata(data=seller_data[i])
MySeller.setprice(seller_prices[i])
sellers.append(MySeller)
self.sellers = sellers
MyBuyer = Buyer()
MyBuyer.loaddata(data=buyer_data)
mlmodel1 = mlmodel
MyBuyer.load_mlmodel(mlmodel1)
self.buyer = MyBuyer
self.buyer_budget = buyer_budget
#print("set up the market")
return
def load_stretagy(self,
stretagy=None,):
self.stretagy = stretagy
return
def train_buyer_model(self):
print(" train buyer model ")
# check if the budget constraint is satisfied.
cost = sum(self.stretagy[1])
if(cost>self.buyer_budget):
raise ValueError("The budget constraint is not satisifed!")
return
traindata = None
for i in range(len(self.sellers)):
d1 = self.sellers[i].getdata(self.stretagy[0][i],self.stretagy[1][i])
if(i==0):
traindata = d1
else:
traindata = numpy.concatenate((traindata,d1))
print(i,d1)
print("budget checked! data loaded!")
#print("train data", traindata)
acc = self.buyer.train_mlmodel(traindata)
return acc
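# Strategy format sketch (inferred from main() below; Seller.getdata itself is not shown here):
# stretagy[0] appears to hold the per-seller purchase sizes and stretagy[1] the per-seller
# payments, e.g. [[1, 2], [50, 50]] requests data from sellers 0 and 1 for 50 budget units each,
# so the total cost checked above is 100.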
def main():
print("test of the market engine")
MyMarketEngine = MarketEngine()
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
data_b = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
buyer_budget = 100
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
mlmodel1 = LogisticRegression(random_state=0)
MyMarketEngine.setup_market(seller_data=[data_1,data_2],
seller_prices = [MyPricing1,MyPricing2],
buyer_data=data_b,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
stretagy = [[1,2],[50,50]]
MyMarketEngine.load_stretagy(stretagy)
acc1 = MyMarketEngine.train_buyer_model()
print("acc is ",acc1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/marketengine.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import matplotlib
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
import pandas
from sklearn.neighbors import KNeighborsClassifier
import seaborn as sns
def visualize_acc_cost(data_path="../logs/0/acc_cost_tradeoffs_uniform_logreg.csv",
savepath="../figures/",
):
plt.clf()
data = pandas.read_csv(data_path)
print("data",data)
mean1 = data.groupby("budget").mean()
var1 = data.groupby("budget").var()
max1 = data.groupby("budget").max()
min1 = data.groupby("budget").min()
print("mean1 of acc",mean1['acc'])
print("var",var1['acc'])
print("diff, max, and min",max1['acc']-min1['acc'],max1['acc'],min1['acc'])
sns.color_palette("tab10")
swarm_plot = sns.histplot(data=data, x="acc", hue="budget",palette=["C0", "C1", "C2","C3","C4"])
#swarm_plot = sns.scatterplot(data=data, x= "cost",y="acc")
plt.figure()
fig = swarm_plot.get_figure()
data_parse = data_path.split("/")
method = data_parse[-1].split("_")[-2]
instanceid = data_parse[-2]
ml = data_parse[-1].split("_")[-1]
fig.savefig(savepath+str(instanceid)+"/"+method+ml+".pdf")
plt.figure()
swarm_plot = sns.lineplot(data=data, y="acc", x="budget", err_style="band")
fig2 = swarm_plot.get_figure()
fig2.savefig(savepath+str(instanceid)+"/"+method+ml+"_line.pdf")
return
def evaluate(
MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale=0.1,
method="single",
):
trial_list = list(range(trial))
acc_list = list()
cost_list = list()
for i in range(trial):
print("trial:",i)
# generate a submission
submission = gen_submission(seller_data_size_list,cost_scale=cost_scale,
method=method)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
cost_list.append(cost)
acc_list.append(acc1)
result = pandas.DataFrame()
result['trial'] = trial_list
result['acc'] = acc_list
result['cost'] = cost_list
return result
''' generate a pandas dataframe
trial,accuracy, cost
'''
def gen_submission(seller_data_size_list=[100,200,300],
cost_scale=1,
method="uniform"):
if(method=="uniform"):
submission = [numpy.random.randint(0,int(a*cost_scale)) for a in seller_data_size_list]
if(method=="single"):
submission = [0]*len(seller_data_size_list)
index = numpy.random.randint(0,len(submission))
submission[index] = int(seller_data_size_list[index]*cost_scale)
return submission
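# Example (sketch): with seller_data_size_list=[100, 200, 300] and cost_scale=0.1, "uniform"
# draws a random purchase size below 10, 20 and 30 rows from the three sellers, while "single"
# spends the whole scaled amount on one randomly chosen seller, e.g. [0, 20, 0].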
def evaluate_budget(MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale_list=[0.1],
method="single",
):
results = [evaluate(
MarketHelper=MarketHelper,
MarketEngineObj=MarketEngineObj,
model=model,
buyer_data=buyer_data,
trial=trial, # number of trials per budget
seller_data_size_list = seller_data_size_list,
cost_scale=c1,
method=method,
) for c1 in cost_scale_list]
full_result = pandas.concat(results, ignore_index=True,axis=0)
return full_result
def main():
matplotlib.pyplot.close('all')
instance_ids = [0,1,2,3,4]
methods = ['single','uniform']
methods=['uniform']
for instance_id in instance_ids:
for method in methods:
#visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_knn.csv")
visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_rf.csv")
#visualize_acc_cost(data_path="../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_logreg.csv")
'''
print("evaluate acc and cost tradeoffs")
instance_id=0
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
MyMarketEngine = MarketEngine()
mlmodel1 = LogisticRegression(random_state=0)
mlmodel1 = KNeighborsClassifier(n_neighbors=9)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
result = evaluate(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=10, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv"),
cost_scale=0.1,
)
result2 = evaluate_budget(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/" + str(instance_id) +"/seller_datasize.csv"),
# cost_scale_list=[0.005,0.0075,0.01,0.025],
# method="uniform",
cost_scale_list=[0.05,0.1,0.5,1],
method="single",
)
folder1 = "../logs/"+str(instance_id)+"/"
result2.to_csv(folder1+"acc_cost_tradeoffs.csv")
print("result is:",result)
'''
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/visualize_acc_cost.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
import pandas
from sklearn.neighbors import KNeighborsClassifier
def evaluate(
MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale=0.1,
method="single",
full_price=100,
):
trial_list = list(range(trial))
acc_list = list()
cost_list = list()
budget_list = list()
for i in range(trial):
print("trial:",i)
# generate a submission
submission = gen_submission(seller_data_size_list,cost_scale=cost_scale,
method=method)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
cost_list.append(cost)
acc_list.append(acc1)
budget_list.append(cost_scale*full_price)
result = pandas.DataFrame()
result['trial'] = trial_list
result['acc'] = acc_list
result['cost'] = cost_list
result['budget'] = budget_list
return result
''' generate a pandas dataframe
trial,accuracy, cost
'''
def gen_submission(seller_data_size_list=[100,200,300],
cost_scale=1,
method="uniform"):
if(method=="uniform"):
d = len(seller_data_size_list)
submission = [numpy.random.randint(0,int(a*cost_scale/d*2)) for a in seller_data_size_list]
if(method=="single"):
submission = [0]*len(seller_data_size_list)
index = numpy.random.randint(0,len(submission))
submission[index] = int(seller_data_size_list[index]*cost_scale)
return submission
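# Illustrative sketch (not part of the benchmark): what the two submission
# strategies produce for a toy market with three sellers. The helper name and
# the numbers below are made up for illustration only.
def _demo_gen_submission():
    sizes = [100, 200, 300]
    numpy.random.seed(0)  # only for reproducible demo output
    # "uniform": a random amount is bought from every seller.
    print(gen_submission(sizes, cost_scale=0.5, method="uniform"))
    # "single": the whole cost_scale fraction is spent on one random seller,
    # e.g. [0, 0, 150] if the third seller happens to be picked.
    print(gen_submission(sizes, cost_scale=0.5, method="single"))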
def evaluate_budget(MarketHelper,
MarketEngineObj,
model,
buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = [100,200,300],
cost_scale_list=[0.1],
method="single",
):
results = [evaluate(
MarketHelper=MarketHelper,
MarketEngineObj=MarketEngineObj,
model=model,
buyer_data=buyer_data,
trial=trial, # number of trials per budget
seller_data_size_list = seller_data_size_list,
cost_scale=c1,
method=method,
) for c1 in cost_scale_list]
full_result = pandas.concat(results, ignore_index=True,axis=0)
return full_result
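# Sketch (assumption: a downstream analysis helper, not part of the original
# pipeline): the frame returned by evaluate_budget() can be summarised per
# budget level to read off the accuracy-cost tradeoff directly,
# e.g. summarize_tradeoff(evaluate_budget(...)).
def summarize_tradeoff(result_frame):
    # mean and std of accuracy / cost for each sampled budget value
    return result_frame.groupby("budget")[["acc", "cost"]].agg(["mean", "std"])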
def evaluate_full(instance_id=0,
method="single",
model_name="knn",):
print("evaluate acc and cost tradeoffs")
# instance_id=0
# method="single"
# model_name="knn"
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
numpy.savetxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv",data_size,fmt="%d")
MyMarketEngine = MarketEngine()
    # default model is logistic regression (used for model_name="logreg")
    mlmodel1 = LogisticRegression(random_state=0)
    if(model_name=="knn"):
        mlmodel1 = KNeighborsClassifier(n_neighbors=9)
    if(model_name=='rf'):
        # note: the "rf" option is backed by gradient boosting, not a random forest
        mlmodel1 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
                                              max_depth=1, random_state=0)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
result = evaluate(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=10, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/"+str(instance_id)+"/seller_datasize.csv"),
cost_scale=0.1,
)
result2 = evaluate_budget(
MarketHelper=MyHelper,
MarketEngineObj=MyMarketEngine,
model=mlmodel1,
buyer_data=buyer_data,
trial=100, # number of trials per budget
seller_data_size_list = numpy.loadtxt("../marketinfo/" + str(instance_id) +"/seller_datasize.csv"),
# cost_scale_list=[0.005,0.0075,0.01,0.025,0.05,0.075,0.1],
cost_scale_list=[0.01,0.025,0.05,0.1,0.2],
method=method,
# cost_scale_list=[0.05,0.1,0.5,1],
# method="single",
)
folder1 = "../logs/"+str(instance_id)+"/"
result2.to_csv(folder1+"acc_cost_tradeoffs_"+method+"_"+model_name+".csv")
print("result is:",result)
return
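# Sketch (assumption: a quick inspection helper, not part of the original
# pipeline): read back the per-trial log written by evaluate_full() above and
# print the mean accuracy at each sampled budget. The path layout mirrors the
# one used in evaluate_full().
def inspect_tradeoff(instance_id=0, method="single", model_name="knn"):
    log_path = "../logs/"+str(instance_id)+"/acc_cost_tradeoffs_"+method+"_"+model_name+".csv"
    frame = pandas.read_csv(log_path, index_col=0)
    print(frame.groupby("budget")["acc"].mean())
    return frame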
def main():
instance_ids = [3,4]
methods = ['single','uniform']
for instance_id in instance_ids:
for method in methods:
evaluate_full(instance_id=instance_id,method=method,model_name="knn")
evaluate_full(instance_id=instance_id,method=method,model_name="logreg")
evaluate_full(instance_id=instance_id,method=method,model_name="rf")
return
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator_acc_cost.py
|
import matplotlib # noqa
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
plt.rcParams['axes.facecolor'] = 'white'
import numpy as np
import matplotlib.ticker as ticker
import json
import seaborn as sn
import pandas as pd
from matplotlib.colors import LogNorm
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
import umap
#import matplotlib.pyplot as plt
class VisualizeTools(object):
def __init__(self,figuresize = (10,8),figureformat='jpg',
colorset=['r','orange','k','yellow','g','b','k'],
markersize=30,
fontsize=30,
usecommand=True):
self.figuresize=figuresize
self.figureformat = figureformat
self.fontsize = fontsize
self.linewidth = 5
self.markersize = markersize
        self.folder = "../figures/"  # output directory for saved figures
self.colorset=colorset
self.markerset = ['o','X','^','v','s','o','*','d','p']
self.marker = 'o' # from ['X','^','v','s','o','*','d','p'],
self.linestyle = '-' # from ['-.','--','--','-.','-',':','--','-.'],
self.linestyleset = ['-','-.','--','--','-.','-',':','--','-.']
self.usecommand = usecommand
def plotline(self,
xvalue,
yvalue,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
color=None,
ax=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
# plot it
if(color==None):
color = self.colorset[0]
ax.plot(xvalue,
yvalue,
marker=self.marker,
label=legend,
color=color,
linestyle = self.linestyle,
zorder=0,
)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
ax.locator_params(axis='x', nbins=6)
ax.locator_params(axis='y', nbins=6)
formatter = ticker.FormatStrFormatter('%0.2e')
formatterx = ticker.FormatStrFormatter('%0.2f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def plotlines(self,
xvalue,
yvalues,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=60,
basey=10,
ylim=None):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=1,
)
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log',base=basey)
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=1, numpoints= 2,loc="best")
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def Histogram(self,
xvalue,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=90,
ylim=None,
n_bins=20):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
plt.hist(xvalue,bins=n_bins)
'''
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=10,
)
'''
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log')
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=2, numpoints= 2,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def Histograms(self,
xvalues,
xlabel='xlabel',
ylabel='ylabel',
legend=None,
filename='lineplot',
fig=None,
ax=None,
showlegend=False,
log=False,
fontsize=90,
color=['red','orange'],
ylim=None,
n_bins=20):
#if(-1):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize,frameon=True)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.set_facecolor("white")
#ax.set_edgecolor("black")
ax.grid("True",color="grey")
ax.get_yaxis().set_visible(True)
ax.get_xaxis().set_visible(True)
# plot it
plt.hist(xvalues,bins=n_bins, density=True,color=color)
'''
for i in range(len(yvalues)):
ax.plot(xvalue,
yvalues[i],
marker=self.markerset[i],
label=legend[i],
color=self.colorset[i],
linestyle = self.linestyleset[i],
zorder=0,
markersize=self.markersize,
markevery=10,
)
'''
plt.xlabel(xlabel,fontsize=fontsize)
plt.ylabel(ylabel,fontsize=fontsize)
plt.grid(True)
#ax.locator_params(axis='x', nbins=6)
#ax.locator_params(axis='y', nbins=6)
'''
formatter = ticker.FormatStrFormatter('%d')
formatterx = ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatterx)
'''
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(ylim!=None):
plt.ylim(ylim)
if(log==True):
ax.set_yscale('log')
if(showlegend==True):
ax.legend(legend,facecolor="white",prop={'size': fontsize},
markerscale=2, numpoints= 2,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
#plt.fill_between(bud, np.asarray(acc_mean)-np.asarray(acc_std), np.asarray(acc_mean)+np.asarray(acc_std),alpha=0.3,facecolor='lightgray')
def plotscatter(self,
xvalue=0.3,
yvalue=0.5,
filename='lineplot',
markersize=10,
legend='Learned Thres',
color='blue',
showlegend=False,
fig=None,
ax=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.plot(xvalue,yvalue,'*',markersize=markersize,color=color,
label=legend)
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 35},markerscale=3, numpoints= 1,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plotscatters_annotation(self,
xvalue=[0.3],
yvalue=[0.5],
filename='lineplot',
markersize=10,
legend='Learned Thres',
color='blue',
showlegend=False,
fig=None,
ax=None,
annotation=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.rcParams["font.sans-serif"] = 'Arial'
ax.scatter(xvalue,yvalue,)
# '*',markersize=markersize,color=color,
# )
for i in range(len(xvalue)):
ax.annotate(annotation[i], xy=[xvalue[i],yvalue[i]])
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 35},markerscale=3, numpoints= 1,loc=0)
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plot_bar(self,barname,barvalue,
filename='barplot',
markersize=2,
yname='Frequency',
xname="",
color='blue',
ylim=None,
fig=None,
showlegend=False,
ax=None,
labelpad=None,
fontsize=30,
threshold=10,
add_thresline=False,):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
ax.set_facecolor("white")
plt.rcParams.update({'font.size': 1})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = markersize
plt.rcParams["font.sans-serif"] = 'Arial'
plt.rc('font', size=1) # controls default text sizes
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
plt.grid(True,color="grey")
x = np.arange(len(barname))
ax.bar(x,barvalue,color=color,
label=barname)
ax.set_ylabel(yname,fontsize=fontsize)
if(xname!=""):
ax.set_xlabel(xname,fontsize=fontsize)
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
ax.set_xticklabels(barname,rotation='horizontal',fontsize=fontsize)
#ax.set_xticklabels(barname,rotation='vertical')
plt.xlim(x[0]-0.5,x[-1]+0.5)
if(add_thresline==True):
ax.plot([min(x)-0.5, max(x)+0.5], [threshold, threshold], "k--")
matplotlib.rc('xtick', labelsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if(not(labelpad==None)):
ax.tick_params(axis='x', which='major', pad=labelpad)
#matplotlib.rc('ytick', labelsize=fontsize)
#ax.text(0.5,0.5,"hello")
#ax.legend()
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': 10},markerscale=3, numpoints= 1,loc=0)
#ticks = [tick for tick in plt.gca().get_xticklabels()]
#print("ticks 0 is",ticks[0].get_window_extent())
'''
plt.text(-0.07, -0.145, 'label:', horizontalalignment='center',fontsize=fontsize,
verticalalignment='center', transform=ax.transAxes)
plt.text(-0.07, -0.25, 'qs:', horizontalalignment='center',fontsize=fontsize,
verticalalignment='center', transform=ax.transAxes)
'''
filename =filename+'.'+self.figureformat
if(not(ylim==None)):
plt.ylim(ylim)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plot_bar2value(self,barname,barvalue, barvalue2,
filename='barplot',
markersize=2,
yname='Frequency',
color='blue',
fig=None,
showlegend=False,
legend=['precision','recall'],
yrange = None,
ax=None,
fontsize=25,
showvalues = False,
legend_loc="upper left",
hatch=None):
if(ax==None):
# setup figures
fig = plt.figure(figsize=self.figuresize)
fig, ax = plt.subplots(figsize=self.figuresize)
plt.rcParams.update({'font.size': fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = markersize
plt.rcParams["font.sans-serif"] = 'Arial'
width=0.3
x = np.arange(len(barname))
ax.bar(x-width/2,barvalue,width,color=color[0],
label=legend[0])
ax.bar(x+width/2,barvalue2,width, color=color[1],
hatch=hatch,
label=legend[1])
ax.set_ylabel(yname,fontsize=fontsize)
#ax.set_title('Scores by group and gender')
ax.set_xticks(x)
#ax.set_xticklabels(barname,rotation='vertical')
#ax.set_xticklabels(barname,rotation=45)
ax.set_xticklabels(barname,rotation='horizontal')
plt.xlim(x[0]-0.5,x[-1]+0.5)
if(not(yrange==None)):
plt.ylim(yrange[0],yrange[1])
matplotlib.rc('xtick', labelsize=fontsize)
matplotlib.rc('ytick', labelsize=fontsize)
#ax.legend()
if(showvalues==True):
for i, v in enumerate(barvalue):
ax.text(i - 0.33,v + 0.1, "{:.1f}".format(v), color=color[0], fontweight='bold',)
for i, v in enumerate(barvalue2):
ax.text(i + .10,v + 0.2, "{:.1f}".format(v), color=color[1], fontweight='bold',)
if(showlegend):
handles, labels = ax.get_legend_handles_labels()
print("labels",labels)
ax.legend(handles[::-1],labels[::-1], prop={'size': fontsize},markerscale=3, numpoints= 1,
loc=legend_loc,ncol=1, )#bbox_to_anchor=(0, 1.05))
filename =filename+'.'+self.figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return fig, ax
def plotconfusionmaitrix(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap="coolwarm", # "Blues"
vmin=0,
vmax=10,
fonttype='Arial',
title1="",
fmt=".1f",
xlabel1 = "Predicted label",
ylabel1="True label",):
if(self.usecommand==True):
return self.plotconfusionmaitrix_common1(confmatrix=confmatrix,
xlabel=xlabel,
ylabel=ylabel,
filename=filename,
keywordsize = keywordsize,
font_scale=font_scale,
figuresize=figuresize,
cmap=cmap,
vmin=vmin,
vmax=vmax,
fonttype=fonttype,
title1=title1,
xlabel1=xlabel1,
ylabel1=ylabel1,
fmt=fmt)
sn.set(font=fonttype)
#boundaries = [0.0, 0.045, 0.05, 0.055, 0.06,0.065,0.07,0.08,0.1,0.15, 1.0] # custom boundaries
boundaries = [0.0, 0.06,0.2, 0.25,0.3, 0.4,0.5,0.6,0.7, 0.8, 1.0] # custom boundaries
# here I generated twice as many colors,
# so that I could prune the boundaries more clearly
#hex_colors = sns.light_palette('blue', n_colors=len(boundaries) * 2 + 2, as_cmap=False).as_hex()
#hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
#print("hex",hex_colors)
# My color
hex_colors = ['#ffffff','#ebf1f7',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
['#e5eff9',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
boundaries = [0.0, 0.03, 0.06,0.1,0.2,0.29,0.3,0.8,1.0]
hex_colors = ['#F2F6FA','#ebf1f7','#FFB9C7','#FF1242', '#FF1242','#FF1242','#2676b8','#135fa7','#08488e']
colors=list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name='custom_navy',
colors=colors,
)
tol=1e-4
labels = confmatrix
confmatrix=confmatrix*(confmatrix>0.35)
print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(df_cm,
linewidths=0.3,
linecolor="grey",
cmap=custom_color_map,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt=".1f",
#mask=df_cm < 0.02,
vmin=vmin+tol,
vmax=vmax,
cbar=False,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel(ylabel1)
plt.xlabel(xlabel1)
plt.title("Overall accuracy:"+"{:.1f}".format(np.trace(confmatrix)),
fontweight="bold",
pad=32)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def plotconfusionmaitrix_common1(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap="vlag",
vmin=0,
vmax=10,
fonttype='Arial',
title1="",
fmt=".1f",
xlabel1 = "Predicted label",
ylabel1="True label",
):
print("Use common confusion matrix plot!")
sn.set(font=fonttype)
#boundaries = [0.0, 0.045, 0.05, 0.055, 0.06,0.065,0.07,0.08,0.1,0.15, 1.0] # custom boundaries
boundaries = [0.0, 0.06,0.2, 0.25,0.3, 0.4,0.5,0.6,0.7, 0.8, 1.0] # custom boundaries
# here I generated twice as many colors,
# so that I could prune the boundaries more clearly
#hex_colors = sns.light_palette('blue', n_colors=len(boundaries) * 2 + 2, as_cmap=False).as_hex()
#hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
#print("hex",hex_colors)
# My color
hex_colors = ['#ffffff','#ebf1f7',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
['#e5eff9',
'#d3e4f3',
'#bfd8ed',
'#a1cbe2',
'#7db8da',
'#5ca4d0',
'#3f8fc5',
'#2676b8',
'#135fa7',
'#08488e']
'''
boundaries = [0.0, 0.03, 0.06,0.1,0.2,0.29,0.3,0.8,1.0]
hex_colors = ['#F2F6FA','#ebf1f7','#FFB9C7','#FF1242', '#FF1242','#FF1242','#2676b8','#135fa7','#08488e']
colors=list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name='custom_navy',
colors=colors,
)
tol=1e-4
labels = confmatrix
#confmatrix=confmatrix*(confmatrix>0.35)
#print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(-df_cm,
linewidths=0.3,
linecolor="grey",
cmap=cmap,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt=fmt,
#mask=df_cm < 0.02,
#vmin=vmin+tol,
#vmax=vmax,
cbar=False,
center=0,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel(ylabel1)
plt.xlabel(xlabel1)
print("trece",np.trace(confmatrix),confmatrix)
plt.title(title1,
fontweight="bold",
fontsize=keywordsize*1.1,
pad=40)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def plotconfusionmaitrix_common(self,confmatrix,
xlabel=None,ylabel=None,
filename='confmatrix',
keywordsize = 16,
font_scale=2,
figuresize=(10,10),
cmap='vlag',#sn.diverging_palette(240, 10, n=9),
vmin=-5,
vmax=10,
center=0,
fonttype='Arial'):
cmap = LinearSegmentedColormap.from_list('RedWhiteGreen', ['red', 'white', 'green'])
sn.set(font=fonttype)
tol=1e-4
        labels = list()  # per-cell annotation strings, built below
for i in range(confmatrix.shape[0]):
temp = list()
for j in range(confmatrix.shape[1]):
a = confmatrix[i,j]
if(a>0.1):
temp.append("+"+"{0:.1f}".format(a))
if(a<-0.1):
temp.append("{0:.1f}".format(a))
if(a<=0.1 and a>=-0.1):
temp.append(str(0.0))
labels.append(temp)
#labels = (confmatrix+0.05)*(np.abs(confmatrix)>0.1)
print("labels",labels)
        confmatrix = confmatrix*(np.abs(confmatrix)>0.7)  # zero out entries with |value| <= 0.7
print("confmatrix",confmatrix+tol)
df_cm = pd.DataFrame(confmatrix+tol,xlabel,ylabel)
plt.figure(figsize=figuresize)
sn.set(font_scale=font_scale) # for label size
g = sn.heatmap(df_cm,
linewidths=12.0,
linecolor="grey",
cmap=cmap,
center=center,
#annot=True,
annot = labels,
annot_kws={"size": keywordsize},fmt="s",#fmt="{0:+.1f}",
#mask=df_cm < 0.02,
vmin=vmin,
vmax=vmax,
cbar=False,
#cbar_kws={"ticks":[0.1,0.3,1,3,10]},
#norm=LogNorm(),
#legend=False,
) # font size
#g.cax.set_visible(False)
#sn.heatmap(df, cbar=False)
g.set_yticklabels(labels=g.get_yticklabels(), va='center')
filename =filename+'.'+self.figureformat
plt.ylabel("ML API")
plt.xlabel("Dataset",)
#plt.title("Overall accuracy:"+"{:.1f}".format(np.trace(confmatrix)),
# fontweight="bold",
# pad=32)
g.set_xticklabels(g.get_xticklabels(), rotation = 0)
if(self.figureformat=='jpg'):
plt.savefig(filename, format=self.figureformat, bbox_inches='tight',dpi=40)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
return 0
def reward_vs_confidence(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance'):
"""
Run a small experiment on solving a Bernoulli bandit with K slot machines,
each with a randomly initialized reward probability.
Args:
K (int): number of slot machiens.
N (int): number of time steps to try.
"""
datapath = self.datapath
print('reward datapath',datapath)
b0 = BernoulliBanditwithData(ModelID=ModelID,datapath=datapath)
K = len(ModelID)
print ("Data generated Bernoulli bandit has reward probabilities:\n", b0.probas)
print ("The best machine has index: {} and proba: {}".format(
max(range(K), key=lambda i: b0.probas[i]), max(b0.probas)))
Params0 = context_params(ModelID=ModelID,datapath=datapath)
#confidencerange = (0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.9999,1)
#confidencerange = (0.99,0.991,0.992,0.993,0.994,0.995,0.996,0.997,0.9999,1)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_List(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID)
print(BaseAccuracy, Others)
CDF = Params0.BaseModel.Compute_Prob_vs_Score(ScoreRange=confidencerange)
print(CDF)
plot_reward_vs_confidence(confidencerange, BaseAccuracy,Others, ModelID,"model reward compare_ModelID_{}.png".format(ModelID),CDF)
def reward_vs_prob(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val',
context=None):
"""
compute and plot reward as a function of the probability of not using
the basemodel.
Args:
See the name.
"""
datapath = self.datapath
print('reward datapath',datapath)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context = context)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID,context=context)
print('Base Accuracy', BaseAccuracy, 'Other',Others)
CDF = self.mlmodels.compute_prob_vs_score(ScoreRange=confidencerange,context = context)
print('CDF',CDF)
self._plot_reward_vs_prob(CDF, BaseAccuracy,Others, ModelID,self.folder+"Reward_vs_Prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
def reward_vs_prob_pdf(self,
BaseID = 100,
ModelID=[100,0,1,2],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
prob_range=None,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val',
context=None):
"""
compute and plot reward as a function of the probability of not using
the basemodel.
Args:
See the name.
"""
datapath = self.datapath
print('reward datapath',datapath)
if(not(prob_range==None)):
confidencerange = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context = context)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=confidencerange,BaseID=BaseID,ModelID=ModelID,context=context)
print('Base Accuracy', BaseAccuracy, 'Other',Others)
CDF = self.mlmodels.compute_prob_vs_score(ScoreRange=confidencerange,context = context)
print('CDF',CDF)
self._plot_reward_vs_prob(CDF, BaseAccuracy,Others, ModelID,self.folder+"Reward_vs_Prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
if(not(prob_range==None)):
base_pdf,other_pdf = self.mlmodels.accuracy_condition_score_list_cdf2pdf(prob_range,BaseAccuracy,Others,diff = False)
print('base pdf',base_pdf)
print('other pdf',other_pdf)
self._plot_reward_vs_prob(CDF, base_pdf,other_pdf, ModelID,self.folder+"Reward_vs_Probpdf_diff_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
self._plot_reward_vs_prob(confidencerange, base_pdf,other_pdf, ModelID,self.folder+"Reward_vs_conf_pdf_diff_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat),CDF)
def qvalue_vs_prob(self,
confidence_range = None,
BaseID = 100,
prob_range = None,
dataname = 'imagenet_val',
context=None):
if(not(prob_range==None)):
confidence_range = self.mlmodels.prob2qvalue(prob_interval=prob_range,conf_id=BaseID,context=context)
filename = self.folder+"Conf_vs_prob_BaseID_{}_{}_context_{}.{}".format(BaseID,dataname,context,self.figureformat)
prob = self.mlmodels.compute_prob_wrt_confidence(confidence_range=confidence_range,BaseID = BaseID,context=context)
self._plot_q_value_vs_prob(confidence_range,prob,filename)
return 0
def _plot_reward_vs_prob(self, confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
solver_names (list<str)
figname (str)
"""
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
k=0
for i in model_acc:
plt.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
plt.xlabel('Fraction of Low Confidence Data')
plt.ylabel('Accuracy on Low Confidence Data')
plt.legend(loc=8, ncol=5)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def _plot_q_value_vs_prob(self,confidence_range,prob,figname):
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
plt.plot(prob,confidence_range,marker='x')
plt.xlabel('Fraction of Low Confidence Data')
plt.ylabel('Confidence Threshold')
#plt.legend(loc=9, ncol=5)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def plot_accuracy(self,
namestick=['bm', 's0','s1','s2'],
model_id=[100,0,1,2],
base_id = 100,
datapath='path/to/imagenet/result/val_performance',
dataname='imagenet_val'):
datapath = self.datapath
print('reward datapath',datapath)
BaseAccuracy, Others =self.mlmodels.accuracy_condition_score_list(ScoreRange=[1],BaseID=base_id,ModelID=model_id)
print('Base Accuracy', BaseAccuracy, 'Other',len(Others))
fig = plt.figure(figsize=self.figuresize)
plt.rcParams.update({'font.size': self.fontsize})
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["lines.linewidth"] = self.linewidth
plt.rcParams["lines.markersize"] = self.markersize
flattened = [val for sublist in Others for val in sublist]
print('flat others',flattened)
acc = flattened
#plt.bar(range(len(acc)),acc,color=self.colorset,tick_label=namestick)
bars = plt.bar(range(len(acc)),acc,color=self.colorset,hatch="/")
#plt.bar(range(len(acc)),acc,color='r',edgecolor='k',hatch="/")
#ax = plt.gca()
#ax.bar(range(1, 5), range(1, 5), color='red', edgecolor='black', hatch="/")
#patterns = ('-', '+', 'x', '\\', '*', 'o', 'O', '.')
patterns = ('-', '\\', '/', 'o', 'O', '.','+', 'x','*')
for bar, pattern in zip(bars, patterns):
bar.set_hatch(pattern)
#ax.set_hatch('/')
plt.xlabel('ML Services')
plt.ylabel('Accuracy')
plt.ylim(min(acc)-0.01)
#set_xticklabels(namestick)
matplotlib.pyplot.xticks(range(len(acc)), namestick)
#plt.legend(loc=9, ncol=5)
figname = self.folder+"accuracy_dataset_{}.{}".format(dataname,self.figureformat)
plt.savefig(figname, format=self.figureformat, bbox_inches='tight')
def plot_umaps(self,
fit_data=[[1,2,3],[4,5,6]],
data=[[1,2,3],[4,5,6]],
filename="umap",
markersize=2,
markershape=["8","s"],
yname='Frequency',
color=['blue','red'],
fig=None,
showlegend=False,
legend=['male','female'],
yrange = None,
ax=None,
fontsize=30,
figureformat="jpg",):
# generate embeddings
reducer = umap.UMAP(random_state=42)
reducer.fit(fit_data[:,0:-1])
for i in range(len(data)):
datum1 = data[i]
embedding = reducer.transform(datum1[:,0:-1])
plt.scatter(embedding[:, 0], embedding[:, 1], c=datum1[:,-1], cmap='Spectral', s=markersize,marker=markershape[i],label=legend[i])
# plt.legend(loc=8, ncol=5)
lgnd = plt.legend(loc="lower left", scatterpoints=1, fontsize=10)
for handle in lgnd.legendHandles:
handle.set_sizes([2.0])
self.figureformat = figureformat
if(self.figureformat=='jpg'):
plt.savefig(filename+".jpg", format=self.figureformat, bbox_inches='tight',dpi=300)
else:
plt.savefig(filename, format=self.figureformat, bbox_inches='tight')
plt.close("all")
return
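# Minimal usage sketch (assumption: toy data; the output file name
# 'demo_lineplot' is a placeholder). plotline() writes "<filename>.<figureformat>"
# and returns the figure/axes, so further artists such as plotscatter() can be
# layered on top of the same plot.
def _demo_visualize_tools():
    tools = VisualizeTools(figuresize=(10, 8), figureformat='jpg')
    x = np.linspace(0.0, 1.0, 20)
    y = x ** 2
    fig, ax = tools.plotline(x, y,
                             xlabel='Budget fraction',
                             ylabel='Accuracy',
                             filename='demo_lineplot')
    tools.plotscatter(xvalue=[0.5], yvalue=[0.25],
                      fig=fig, ax=ax,
                      legend='Chosen point',
                      filename='demo_lineplot')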
def plot_results(solvers, solver_names, figname):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
        solver_names (list<str>)
figname (str)
"""
assert len(solvers) == len(solver_names)
assert all(map(lambda s: isinstance(s, Solver), solvers))
assert all(map(lambda s: len(s.regrets) > 0, solvers))
b = solvers[0].bandit
fig = plt.figure(figsize=(14, 4))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Sub.fig. 1: Regrets in time.
for i, s in enumerate(solvers):
ax1.plot(range(len(s.regrets)), s.regrets, label=solver_names[i])
ax1.set_xlabel('Time step')
ax1.set_ylabel('Cumulative regret')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
# Sub.fig. 2: Probabilities estimated by solvers.
sorted_indices = sorted(range(b.n), key=lambda x: b.probas[x])
ax2.plot(range(b.n), [b.probas[x] for x in sorted_indices], 'k--', markersize=12)
for s in solvers:
ax2.plot(range(b.n), [s.estimated_probas[x] for x in sorted_indices], 'x', markeredgewidth=2)
ax2.set_xlabel('Actions sorted by ' + r'$\theta$')
ax2.set_ylabel('Estimated')
ax2.grid('k', ls='--', alpha=0.3)
# Sub.fig. 3: Action counts
for s in solvers:
        ax3.plot(range(b.n), np.array(s.counts) / float(len(solvers[0].regrets)), drawstyle='steps', lw=2)
ax3.set_xlabel('Actions')
ax3.set_ylabel('Frac. # trials')
ax3.grid('k', ls='--', alpha=0.3)
plt.savefig(figname)
def plot_reward_vs_confidence(confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
solver_names (list<str)
figname (str)
"""
fig = plt.figure(figsize=(14, 6))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(121)
#ax2 = fig.add_subplot(212)
ax3 = fig.add_subplot(122)
#ax4 = fig.add_subplot(214)
    # Sub.fig. 1: reward vs. probability threshold.
k=0
for i in model_acc:
ax1.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
ax1.set_xlabel('Probability threshold')
ax1.set_ylabel('Reward Value')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 2: CDF of the confidence scores.
k=0
for i in model_acc:
ax3.plot(confidence_range, CDF, label=model_names[k],marker='x')
k=k+1
ax3.set_xlabel('Probability threshold')
ax3.set_ylabel('CDF')
ax3.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax3.grid('k', ls='--', alpha=0.3)
plt.savefig(figname, dpi=1000)
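# Toy call sketch (assumption: the numbers and model names below are made up)
# showing the expected shapes: one accuracy curve per candidate model, all
# sampled on the same confidence grid, plus the CDF of scores on that grid.
def _demo_plot_reward_vs_confidence():
    conf = [0.1, 0.3, 0.5, 0.7, 0.9]
    base = [0.60, 0.62, 0.65, 0.70, 0.78]
    others = [[0.58, 0.61, 0.66, 0.72, 0.80],
              [0.55, 0.59, 0.63, 0.69, 0.77]]
    cdf = [0.05, 0.20, 0.45, 0.75, 1.00]
    plot_reward_vs_confidence(conf, base, others, ['model_A', 'model_B'],
                              'demo_reward_vs_confidence.png', cdf)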
def plot_reward_vs_confidence_old(confidence_range, base_acc, model_acc, model_names, figname, CDF):
"""
Plot the results by multi-armed bandit solvers.
Args:
solvers (list<Solver>): All of them should have been fitted.
solver_names (list<str)
figname (str)
"""
fig = plt.figure(figsize=(14, 4))
fig.subplots_adjust(bottom=0.3, wspace=0.3)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
    # Sub.fig. 1: reward vs. probability threshold.
k=0
for i in model_acc:
ax1.plot(confidence_range, i, label=model_names[k],marker='x')
k=k+1
ax1.set_xlabel('Probability threshold')
ax1.set_ylabel('Reward Value')
ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax1.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 2: reward minus base-model accuracy.
k=0
for i in model_acc:
ax2.plot(confidence_range, np.array(i)-np.asarray(base_acc), label=model_names[k],marker='x')
k=k+1
ax2.set_xlabel('Probability threshold')
ax2.set_ylabel('Reward Value-Base')
ax2.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax2.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 3: CDF of the confidence scores.
k=0
for i in model_acc:
ax3.plot(confidence_range, CDF, label=model_names[k],marker='x')
k=k+1
ax3.set_xlabel('Probability threshold')
ax3.set_ylabel('CDF')
ax3.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax3.grid('k', ls='--', alpha=0.3)
    # Sub.fig. 4: (reward - base accuracy) * CDF.
k=0
for i in model_acc:
ax4.plot(confidence_range, (np.array(i)-np.asarray(base_acc))*np.asarray(CDF), label=model_names[k],marker='x')
k=k+1
ax4.set_xlabel('Probability threshold')
ax4.set_ylabel('Reward*Prob')
ax4.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)
ax4.grid('k', ls='--', alpha=0.3)
plt.savefig(figname, dpi=1000)
def reward_vs_confidence(N=1000,
ModelID=[100,0,1,2,3,4],
ModelIndex = [0,1,2,3],
confidencerange = (0.1,0.2,0.3,0.4,0.5,0.6,0.7,.99,1),
datapath='path/to/imagenet/result/val_performance'):
"""
Run a small experiment on solving a Bernoulli bandit with K slot machines,
each with a randomly initialized reward probability.
Args:
K (int): number of slot machiens.
N (int): number of time steps to try.
"""
    print('reward datapath',datapath)
b0 = BernoulliBanditwithData(ModelID=ModelID,datapath=datapath)
K = len(ModelID)
print ("Data generated Bernoulli bandit has reward probabilities:\n", b0.probas)
print ("The best machine has index: {} and proba: {}".format(
max(range(K), key=lambda i: b0.probas[i]), max(b0.probas)))
Params0 = context_params(ModelID=ModelID,datapath=datapath)
#confidencerange = (0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99,0.9999,1)
#confidencerange = (0.99,0.991,0.992,0.993,0.994,0.995,0.996,0.997,0.9999,1)
BaseAccuracy, Others = Params0.BaseModel.Compute_Conditional_Accuracy_AmongModel_List(ScoreRange=confidencerange,BaseID=0,ModelID=ModelIndex)
print(BaseAccuracy, Others)
CDF = Params0.BaseModel.Compute_Prob_vs_Score(ScoreRange=confidencerange)
print(CDF)
#CDF1 = Compute_CDF_wrt_Score(ScoreRange=confidencerange)
#print(CDF1)
#print(Params0.BaseModel.Compute_Conditional_Accuracy(Score))
#Params1 = context_params(ModelID=[2])
#print(Params1.BaseModel.Compute_Conditional_Accuracy(Score))
# Test for different combinaers
#ParamsTest = BaseModel(ModelID=[0,1,3,4,5,100])
#output = ParamsTest.Stacking_AllModels()
# End of Test
# print(ParamsTest.Compute_Conditional_Accuracy_AmongModel(ScoreBound=Score, ModelID = [0,1]))
plot_reward_vs_confidence(confidencerange, BaseAccuracy,Others, ModelID,"model reward compare_ModelID_{}.png".format(ModelID),CDF)
def test_plotline():
prange= [0. , 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007,
0.0008, 0.0009, 0.001 , 0.0011, 0.0012, 0.0013, 0.0014, 0.0015,
0.0016, 0.0017, 0.0018, 0.0019, 0.002 , 0.0021, 0.0022, 0.0023,
0.0024, 0.0025, 0.0026, 0.0027, 0.0028, 0.0029, 0.003 , 0.0031,
0.0032, 0.0033, 0.0034, 0.0035, 0.0036, 0.0037, 0.0038, 0.0039,
0.004 , 0.0041, 0.0042, 0.0043, 0.0044, 0.0045, 0.0046, 0.0047,
0.0048, 0.0049, 0.005 , 0.0051, 0.0052, 0.0053, 0.0054, 0.0055,
0.0056, 0.0057, 0.0058, 0.0059, 0.006 , 0.0061, 0.0062, 0.0063,
0.0064, 0.0065, 0.0066, 0.0067, 0.0068, 0.0069, 0.007 , 0.0071,
0.0072, 0.0073, 0.0074, 0.0075, 0.0076, 0.0077, 0.0078, 0.0079,
0.008 , 0.0081, 0.0082, 0.0083, 0.0084, 0.0085, 0.0086, 0.0087,
0.0088, 0.0089, 0.009 , 0.0091, 0.0092, 0.0093, 0.0094, 0.0095,
0.0096, 0.0097, 0.0098, 0.0099, 0.01 , 0.0101, 0.0102, 0.0103,
0.0104, 0.0105, 0.0106, 0.0107, 0.0108, 0.0109, 0.011 , 0.0111,
0.0112, 0.0113, 0.0114, 0.0115, 0.0116, 0.0117, 0.0118, 0.0119,
0.012 , 0.0121, 0.0122, 0.0123, 0.0124, 0.0125, 0.0126, 0.0127,
0.0128, 0.0129, 0.013 , 0.0131, 0.0132, 0.0133, 0.0134, 0.0135,
0.0136, 0.0137, 0.0138, 0.0139, 0.014 , 0.0141, 0.0142, 0.0143,
0.0144, 0.0145, 0.0146, 0.0147, 0.0148, 0.0149, 0.015 , 0.0151,
0.0152, 0.0153, 0.0154, 0.0155, 0.0156, 0.0157, 0.0158, 0.0159,
0.016 , 0.0161, 0.0162, 0.0163, 0.0164, 0.0165, 0.0166, 0.0167,
0.0168, 0.0169, 0.017 , 0.0171, 0.0172, 0.0173, 0.0174, 0.0175,
0.0176, 0.0177, 0.0178, 0.0179, 0.018 , 0.0181, 0.0182, 0.0183,
0.0184, 0.0185, 0.0186, 0.0187, 0.0188, 0.0189, 0.019 , 0.0191,
0.0192, 0.0193, 0.0194, 0.0195, 0.0196, 0.0197, 0.0198, 0.0199,
0.02 , 0.0201, 0.0202, 0.0203, 0.0204, 0.0205, 0.0206, 0.0207,
0.0208, 0.0209, 0.021 , 0.0211, 0.0212, 0.0213, 0.0214, 0.0215,
0.0216, 0.0217, 0.0218, 0.0219, 0.022 , 0.0221, 0.0222, 0.0223,
0.0224, 0.0225, 0.0226, 0.0227, 0.0228, 0.0229, 0.023 , 0.0231,
0.0232, 0.0233, 0.0234, 0.0235, 0.0236, 0.0237, 0.0238, 0.0239,
0.024 , 0.0241, 0.0242, 0.0243, 0.0244, 0.0245, 0.0246, 0.0247,
0.0248, 0.0249, 0.025 , 0.0251, 0.0252, 0.0253, 0.0254, 0.0255,
0.0256, 0.0257, 0.0258, 0.0259, 0.026 , 0.0261, 0.0262, 0.0263,
0.0264, 0.0265, 0.0266, 0.0267, 0.0268, 0.0269, 0.027 , 0.0271,
0.0272, 0.0273, 0.0274, 0.0275, 0.0276, 0.0277, 0.0278, 0.0279,
0.028 , 0.0281, 0.0282, 0.0283, 0.0284, 0.0285, 0.0286, 0.0287,
0.0288, 0.0289, 0.029 , 0.0291, 0.0292, 0.0293, 0.0294, 0.0295,
0.0296, 0.0297, 0.0298, 0.0299, 0.03 , 0.0301, 0.0302, 0.0303,
0.0304, 0.0305, 0.0306, 0.0307, 0.0308, 0.0309, 0.031 , 0.0311,
0.0312, 0.0313, 0.0314, 0.0315, 0.0316, 0.0317, 0.0318, 0.0319,
0.032 , 0.0321, 0.0322, 0.0323, 0.0324, 0.0325, 0.0326, 0.0327,
0.0328, 0.0329, 0.033 , 0.0331, 0.0332, 0.0333, 0.0334, 0.0335,
0.0336, 0.0337, 0.0338, 0.0339, 0.034 , 0.0341, 0.0342, 0.0343,
0.0344, 0.0345, 0.0346, 0.0347, 0.0348, 0.0349, 0.035 , 0.0351,
0.0352, 0.0353, 0.0354, 0.0355, 0.0356, 0.0357, 0.0358, 0.0359,
0.036 , 0.0361, 0.0362, 0.0363, 0.0364, 0.0365, 0.0366, 0.0367,
0.0368, 0.0369, 0.037 , 0.0371, 0.0372, 0.0373, 0.0374, 0.0375,
0.0376, 0.0377, 0.0378, 0.0379, 0.038 , 0.0381, 0.0382, 0.0383,
0.0384, 0.0385, 0.0386, 0.0387, 0.0388, 0.0389, 0.039 , 0.0391,
0.0392, 0.0393, 0.0394, 0.0395, 0.0396, 0.0397, 0.0398, 0.0399,
0.04 , 0.0401, 0.0402, 0.0403, 0.0404, 0.0405, 0.0406, 0.0407,
0.0408, 0.0409, 0.041 , 0.0411, 0.0412, 0.0413, 0.0414, 0.0415,
0.0416, 0.0417, 0.0418, 0.0419, 0.042 , 0.0421, 0.0422, 0.0423,
0.0424, 0.0425, 0.0426, 0.0427, 0.0428, 0.0429, 0.043 , 0.0431,
0.0432, 0.0433, 0.0434, 0.0435, 0.0436, 0.0437, 0.0438, 0.0439,
0.044 , 0.0441, 0.0442, 0.0443, 0.0444, 0.0445, 0.0446, 0.0447,
0.0448, 0.0449, 0.045 , 0.0451, 0.0452, 0.0453, 0.0454, 0.0455,
0.0456, 0.0457, 0.0458, 0.0459, 0.046 , 0.0461, 0.0462, 0.0463,
0.0464, 0.0465, 0.0466, 0.0467, 0.0468, 0.0469, 0.047 , 0.0471,
0.0472, 0.0473, 0.0474, 0.0475, 0.0476, 0.0477, 0.0478, 0.0479,
0.048 , 0.0481, 0.0482, 0.0483, 0.0484, 0.0485, 0.0486, 0.0487,
0.0488, 0.0489, 0.049 , 0.0491, 0.0492, 0.0493, 0.0494, 0.0495,
0.0496, 0.0497, 0.0498, 0.0499]
acc = [0.48301023, 0.48457155, 0.48538639, 0.48615516, 0.48668402,
0.48743234, 0.48818995, 0.48874007, 0.48916215, 0.48976699,
0.49029502, 0.49083267, 0.49127285, 0.49186667, 0.49235521,
0.49291153, 0.49324094, 0.4937676 , 0.494199 , 0.49455204,
0.49486084, 0.49522269, 0.49560935, 0.49594377, 0.49625499,
0.49656768, 0.49680171, 0.497076 , 0.49740774, 0.49774282,
0.49808112, 0.49844063, 0.49888367, 0.49907962, 0.49934593,
0.4996519 , 0.50010442, 0.50044377, 0.50083441, 0.50119005,
0.50157951, 0.50191593, 0.50229962, 0.50263862, 0.5029507 ,
0.50321984, 0.50355179, 0.50382114, 0.50421764, 0.50475099,
0.50509806, 0.50548435, 0.50571974, 0.50673374, 0.50709485,
0.50754149, 0.50806022, 0.50838091, 0.50895068, 0.51405688,
0.51405485, 0.51387681, 0.51375979, 0.51368061, 0.51363966,
0.51358214, 0.51348813, 0.51320118, 0.5131013 , 0.51299855,
0.51285864, 0.51261339, 0.51251116, 0.51239189, 0.51230296,
0.51222542, 0.51213922, 0.51213295, 0.51199515, 0.51189603,
0.51178252, 0.51170102, 0.51167787, 0.51146198, 0.51132532,
0.51125551, 0.51083861, 0.51080367, 0.51065056, 0.51054177,
0.51030458, 0.51009311, 0.50985171, 0.50952144, 0.50941722,
0.50885601, 0.50872907, 0.5086227 , 0.50837746, 0.50827709,
0.50811138, 0.50789465, 0.50776248, 0.50757616, 0.50723169,
0.50710103, 0.50692262, 0.50636731, 0.50551236, 0.5052581 ,
0.50467229, 0.50438479, 0.50419524, 0.50370731, 0.50344827,
0.50315895, 0.50299427, 0.50237316, 0.50076062, 0.5000888 ,
0.49904193, 0.49821103, 0.49782044, 0.49751325, 0.49721562,
0.49615602, 0.49570051, 0.49546224, 0.49528738, 0.49499799,
0.49480788, 0.49441211, 0.49400208, 0.4937852 , 0.49357293,
0.4932995 , 0.49308115, 0.49274721, 0.49232387, 0.4904962 ,
0.48979254, 0.48883763, 0.48723269, 0.48694961, 0.48664716,
0.48383779, 0.4823387 , 0.48139062, 0.48097353, 0.48045641,
0.47893606, 0.47857627, 0.4783139 , 0.47800683, 0.47767765,
0.47749323, 0.47730572, 0.47711734, 0.47697098, 0.47674 ,
0.47655673, 0.47617975, 0.47604766, 0.47593491, 0.4756809 ,
0.47553382, 0.47541119, 0.47521898, 0.47502894, 0.47485214,
0.47467929, 0.47456147, 0.47439464, 0.4742843 , 0.47419802,
0.47407429, 0.47394389, 0.47376382, 0.47366419, 0.47347226,
0.4733965 , 0.47327365, 0.47314522, 0.47295317, 0.47277204,
0.47265397, 0.47254881, 0.47238721, 0.47231294, 0.47213071,
0.47205963, 0.471965 , 0.47181219, 0.47166599, 0.4715344 ,
0.47142736, 0.47133969, 0.47124544, 0.47120949, 0.47109536,
0.47099978, 0.47084196, 0.47067891, 0.47054779, 0.47039573,
0.47028735, 0.47016451, 0.47002245, 0.46977837, 0.46963242,
0.46943925, 0.4692959 , 0.46914154, 0.46891011, 0.4687833 ,
0.4685379 , 0.46843594, 0.46825524, 0.46778678, 0.46757136,
0.46737609, 0.46692911, 0.46674504, 0.46645814, 0.46626084,
0.46601046, 0.46587982, 0.46568659, 0.46549668, 0.46531255,
0.46491423, 0.4644362 , 0.46398542, 0.4631161 , 0.46295977,
0.46250332, 0.46236719, 0.46221666, 0.462093 , 0.46187842,
0.46174634, 0.46159738, 0.46147783, 0.46137749, 0.46129638,
0.4611781 , 0.46107324, 0.46094401, 0.46083739, 0.46074101,
0.46072508, 0.46064278, 0.46052262, 0.46042853, 0.46034242,
0.46028446, 0.46017712, 0.46011206, 0.46002659, 0.45995817,
0.45986543, 0.45975698, 0.45968683, 0.45957428, 0.45942207,
0.45930791, 0.45921235, 0.45910849, 0.45898494, 0.45888329,
0.45879647, 0.45870982, 0.45870496, 0.45862491, 0.45850992,
0.45846477, 0.4583252 , 0.45870034, 0.45860152, 0.4584608 ,
0.45840916, 0.45837632, 0.45829484, 0.45822002, 0.45816921,
0.45808426, 0.45801872, 0.4579592 , 0.45785556, 0.45777885,
0.4577343 , 0.45766358, 0.45753936, 0.45752268, 0.45744507,
0.45736837, 0.45728324, 0.45717934, 0.45703663, 0.45697995,
0.45691548, 0.45679727, 0.45673414, 0.45666303, 0.45661996,
0.4565089 , 0.45641751, 0.45633791, 0.45626128, 0.45619948,
0.4561366 , 0.45613471, 0.45607387, 0.45597782, 0.45588608,
0.45581065, 0.45568215, 0.4555245 , 0.45539021, 0.45530577,
0.45521037, 0.4550916 , 0.45500052, 0.45498943, 0.45484803,
0.45476247, 0.45469974, 0.45461052, 0.45449327, 0.45441162,
0.4543233 , 0.45421517, 0.45414812, 0.45402163, 0.45396933,
0.45382181, 0.45372327, 0.45364773, 0.4535485 , 0.45345609,
0.45338647, 0.45332349, 0.45321917, 0.45318078, 0.45311913,
0.45302852, 0.45289496, 0.45282775, 0.45291292, 0.45281203,
0.45271895, 0.45259684, 0.45251492, 0.45226131, 0.45199698,
0.45190208, 0.45177381, 0.45167107, 0.45156732, 0.45120557,
0.4510243 , 0.45040894, 0.45016372, 0.41494005, 0.41482359,
0.4147391 , 0.41467827, 0.41456255, 0.41442845, 0.41435356,
0.41427217, 0.4141186 , 0.41393056, 0.41373277, 0.41356792,
0.41346815, 0.41313181, 0.41306098, 0.41297357, 0.41284036,
0.41271761, 0.41264731, 0.41260986, 0.41259229, 0.41252037,
0.41246792, 0.41244859, 0.41239455, 0.41236259, 0.41230149,
0.41226418, 0.41217959, 0.41212254, 0.41211362, 0.41207712,
0.41202834, 0.4119794 , 0.41189217, 0.41186648, 0.41183323,
0.41177104, 0.4117605 , 0.41172562, 0.41171102, 0.4116806 ,
0.41165032, 0.41161321, 0.41153588, 0.4114937 , 0.41145179,
0.41141475, 0.41141205, 0.4113842 , 0.41137095, 0.41133905,
0.41131634, 0.41129309, 0.41124033, 0.41121707, 0.41119274,
0.41117111, 0.41115895, 0.41114137, 0.4111238 , 0.4111119 ,
0.41109377, 0.41106132, 0.41101536, 0.41100238, 0.41097399,
0.41095669, 0.4109064 , 0.41086747, 0.4108653 , 0.41084692,
0.41080381, 0.41078624, 0.4107565 , 0.41074001, 0.4107346 ,
0.41071432, 0.41067972, 0.41063105, 0.41062294, 0.41059725,
0.41055453, 0.41050722, 0.41047964, 0.41046612, 0.41040232,
0.41038609, 0.41036176, 0.41036446, 0.41036176, 0.41034473,
0.41029336, 0.41027285, 0.4102012 , 0.41018011, 0.41015145,
0.41014199, 0.41010603, 0.4100817 , 0.41002357, 0.40999707,
0.40999301, 0.40998193, 0.40995883, 0.40995234, 0.40991009,
0.40989792, 0.4098425 , 0.40983087, 0.40981059, 0.40980789,
0.40978626, 0.40978414, 0.40976115, 0.40971627, 0.40970445,
0.40969383, 0.40966004, 0.40961732, 0.40958487, 0.4095454 ,
0.40952512, 0.40952269, 0.40948619, 0.40948078, 0.40944969,
0.40944428, 0.40941995, 0.40940778, 0.40941589, 0.40941589,
0.40937777, 0.40934938, 0.40932234, 0.40931288, 0.4092899 ]
cost = [5.99998378, 5.99995133, 5.99998378, 5.99998378, 5.99998378,
5.99998378, 5.99996756, 5.99996756, 5.99998378, 5.99993511,
6. , 5.99995133, 5.99998378, 5.99995133, 5.99996756,
5.99996756, 5.99993511, 5.99998378, 5.99996756, 5.99993511,
5.99995133, 5.99996756, 5.99993511, 6. , 5.99998378,
5.99995133, 6. , 6. , 5.99996756, 5.99996756,
6. , 5.99995133, 5.99995133, 5.99998378, 5.99991889,
5.99998378, 5.99995133, 5.99996756, 5.99995133, 5.99991889,
5.99995133, 6. , 5.99995133, 6. , 5.99996756,
5.99998378, 5.99998378, 5.99993511, 5.99996756, 6. ,
5.99993511, 5.99996756, 6. , 5.99998378, 5.99996756,
5.99993511, 5.99995133, 5.99996756, 5.99995133, 5.99482512,
5.96959964, 5.93986438, 5.90917202, 5.875365 , 5.84858218,
5.81982026, 5.76072286, 5.73048472, 5.70715723, 5.68048796,
5.65459737, 5.62457011, 5.59642463, 5.57118292, 5.54910454,
5.52188372, 5.49866978, 5.47579651, 5.45198235, 5.42672442,
5.39849783, 5.37371034, 5.34908507, 5.32488158, 5.29626565,
5.27175394, 5.22610473, 5.19873791, 5.17562131, 5.1535267 ,
5.12797677, 5.10221595, 5.07600091, 5.04115567, 5.01468107,
4.94257349, 4.91944066, 4.89812472, 4.87351567, 4.84459153,
4.82238336, 4.79530855, 4.77207839, 4.73781714, 4.70809811,
4.68477062, 4.65748491, 4.60662838, 4.53348258, 4.5040231 ,
4.43806372, 4.41154046, 4.3859743 , 4.34853352, 4.32293492,
4.29568166, 4.26372396, 4.20829278, 4.08779443, 4.03799234,
3.97743495, 3.91929466, 3.89564272, 3.86397703, 3.83818376,
3.76805529, 3.72065408, 3.69499059, 3.67588086, 3.65414314,
3.63321653, 3.60901304, 3.58041334, 3.55827007, 3.53622413,
3.51049575, 3.48917981, 3.46007722, 3.42815197, 3.31135228,
3.26297774, 3.19297904, 3.06952826, 3.04595743, 3.01523263,
2.82674713, 2.74002336, 2.67805464, 2.6509636 , 2.60555772,
2.50275777, 2.48069561, 2.4648952 , 2.44812147, 2.43077996,
2.41721822, 2.40394848, 2.39111673, 2.37920966, 2.36603725,
2.35312439, 2.33891376, 2.32945623, 2.32024203, 2.30869184,
2.29655765, 2.28491013, 2.27579326, 2.26568685, 2.25663487,
2.24586334, 2.23767114, 2.22874895, 2.22080008, 2.21041788,
2.20259879, 2.19516904, 2.18631173, 2.17881708, 2.1691811 ,
2.16051846, 2.15156382, 2.14377717, 2.13540653, 2.12697099,
2.12012524, 2.1119817 , 2.10146973, 2.09397508, 2.08388489,
2.07486536, 2.06678671, 2.05770229, 2.05040231, 2.04141522,
2.0323308 , 2.02389527, 2.01721173, 2.00828953, 2.00157355,
1.99427357, 1.98590293, 1.97717539, 1.96831808, 1.96066122,
1.95154435, 1.94359548, 1.93636039, 1.92682175, 1.915012 ,
1.90612225, 1.89590228, 1.8862663 , 1.87789566, 1.86851924,
1.85755305, 1.84786841, 1.83615599, 1.80666407, 1.79229122,
1.78366102, 1.76315619, 1.75384466, 1.73528648, 1.7239634 ,
1.71507365, 1.70748167, 1.69943547, 1.69190838, 1.6825644 ,
1.6716631 , 1.65424048, 1.63678541, 1.61277659, 1.60628772,
1.5939913 , 1.58695088, 1.57871001, 1.57163714, 1.56274739,
1.55638829, 1.5488612 , 1.54246966, 1.53666212, 1.53244436,
1.52702615, 1.52173772, 1.51602751, 1.51041464, 1.50561287,
1.50136266, 1.49763156, 1.4931218 , 1.48887159, 1.48410226,
1.48027383, 1.47514762, 1.47080008, 1.46742586, 1.46398676,
1.459769 , 1.45636234, 1.45321524, 1.44925702, 1.4444877 ,
1.44023749, 1.43653884, 1.43186685, 1.42761664, 1.42278243,
1.41905133, 1.41447667, 1.41042113, 1.4068198 , 1.40192071,
1.39640517, 1.39127896, 1.37959899, 1.37586789, 1.37070923,
1.36668613, 1.36263059, 1.35948349, 1.35607683, 1.35208617,
1.34708974, 1.34361819, 1.33988709, 1.33547466, 1.33115956,
1.32713646, 1.32080981, 1.31717604, 1.31415872, 1.31110895,
1.30760496, 1.30423074, 1.29998053, 1.29560055, 1.29199922,
1.28856012, 1.2840828 , 1.28074103, 1.27694504, 1.27065083,
1.26717929, 1.2636753 , 1.26036597, 1.25686198, 1.25364999,
1.25004867, 1.24761534, 1.2440789 , 1.24031536, 1.23525404,
1.23204205, 1.22814872, 1.22266563, 1.2176692 , 1.21319188,
1.20839011, 1.2038479 , 1.20112257, 1.19677503, 1.19310882,
1.18992927, 1.18730128, 1.18363507, 1.17850886, 1.17562131,
1.17302576, 1.16926222, 1.16702355, 1.16189735, 1.15858802,
1.15313737, 1.14856271, 1.14583739, 1.14340406, 1.13844008,
1.13526053, 1.13045876, 1.12695477, 1.12267212, 1.11946013,
1.11400947, 1.10949971, 1.10661216, 1.09973396, 1.09558108,
1.08763221, 1.08305756, 1.07887223, 1.07186425, 1.06485627,
1.06021673, 1.05340341, 1.0491532 , 1.04516255, 1.02744793,
1.02125105, 1.00470443, 0.99208358, 0.23337227, 0.22814872,
0.22406074, 0.22204919, 0.21922653, 0.2165012 , 0.21445721,
0.21192655, 0.20916878, 0.20537279, 0.2015768 , 0.19693725,
0.19512037, 0.18824216, 0.18661995, 0.18389462, 0.18120174,
0.17740575, 0.17432354, 0.17273376, 0.17205243, 0.17017066,
0.16861333, 0.167932 , 0.16637467, 0.16484978, 0.16410356,
0.1630329 , 0.16167024, 0.16014535, 0.1587178 , 0.15787425,
0.15654403, 0.1552787 , 0.15287781, 0.15209915, 0.15080138,
0.14963338, 0.14866005, 0.14797872, 0.14778405, 0.14729738,
0.14642139, 0.14535072, 0.14382584, 0.14324184, 0.14256051,
0.1414574 , 0.1411654 , 0.1402894 , 0.13951074, 0.13834274,
0.13727208, 0.13649341, 0.13542275, 0.13493608, 0.13376809,
0.13250276, 0.13201609, 0.13143209, 0.13065343, 0.13006943,
0.12932321, 0.12864188, 0.12718188, 0.12659788, 0.12552722,
0.12429434, 0.12302901, 0.12150412, 0.12101746, 0.12004412,
0.11926546, 0.11897346, 0.11800013, 0.11735124, 0.11715658,
0.11647524, 0.11559925, 0.11452858, 0.11407436, 0.11349036,
0.11261437, 0.11164104, 0.11099215, 0.11044059, 0.10943482,
0.10875349, 0.10787749, 0.10748816, 0.10651483, 0.10602816,
0.10505483, 0.10447083, 0.10333528, 0.10297839, 0.10229706,
0.1018104 , 0.10125884, 0.10057751, 0.09953929, 0.09866329,
0.09827396, 0.09788463, 0.0974953 , 0.09710596, 0.0960353 ,
0.09528908, 0.09444553, 0.09366686, 0.09308286, 0.09279086,
0.09220687, 0.09181753, 0.09152553, 0.0905522 , 0.08987087,
0.08948154, 0.08883265, 0.08805399, 0.08746999, 0.08669132,
0.08630199, 0.08581533, 0.084842 , 0.084258 , 0.083674 ,
0.08321978, 0.08273311, 0.08224645, 0.08205178, 0.08205178,
0.08127312, 0.08078645, 0.08039712, 0.08010512, 0.07955357]
a = VisualizeTools()
max_x = 200
prange=prange[0:max_x]
acc =acc[0:max_x]
cost = cost[0:max_x]
fig, ax = a.plotline(prange,acc,xlabel='Weight Value', ylabel='Accuracy',
filename='coco_p_value_acc')
fig, ax = a.plotscatter(xvalue=[0.0060416667],
yvalue=[0.5140010157426727],
fig=fig,ax=ax,
markersize=30,
legend='Learned Thres',
filename='coco_p_value_acc')
fig, ax = a.plotline(prange,cost,xlabel='Weight Value', ylabel='Cost',
filename='coco_p_value_cost')
fig, ax = a.plotscatter(xvalue=[0.0060416667],
yvalue=[5.9999899999999995],
fig=fig,ax=ax,
markersize=30,
legend='Learned Thres',
filename='coco_p_value_cost')
def getlabeldist(datapath=r'..\APIperformance\mlserviceperformance_coco\Model0_TrueLabel.txt'):
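# Count, for each label, the number of images that contain it at least once,
# and also return the total number of annotated images.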
mydict = dict()
labels = json.load(open(datapath))
for imgname in labels:
labelexist = dict()
for temp in labels[imgname]:
#print(temp)
label = temp['transcription']
if label in mydict:
if(label not in labelexist):
mydict[label]+=1
labelexist[label] = 1
else:
mydict[label] = 1
len_img = len(labels)
return mydict, len_img
def test_label_dist():
showlegend = True
a = VisualizeTools(figuresize=(22,8),figureformat='jpg')
name = ['Microsoft','Google']
value1 = [5175/6358,4302/6358]
value2 = [5368/6358,4304/6358]
legend = ['2020 March', '2021 Feb']
a.plot_bar2value(barname = name,barvalue = value1,
barvalue2 = value2,
color=['r','b'],
filename='FERPLUS',yname='',
legend=legend,
showlegend=showlegend,
yrange=[min(value1)-0.05,max(value2)+0.05])
showlegend = True
a = VisualizeTools(figuresize=(22,8),figureformat='jpg')
name = ['Microsoft','Google']
value1 = [10996/15339,10069/15339]
value2 = [11000/15339,10073/15339]
legend = ['2020 March', '2021 Feb']
a.plot_bar2value(barname = name,barvalue = value1,
barvalue2 = value2,
color=['r','b'],
filename='RAFDB',yname='',
legend=legend,
showlegend=showlegend,
yrange=[min(value1)-0.05,max(value2)+0.05])
a.plot_bar(barname = name,barvalue = value1)
def getlabelprecisionandrecall(targetlabel='person',
truelabelpath=r'..\APIperformance\mlserviceperformance_coco\Model2_TrueLabel.txt',
predlabelpath=r'..\APIperformance\mlserviceperformance_coco\Model6_PredictedLabel.txt',):
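# Per-label precision and recall: an image counts as correct when the target label
# appears in both its true and predicted annotations; precision divides by the number
# of images predicted to contain the label, recall by the number that truly contain it.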
truelabel = json.load(open(truelabelpath))
predlabel = json.load(open(predlabelpath))
count = 0
for imgname in truelabel:
truehas = False
for temp in truelabel[imgname]:
#print(temp)
label = temp['transcription']
if label == targetlabel:
truehas = True
predhas = False
for temp in predlabel[imgname]:
#print(temp)
label = temp['transcription']
if label == targetlabel:
predhas = True
if(truehas and predhas):
count+=1
totaltrue = getlabeldist(truelabelpath)
totalpred = getlabeldist(predlabelpath)
if(targetlabel in totalpred[0]):
pred1 = totalpred[0][targetlabel]
else:
pred1 = 0
print('total true, total pred, all correct',totaltrue[0][targetlabel],pred1,count)
if(pred1==0):
return 0, count/totaltrue[0][targetlabel]
return count/totalpred[0][targetlabel], count/totaltrue[0][targetlabel]
def test_precisionrecall(predlabelpath=r'cocoresult\majvote_coco.txt',
labelid=100,
showlegend=False):
labeldist, labelen = getlabeldist()
labellist = list()
precisionlist = list()
recalllist = list()
for label in sorted(labeldist):
print(label)
pre, recall = getlabelprecisionandrecall(targetlabel=label,
predlabelpath=predlabelpath,)
precisionlist.append(pre)
recalllist.append(recall)
labellist.append(label)
print('pre and recall',precisionlist, recalllist)
np.savetxt('precision'+str(labelid)+'.txt', precisionlist)
np.savetxt('recall'+str(labelid)+'.txt', recalllist)
np.savetxt('label'+str(labelid)+'.txt',labellist,fmt='%s')
a = VisualizeTools(figuresize=(23,8),figureformat='eps')
a.plot_bar(barname = labellist,barvalue = precisionlist,filename='precisionmajvote',yname='')
a.plot_bar(barname = labellist,barvalue = recalllist,filename='recallmajvote',yname='')
a.plot_bar2value(barname = labellist,barvalue = precisionlist,
barvalue2 = recalllist,
color=['r','b'],
filename='preandrecall'+str(labelid),yname='',
showlegend=showlegend)
return 0
if __name__ == '__main__':
'''
test_precisionrecall(predlabelpath='cocoresult\\FrugalMCTcoco.txt',
labelid=99999)
test_precisionrecall(predlabelpath='cocoresult\\majvote_coco.txt',
labelid=888)
test_precisionrecall(predlabelpath='cocoresult\\100000_coco_thres.txt',
labelid=100000,showlegend=True)
test_precisionrecall(predlabelpath='cocoresult\\0_coco_thres.txt',
labelid=0)
test_precisionrecall(predlabelpath='cocoresult\\6_coco_thres.txt',
labelid=6)
test_precisionrecall(predlabelpath='cocoresult\\2_coco_thres.txt',
labelid=2)
'''
#getlabelprecisionandrecall()
test_label_dist()
#test_plotline()
matplotlib.pyplot.close('all')
|
Data_Acquisition_for_ML_Benchmark-main
|
src/visualizetools.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dam import Dam
print("Loading Dataset...")
instance=2 # instance id, can be 0,1,2,3,4
MyDam = Dam(instance=instance)
print("Dataset loaded!")
budget = MyDam.getbudget() # get budget
print("budget is:",budget)
# display the buyer data, ML model, and seller information
buyer_data = MyDam.getbuyerdata() # get buyer data
print("buyer data is:",buyer_data)
mlmodel = MyDam.getmlmodel() # get ml model
print("mlmodel is",mlmodel)
sellers_id = MyDam.getsellerid() # seller ids
print("seller ids are", sellers_id)
for i in sellers_id:
seller_i_price, seller_i_summary, seller_i_samples = MyDam.getsellerinfo(seller_id=int(i))
print("seller ", i, " price: ", seller_i_price.get_price_samplesize(100))
print("seller ", i, " summary: ", seller_i_summary)
print("seller ", i, " samples: ", seller_i_samples)
|
Data_Acquisition_for_ML_Benchmark-main
|
src/example.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:39:21 2022
@author: lingjiao
"""
def pricefunc_lin(frac = 1,
max_p = 100):
p1 = max_p * frac
return p1
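if __name__ == '__main__':
# Illustrative sanity check (not part of the original file): under the linear rule
# above, buying half of a dataset priced at max_p=100 costs 100 * 0.5 = 50.
print(pricefunc_lin(frac=0.5, max_p=100))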
|
Data_Acquisition_for_ML_Benchmark-main
|
src/utils.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
from pricefunction import PriceFunction
import numpy
numpy.random.seed(1111)
class Seller(object):
def __init__(self):
return
def loaddata(self,
data=None,
datapath=None,):
# data: a m x n matrix
# datapath: a path to a csv file.
# the file should be a matrix with column names.
if(not (data is None)):
self.data = data
return
if(datapath is not None):
self.data = numpy.loadtxt(open(datapath, "rb"),
delimiter=",",
skiprows=1)
return
print("Not implemented load data of seller")
return
def setprice(self, pricefunc):
self.pricefunc = pricefunc
def getprice(self,data_size):
q1 = data_size/(len(self.data))
return self.pricefunc.get_price(q1)
def getdata(self, data_size, price):
data = self.data
q1 = data_size/(len(self.data))
if(q1>1):
raise ValueError("The required number of samples is too large!")
if(self.pricefunc.get_price(q1) <= price):
number_of_rows = self.data.shape[0]
random_indices = numpy.random.choice(number_of_rows,
size=data_size,
replace=True)
rows = data[random_indices, :]
return rows
else:
raise ValueError("The buyer's offer is too small!")
return
def main():
print("test of the seller")
MySeller = Seller()
MySeller.loaddata(data=numpy.asmatrix([[0,1,1],[1,0,1]]))
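# Alternatively, a seller could load from disk (illustrative sketch, assuming a headered CSV exists):
# MySeller.loaddata(datapath="seller_features.csv")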
MyPricing = PriceFunction()
MyPricing.setup(max_p = 100, method="lin")
MySeller.setprice(MyPricing)
data = MySeller.getdata(1,60)
print("get data is ",data)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/seller.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
import glob
import pandas
def sub2stretagy(submission,MarketEngineObj):
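# Convert a submission (one requested sample count per seller) into a "strategy":
# a pair [sample counts, corresponding prices charged by each seller].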
stretagy1 = list()
cost1 = list()
for i in range(len(submission)):
stretagy1.append(submission[i])
cost1.append(MarketEngineObj.sellers[i].getprice(submission[i]))
stretagy = list()
stretagy.append(stretagy1)
stretagy.append(cost1)
#print("stretagy is:",stretagy)
return stretagy
class Helper(object):
def __init__(self):
return
def get_cost(self,submission,MarketEngineObj):
stretagy = sub2stretagy(submission,MarketEngineObj)
cost = sum(stretagy[1])
return cost
def load_data(self, submission, MarketEngineObj):
'''
Buy data from each seller according to the submission and concatenate it
into a single training matrix (features with the label in the last column).
Raises ValueError if the submission's total cost exceeds the buyer's budget.
'''
#print(" train buyer model ")
stretagy = sub2stretagy(submission,MarketEngineObj)
buyer_budget = MarketEngineObj.buyer_budget
print("strategy is:",stretagy)
# check if the budget constraint is satisfied.
cost = sum(stretagy[1])
if(cost>buyer_budget):
raise ValueError("The budget constraint is not satisfied!")
traindata = None
for i in range(len(MarketEngineObj.sellers)):
d1 = MarketEngineObj.sellers[i].getdata(stretagy[0][i],stretagy[1][i])
if(i==0):
traindata = d1
else:
traindata = numpy.concatenate((traindata,d1))
return traindata
def train_model(self, model, train_X, train_Y):
model.fit(train_X,train_Y)
return model
def eval_model(self, model, test_X, test_Y):
eval_acc = model.score(test_X, test_Y)
return eval_acc
def load_market_instance(self,
feature_path="features/0/",
buyer_data_path="buyerdata.csv",
price_path="price.txt",
budget_path="budget.txt",
):
paths = glob.glob(feature_path+"*.csv")
print("paths:",paths)
# 1. load seller data
seller_data = list()
seller_prices = list()
buyer_budget = numpy.loadtxt(budget_path)
buyer_budget = float(buyer_budget)
#print('budget_ is', type(buyer_budget))
# datafull = [numpy.loadtxt(path,delimiter=',') for path in paths]
datafull = [pandas.read_csv(path,header=None,engine="pyarrow").to_numpy() for path in paths]
seller_datasize = [len(data1) for data1 in datafull]
pricefull = numpy.loadtxt(price_path,delimiter=',',dtype=str)
for i in range(len(datafull)):
if(1):
seller_data.append(datafull[i])
#print(pricefull[i])
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = float(pricefull[i][1]), method=pricefull[i][0])
seller_prices.append(MyPricing1)
# buyer_data = numpy.loadtxt(buyer_data_path,delimiter=',')
buyer_data = pandas.read_csv(buyer_data_path,header=None,engine="pyarrow").to_numpy()
return seller_data, seller_prices, buyer_data, buyer_budget, seller_datasize
def main():
print("test of the helper")
MyMarketEngine = MarketEngine()
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
data_b = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
buyer_budget = 100
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
mlmodel1 = LogisticRegression(random_state=0)
MyMarketEngine.setup_market(seller_data=[data_1,data_2],
seller_prices = [MyPricing1,MyPricing2],
buyer_data=data_b,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
stretagy = [[1,2],[50,50]]
#MyMarketEngine.load_stretagy(stretagy)
#acc1 = MyMarketEngine.train_buyer_model()
#print("acc is ",acc1)
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, seller_datasize = MyHelper.load_market_instance(
feature_path="../features/0/",
buyer_data_path="../marketinfo/0/data_buyer/20.csv",
price_path="../marketinfo/0/price/price.txt",
budget_path="../marketinfo/0/price/budget.txt",
)
print("load data finished")
print("seller data size:",seller_datasize)
numpy.savetxt("../marketinfo/0/seller_datasize.csv",seller_datasize,fmt="%d")
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=buyer_budget,
mlmodel=mlmodel1,
)
print("set up market finished")
stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,10,10,10,10,15]
stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,800,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,0,0,0,0,0]
stretagy=[50,20,30,40,5,6,7,80,9,10,11,12,13,14,15,0,400,0,50,0]
stretagy=[100,200,300,400,500,600,70,80,9,10,11,12,13,14,15,50,50,50,50,50]
stretagy=[10,20,30,40,50,60,70,80,9,10,11,12,13,14,15,50,50,50,50,50]
stretagy=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,0,0,0,0]
traindata = MyHelper.load_data(stretagy, MyMarketEngine)
# several classifiers were tried; only the last assignment takes effect
model = RandomForestClassifier()
model = KNeighborsClassifier(n_neighbors=9)
model = LogisticRegression(random_state=0)
model = MyHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MyHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
print("acc is:", acc1)
model2 = DummyClassifier(strategy="most_frequent")
model2 = MyHelper.train_model(model2, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc2 = MyHelper.eval_model(model2,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
print("dummy acc is:", acc2)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/helper.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:35:29 2022
@author: lingjiao
"""
class PriceFunction(object):
def __init__(self):
return
def setup(self, max_p = 100, method="lin",
data_size=1):
self.max_p = max_p
self.method = method
self.data_size = data_size
def get_price(self,
frac=1,
):
if(frac<0 or frac>1):
raise ValueError("The fraction of samples must be within [0,1]!")
max_p = self.max_p
if(self.method=="lin"):
p1 = max_p * frac
return p1
return
def get_price_samplesize(self,
samplesize=10,
):
frac = samplesize/self.data_size
#print("frac is",frac)
return self.get_price(frac)
def main():
print("test of the price func")
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/pricefunction.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
import numpy
from seller import Seller
from buyer import Buyer
from pricefunction import PriceFunction
from marketengine import MarketEngine
from helper import Helper
class Evaluator(object):
def __init__(self):
self.Helper = Helper()
return
def eval_submission(self,
submission,
seller_data,
buyer_data,
seller_price,
buyer_budget=100,
mlmodel=LogisticRegression(random_state=0),
):
'''
Parameters
----------
submission : list
Purchase plan: the number of samples to buy from each seller.
seller_data : list of numpy matrices
One feature/label matrix per seller.
buyer_data : numpy matrix
Buyer's evaluation data (features with the label in the last column).
seller_price : list of PriceFunction
One pricing function per seller.
buyer_budget : float
Total budget available to the buyer.
mlmodel : sklearn estimator
Model trained on the purchased data.
Returns
-------
acc1 : float
Accuracy of the trained model on the buyer's data.
'''
MyMarketEngine = MarketEngine()
MyHelper = self.Helper
# set up the market
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_price,
buyer_data=buyer_data,
buyer_budget=buyer_budget,
mlmodel=mlmodel,
)
# get train data
traindata = MyHelper.load_data(submission, MyMarketEngine)
# train the model
model = MyHelper.train_model(mlmodel, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
# eval the model
acc1 = MyHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
return acc1
def main():
print("test of the evaluator")
submission = [[1,2],[50,50]]
data_1 = numpy.asmatrix([[0,1,0],[1,0,0]])
data_2 = numpy.asmatrix([[0,1,1],[1,0,1],[1,1,1],[0,0,1]])
seller_data = [data_1, data_2]
buyer_data = numpy.asmatrix([[0,1,0],[1,0,1],[0,1,1]])
MyPricing1 = PriceFunction()
MyPricing1.setup(max_p = 100, method="lin")
MyPricing2 = PriceFunction()
MyPricing2.setup(max_p = 100, method="lin")
seller_price = [MyPricing1, MyPricing2]
MyEval = Evaluator()
acc1 = MyEval.eval_submission(
submission,
seller_data,
buyer_data,
seller_price,
buyer_budget=100,
mlmodel=LogisticRegression(random_state=0),
)
print("acc is:", acc1)
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 18:36:30 2022
@author: lingjiao
"""
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
import numpy
from marketengine import MarketEngine
from helper import Helper
from sklearn.neighbors import KNeighborsClassifier
import json
def evaluate_batch(data_config,
):
instance_ids = data_config['instance_ids']
result = dict()
for id1 in instance_ids:
result[id1] = evaluate_multiple_trial(data_config,instance_id=id1)
return result
def evaluate_multiple_trial(data_config,
instance_id,
num_trial=10,
):
results = [evaluate_once(data_config=data_config,
instance_id=instance_id) for i in range(num_trial)]
#print("results are:",results)
results_avg = dict()
results_avg['cost'] = 0
results_avg['acc'] = 0
for item in results:
#print("item is:",item)
results_avg['cost'] += item['cost']/len(results)
results_avg['acc'] += item['acc']/len(results)
return results_avg
def evaluate_once(data_config,
instance_id):
# load submission
submission = load_submission(path = data_config['submission_path']+str(instance_id)+".csv")
# get the helper
model_name = data_config['model_name']
MarketHelper, MarketEngineObj, model, traindata, buyer_data = get_market_info(instance_id=instance_id,
model_name=model_name)
# calculate the cost of the submission
cost = MarketHelper.get_cost(submission,MarketEngineObj)
# generate the accuracy of the submission
traindata = MarketHelper.load_data(submission, MarketEngineObj)
model = MarketHelper.train_model(model, traindata[:,0:-1],
numpy.ravel(traindata[:,-1]))
acc1 = MarketHelper.eval_model(model,test_X=buyer_data[:,0:-1],test_Y=buyer_data[:,-1])
result = dict()
result['cost'] = cost
result['acc'] = acc1
return result
def load_submission(path):
data = numpy.loadtxt(path,delimiter=",",dtype=int)
return data
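# Illustrative note (an assumption about the on-disk layout): a submission file is a
# single comma-separated row of integers, one requested sample count per seller,
# e.g. "10,20,30,0,5", which numpy.loadtxt returns as array([10, 20, 30, 0, 5]).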
def get_market_info(instance_id,
model_name="lr"):
MyHelper = Helper()
seller_data, seller_prices, buyer_data, buyer_budget, data_size = MyHelper.load_market_instance(
feature_path="../features/"+str(instance_id)+"/",
buyer_data_path="../marketinfo/"+str(instance_id)+"/data_buyer/20.csv",
price_path="../marketinfo/"+str(instance_id)+"/price/price.txt",
budget_path="../marketinfo/"+str(instance_id)+"/price/budget.txt",
)
MyMarketEngine = MarketEngine()
mlmodel1 = LogisticRegression(random_state=0)
if(model_name=="knn"):
mlmodel1 = KNeighborsClassifier(n_neighbors=9)
if(model_name=='rf'):
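# note: despite the 'rf' name, a gradient boosting classifier is used here, not a random forest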
mlmodel1 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=0)
MyMarketEngine.setup_market(seller_data=seller_data,
seller_prices = seller_prices,
buyer_data=buyer_data,
buyer_budget=1e10,
mlmodel=mlmodel1,
)
return MyHelper, MyMarketEngine, mlmodel1,seller_data, buyer_data
def main():
data_config = json.load(open("../config/bilge20230301_rf.json")) # load the evaluation config
result = evaluate_batch(data_config)
json_object = json.dumps(result, indent=4)
save_path = data_config['save_path']
with open(save_path, "w") as outfile:
outfile.write(json_object)
print("The result is:",result)
return
if __name__ == '__main__':
main()
|
Data_Acquisition_for_ML_Benchmark-main
|
src/evaluator_submission.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# coding: utf-8
# In[1]:
import os, sys
import time
sys.path.insert(0, '..')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cbook import flatten
import lib
import torch, torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import MinMaxScaler
from pickle import dump
import random
import pandas as pd
from itertools import chain
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#device = 'cpu'
experiment_name = 'augur_node_shallow'
experiment_name = '{}_{}.{:0>2d}.{:0>2d}_{:0>2d}:{:0>2d}'.format(experiment_name, *time.gmtime()[:5])
print("experiment:", experiment_name)
# In[2]:
data = lib.Dataset("AUGUR", random_state=round(time.time()), quantile_transform=False,scaling='None', log_transform=False,quantile_noise=1e-3)
in_features = data.X_train.shape[1]
random_state=1337
output_distribution='normal'
data.y_train = np.log10(data.y_train)
data.y_valid = np.log10(data.y_valid)
data.y_test = np.log10(data.y_test)
print("Dataset reading Successful!")
# Plots the y-distribution
plt.hist(data.y_train, density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_train_dist.png")
plt.close()
plt.hist(data.y_test.reshape(-1), density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_test_dist.png")
plt.close()
plt.hist(data.y_valid.reshape(-1), density=False, bins=30)
plt.xlabel('Total performance')
plt.ylabel('count')
plt.savefig("y_valid_dist.png")
plt.close()
model = nn.Sequential(
lib.DenseBlock(in_features, 128, num_layers=6, tree_dim=3, depth=8, flatten_output=False,
choice_function=lib.entmax15, bin_function=lib.entmoid15),
lib.Lambda(lambda x: x[..., 0].mean(dim=-1)), # average first channels of every tree
).to(device)
with torch.no_grad():
res = model(torch.as_tensor(np.float32(data.X_train[:1000]), device=device))
# trigger data-aware init
#if torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# In[31]:
from qhoptim.pyt import QHAdam
optimizer_params = { 'nus':(0.7, 1.0), 'betas':(0.95, 0.998) }
print("qhoptim import successful!")
# In[33]:
if(True):
experiment_name = "augur_energy_6_layers_128_depth8_log_transformed__rel_error"
#experiment_name = "dummy_test"
trainer = lib.Trainer(
model=model, loss_function=F.mse_loss,
experiment_name=experiment_name,
warm_start=False,
Optimizer=QHAdam,
optimizer_params=optimizer_params,
verbose=True,
n_last_checkpoints=5
)
# Training parameters to control
loss_history, mse_history = [], []
best_mse = float('inf')
best_step_mse = 0
early_stopping_rounds = 5000
report_frequency = 100
# Train and plot the training loss and validation loss
if (True):
for batch in lib.iterate_minibatches(np.float32(data.X_train), np.float32(data.y_train), batch_size=512,
shuffle=True, epochs=float('inf')):
metrics = trainer.train_on_batch(*batch, device=device)
loss_history.append(metrics['loss'])
if trainer.step % report_frequency == 0:
trainer.save_checkpoint()
trainer.average_checkpoints(out_tag='avg')
trainer.load_checkpoint(tag='avg')
mse = trainer.evaluate_mse(
np.float32(data.X_valid), np.float32(data.y_valid), device=device, batch_size=512)
if mse < best_mse:
best_mse = mse
best_step_mse = trainer.step
trainer.save_checkpoint(tag='best_mse')
mse_history.append(mse)
trainer.load_checkpoint() # last
trainer.remove_old_temp_checkpoints()
plt.figure(figsize=[18, 6])
plt.subplot(1, 2, 1)
plt.plot(loss_history)
plt.title('Loss')
plt.grid()
plt.subplot(1, 2, 2)
plt.plot(mse_history)
plt.title('MSE')
plt.grid()
#plt.show()
filename = experiment_name + ".png"
plt.savefig(filename)
plt.close()
print("Loss %.5f" % (metrics['loss']))
print("Val MSE: %0.5f" % (mse))
if trainer.step > best_step_mse + early_stopping_rounds:
print('BREAK. There is no improvement for {} steps'.format(early_stopping_rounds))
print("Best step: ", best_step_mse)
print("Best Val MSE: %0.5f" % (best_mse))
break
# In case you want to test a particular checkpoint, uncomment the following
# and comment line 173-177
'''
trainer_test = lib.Trainer(model=model, loss_function=F.mse_loss)
ckpt_path = "/workspace/node/node/notebooks/augur_energy_6k_dataset_6_layers_128_depth8_log_transformed__rel_error/checkpoint_best_mse.pth"
trainer_test.load_checkpoint(path=ckpt_path)
mse, pred, ground, error = trainer_test.evaluate_mse_test(np.float32(data.X_test), np.float32(data.y_test), device=device)
print('Best step: ', trainer_test.step)
print('Mean Error', np.mean(error))
'''
# Evaluation on the test dataset
trainer.load_checkpoint(tag='best_mse')
mse, pred, ground, error = trainer.evaluate_mse_test(np.float32(data.X_test), np.float32(data.y_test), device=device)
print('Best step: ', trainer.step)
print("Test MSE: %0.5f" % (mse))
# Plot the correlation on the test set
plt.scatter(ground, ground, color='green', alpha=0.1)
plt.scatter(ground, pred, color='gray')
test_filename = experiment_name + "_test.png"
plt.savefig(test_filename)
plt.close()
|
Augur-main
|
train/augur_node_trainer.py
|
from __future__ import print_function, division
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
#from skimage import
import numpy as np
from scipy import ndimage, misc
from skimage import data
import matplotlib.pyplot as plt
from matplotlib import gridspec
import six
import six.moves as sm
def main():
draw_single_sequential_images()
draw_per_augmenter_images()
def draw_single_sequential_images():
image = misc.imresize(ndimage.imread("quokka.jpg")[0:643, 0:643], (128, 128))
st = lambda aug: iaa.Sometimes(0.5, aug)
seq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
st(iaa.Crop(percent=(0, 0.1))),
st(iaa.GaussianBlur((0, 3.0))),
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)),
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)),
st(iaa.Add((-10, 10), per_channel=0.5)),
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)),
st(iaa.Grayscale(alpha=(0.0, 1.0), name="Grayscale")),
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=[0, 1],
cval=(0, 1.0),
mode=ia.ALL
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25))
],
random_order=True
)
grid = seq.draw_grid(image, cols=8, rows=8)
misc.imsave("examples_grid.jpg", grid)
def draw_per_augmenter_images():
print("[draw_per_augmenter_images] Loading image...")
image = misc.imresize(ndimage.imread("quokka.jpg")[0:643, 0:643], (128, 128))
#image = misc.imresize(data.chelsea()[0:300, 50:350, :], (128, 128))
#image = misc.imresize(data.astronaut(), (128, 128))
#keypoints = [ia.Keypoint(x=43, y=43), ia.Keypoint(x=78, y=40), ia.Keypoint(x=64, y=73)] # left eye, right eye, mouth
keypoints = [ia.Keypoint(x=34, y=15), ia.Keypoint(x=85, y=13), ia.Keypoint(x=63, y=73)] # left ear, right ear, mouth
keypoints = [ia.KeypointsOnImage(keypoints, shape=image.shape)]
print("[draw_per_augmenter_images] Initializing...")
rows_augmenters = [
("Noop", [("", iaa.Noop()) for _ in sm.xrange(5)]),
#("Crop", [iaa.Crop(px=vals) for vals in [(2, 4), (4, 8), (6, 16), (8, 32), (10, 64)]]),
("Crop\n(top, right,\nbottom, left)", [(str(vals), iaa.Crop(px=vals)) for vals in [(2, 0, 0, 0), (0, 8, 8, 0), (4, 0, 16, 4), (8, 0, 0, 32), (32, 64, 0, 0)]]),
("Fliplr", [(str(p), iaa.Fliplr(p)) for p in [0, 0, 1, 1, 1]]),
("Flipud", [(str(p), iaa.Flipud(p)) for p in [0, 0, 1, 1, 1]]),
("Add", [("value=%d" % (val,), iaa.Add(val)) for val in [-45, -25, 0, 25, 45]]),
("Add\n(per channel)", [("value=(%d, %d)" % (vals[0], vals[1],), iaa.Add(vals, per_channel=True)) for vals in [(-55, -35), (-35, -15), (-10, 10), (15, 35), (35, 55)]]),
("Multiply", [("value=%.2f" % (val,), iaa.Multiply(val)) for val in [0.25, 0.5, 1.0, 1.25, 1.5]]),
("Multiply\n(per channel)", [("value=(%.2f, %.2f)" % (vals[0], vals[1],), iaa.Multiply(vals, per_channel=True)) for vals in [(0.15, 0.35), (0.4, 0.6), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
("GaussianBlur", [("sigma=%.2f" % (sigma,), iaa.GaussianBlur(sigma=sigma)) for sigma in [0.25, 0.50, 1.0, 2.0, 4.0]]),
("AdditiveGaussianNoise", [("scale=%.2f" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
("AdditiveGaussianNoise\n(per channel)", [("scale=%.2f" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255, per_channel=True)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
("Dropout", [("p=%.2f" % (p,), iaa.Dropout(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
("Dropout\n(per channel)", [("p=%.2f" % (p,), iaa.Dropout(p=p, per_channel=True)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
("ContrastNormalization", [("alpha=%.1f" % (alpha,), iaa.ContrastNormalization(alpha=alpha)) for alpha in [0.5, 0.75, 1.0, 1.25, 1.50]]),
("ContrastNormalization\n(per channel)", [("alpha=(%.2f, %.2f)" % (alphas[0], alphas[1],), iaa.ContrastNormalization(alpha=alphas, per_channel=True)) for alphas in [(0.4, 0.6), (0.65, 0.85), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
("Grayscale", [("alpha=%.1f" % (alpha,), iaa.Grayscale(alpha=alpha)) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
("Affine: Scale", [("%.1fx" % (scale,), iaa.Affine(scale=scale)) for scale in [0.1, 0.5, 1.0, 1.5, 1.9]]),
("Affine: Translate", [("x=%d y=%d" % (x, y), iaa.Affine(translate_px={"x": x, "y": y})) for x, y in [(-32, -16), (-16, -32), (-16, -8), (16, 8), (16, 32)]]),
("Affine: Rotate", [("%d deg" % (rotate,), iaa.Affine(rotate=rotate)) for rotate in [-90, -45, 0, 45, 90]]),
("Affine: Shear", [("%d deg" % (shear,), iaa.Affine(shear=shear)) for shear in [-45, -25, 0, 25, 45]]),
("Affine: Modes", [(mode, iaa.Affine(translate_px=-32, mode=mode)) for mode in ["constant", "edge", "symmetric", "reflect", "wrap"]]),
("Affine: cval", [("%.2f" % (cval,), iaa.Affine(translate_px=-32, cval=cval, mode="constant")) for cval in [0.0, 0.25, 0.5, 0.75, 1.0]]),
(
"Affine: all", [
(
"",
iaa.Affine(
scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px={"x": (-32, 32), "y": (-32, 32)},
rotate=(-45, 45),
shear=(-32, 32),
mode=ia.ALL,
cval=(0.0, 1.0)
)
)
for _ in sm.xrange(5)
]
),
("ElasticTransformation\n(sigma=0.2)", [("alpha=%.1f" % (alpha,), iaa.ElasticTransformation(alpha=alpha, sigma=0.2)) for alpha in [0.1, 0.5, 1.0, 3.0, 9.0]])
]
print("[draw_per_augmenter_images] Augmenting...")
rows = []
for (row_name, augmenters) in rows_augmenters:
row_images = []
row_keypoints = []
row_titles = []
for img_title, augmenter in augmenters:
aug_det = augmenter.to_deterministic()
row_images.append(aug_det.augment_image(image))
row_keypoints.append(aug_det.augment_keypoints(keypoints)[0])
row_titles.append(img_title)
rows.append((row_name, row_images, row_keypoints, row_titles))
print("[draw_per_augmenter_images] Plotting...")
width = 8
height = int(1.5 * len(rows_augmenters))
fig = plt.figure(figsize=(width, height))
grid_rows = len(rows)
grid_cols = 1 + 5
gs = gridspec.GridSpec(grid_rows, grid_cols, width_ratios=[2, 1, 1, 1, 1, 1])
axes = []
for i in sm.xrange(grid_rows):
axes.append([plt.subplot(gs[i, col_idx]) for col_idx in sm.xrange(grid_cols)])
fig.tight_layout()
#fig.subplots_adjust(bottom=0.2 / grid_rows, hspace=0.22)
#fig.subplots_adjust(wspace=0.005, hspace=0.425, bottom=0.02)
fig.subplots_adjust(wspace=0.005, hspace=0.005, bottom=0.02)
for row_idx, (row_name, row_images, row_keypoints, row_titles) in enumerate(rows):
axes_row = axes[row_idx]
for col_idx in sm.xrange(grid_cols):
ax = axes_row[col_idx]
ax.cla()
ax.axis("off")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if col_idx == 0:
ax.text(0, 0.5, row_name, color="black")
else:
cell_image = row_images[col_idx-1]
cell_keypoints = row_keypoints[col_idx-1]
cell_image_kp = cell_keypoints.draw_on_image(cell_image, size=5)
ax.imshow(cell_image_kp)
x = 0
y = 145
#ax.text(x, y, row_titles[col_idx-1], color="black", backgroundcolor="white", fontsize=6)
ax.text(x, y, row_titles[col_idx-1], color="black", fontsize=7)
fig.savefig("examples.jpg", bbox_inches="tight")
#plt.show()
if __name__ == "__main__":
main()
|
imgaug-master
|
generate_example_images.py
|
imgaug-master
|
__init__.py
|
|
from setuptools import setup, find_packages
try:
import cv2
except ImportError as e:
raise Exception("Could not find package 'cv2' (OpenCV). It cannot be automatically installed, so you will have to manually install it.")
long_description = """A library for image augmentation in machine learning experiments, particularly convolutional neural networks.
Supports augmentation of images and keypoints/landmarks in a variety of different ways."""
setup(
name="imgaug",
version="0.1",
author="Alexander Jung",
author_email="kontakt@ajung.name",
url="https://github.com/aleju/imgaug",
install_requires=["scipy", "scikit-image", "numpy", "six"],
packages=find_packages(),
license="MIT",
description="Image augmentation library for machine learning",
long_description=long_description
)
|
imgaug-master
|
setup.py
|
"""
Tests to measure the performance of each augmenter.
Run these checks from the project directory (i.e. parent directory) via
python check_performance.py
"""
from __future__ import print_function, division
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
#from scipy import ndimage, misc
#from skimage import data
import time
import random
import six
import six.moves as sm
"""
---------------------------
Keypoints
---------------------------
[Augmenter: Noop]
(4, 4, 3) | SUM 0.01990s | PER ITER avg 0.00020s, min 0.00017s, max 0.00043s
(32, 32, 3) | SUM 0.01863s | PER ITER avg 0.00019s, min 0.00017s, max 0.00033s
(256, 256, 3) | SUM 0.01879s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
[Augmenter: Crop-px]
(4, 4, 3) | SUM 0.20215s | PER ITER avg 0.00202s, min 0.00168s, max 0.01908s
(32, 32, 3) | SUM 0.19844s | PER ITER avg 0.00198s, min 0.00164s, max 0.01933s
(256, 256, 3) | SUM 0.17918s | PER ITER avg 0.00179s, min 0.00166s, max 0.00214s
[Augmenter: Crop-percent]
(4, 4, 3) | SUM 0.14201s | PER ITER avg 0.00142s, min 0.00114s, max 0.02041s
(32, 32, 3) | SUM 0.16912s | PER ITER avg 0.00169s, min 0.00137s, max 0.02023s
(256, 256, 3) | SUM 0.15548s | PER ITER avg 0.00155s, min 0.00142s, max 0.00193s
[Augmenter: Fliplr]
(4, 4, 3) | SUM 0.02303s | PER ITER avg 0.00023s, min 0.00021s, max 0.00034s
(32, 32, 3) | SUM 0.02477s | PER ITER avg 0.00025s, min 0.00021s, max 0.00038s
(256, 256, 3) | SUM 0.02383s | PER ITER avg 0.00024s, min 0.00022s, max 0.00036s
[Augmenter: Flipud]
(4, 4, 3) | SUM 0.02362s | PER ITER avg 0.00024s, min 0.00021s, max 0.00035s
(32, 32, 3) | SUM 0.02356s | PER ITER avg 0.00024s, min 0.00021s, max 0.00032s
(256, 256, 3) | SUM 0.02415s | PER ITER avg 0.00024s, min 0.00021s, max 0.00037s
[Augmenter: Grayscale]
(4, 4, 3) | SUM 0.01908s | PER ITER avg 0.00019s, min 0.00017s, max 0.00030s
(32, 32, 3) | SUM 0.01903s | PER ITER avg 0.00019s, min 0.00017s, max 0.00030s
(256, 256, 3) | SUM 0.01876s | PER ITER avg 0.00019s, min 0.00017s, max 0.00027s
[Augmenter: GaussianBlur]
(4, 4, 3) | SUM 0.01904s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
(32, 32, 3) | SUM 0.01851s | PER ITER avg 0.00019s, min 0.00017s, max 0.00033s
(256, 256, 3) | SUM 0.01894s | PER ITER avg 0.00019s, min 0.00017s, max 0.00025s
[Augmenter: AdditiveGaussianNoise]
(4, 4, 3) | SUM 0.01902s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
(32, 32, 3) | SUM 0.01905s | PER ITER avg 0.00019s, min 0.00017s, max 0.00028s
(256, 256, 3) | SUM 0.01971s | PER ITER avg 0.00020s, min 0.00017s, max 0.00046s
[Augmenter: Dropout]
(4, 4, 3) | SUM 0.01887s | PER ITER avg 0.00019s, min 0.00017s, max 0.00027s
(32, 32, 3) | SUM 0.01913s | PER ITER avg 0.00019s, min 0.00017s, max 0.00030s
(256, 256, 3) | SUM 0.01922s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
[Augmenter: Multiply]
(4, 4, 3) | SUM 0.01942s | PER ITER avg 0.00019s, min 0.00017s, max 0.00028s
(32, 32, 3) | SUM 0.01922s | PER ITER avg 0.00019s, min 0.00017s, max 0.00032s
(256, 256, 3) | SUM 0.01875s | PER ITER avg 0.00019s, min 0.00017s, max 0.00030s
[Augmenter: ContrastNormalization]
(4, 4, 3) | SUM 0.01852s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
(32, 32, 3) | SUM 0.01869s | PER ITER avg 0.00019s, min 0.00017s, max 0.00026s
(256, 256, 3) | SUM 0.01875s | PER ITER avg 0.00019s, min 0.00017s, max 0.00028s
[Augmenter: Grayscale]
(4, 4, 3) | SUM 0.01919s | PER ITER avg 0.00019s, min 0.00017s, max 0.00030s
(32, 32, 3) | SUM 0.01923s | PER ITER avg 0.00019s, min 0.00017s, max 0.00033s
(256, 256, 3) | SUM 0.01888s | PER ITER avg 0.00019s, min 0.00017s, max 0.00028s
[Augmenter: ElasticTransformation]
(4, 4, 3) | SUM 0.01882s | PER ITER avg 0.00019s, min 0.00017s, max 0.00024s
(32, 32, 3) | SUM 0.01883s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
(256, 256, 3) | SUM 0.01869s | PER ITER avg 0.00019s, min 0.00017s, max 0.00029s
[Augmenter: AffineOrder0ModeConstant]
(4, 4, 3) | SUM 0.28146s | PER ITER avg 0.00281s, min 0.00243s, max 0.02199s
(32, 32, 3) | SUM 0.28047s | PER ITER avg 0.00280s, min 0.00243s, max 0.02083s
(256, 256, 3) | SUM 0.28715s | PER ITER avg 0.00287s, min 0.00243s, max 0.02088s
[Augmenter: AffineOrder0]
(4, 4, 3) | SUM 0.27242s | PER ITER avg 0.00272s, min 0.00246s, max 0.00362s
(32, 32, 3) | SUM 0.29675s | PER ITER avg 0.00297s, min 0.00247s, max 0.02220s
(256, 256, 3) | SUM 0.28988s | PER ITER avg 0.00290s, min 0.00247s, max 0.02128s
[Augmenter: AffineOrder1]
(4, 4, 3) | SUM 0.26750s | PER ITER avg 0.00267s, min 0.00246s, max 0.00321s
(32, 32, 3) | SUM 0.28361s | PER ITER avg 0.00284s, min 0.00245s, max 0.02144s
(256, 256, 3) | SUM 0.28973s | PER ITER avg 0.00290s, min 0.00246s, max 0.02070s
[Augmenter: AffineAll]
(4, 4, 3) | SUM 0.27070s | PER ITER avg 0.00271s, min 0.00246s, max 0.00367s
(32, 32, 3) | SUM 0.28405s | PER ITER avg 0.00284s, min 0.00247s, max 0.02120s
(256, 256, 3) | SUM 0.28895s | PER ITER avg 0.00289s, min 0.00247s, max 0.02144s
---------------------------
Images
---------------------------
[Augmenter: Noop]
(16, 4, 4, 3) | SUM 0.00135s | PER ITER avg 0.00001s, min 0.00001s, max 0.00008s
(16, 32, 32, 3) | SUM 0.00203s | PER ITER avg 0.00002s, min 0.00002s, max 0.00005s
(16, 256, 256, 3) | SUM 0.05284s | PER ITER avg 0.00053s, min 0.00044s, max 0.00194s
[Augmenter: Crop-px]
(16, 4, 4, 3) | SUM 0.09324s | PER ITER avg 0.00093s, min 0.00084s, max 0.00315s
(16, 32, 32, 3) | SUM 0.10302s | PER ITER avg 0.00103s, min 0.00094s, max 0.00162s
(16, 256, 256, 3) | SUM 0.81943s | PER ITER avg 0.00819s, min 0.00767s, max 0.00934s
[Augmenter: Crop-percent]
(16, 4, 4, 3) | SUM 0.06562s | PER ITER avg 0.00066s, min 0.00057s, max 0.00099s
(16, 32, 32, 3) | SUM 0.09784s | PER ITER avg 0.00098s, min 0.00089s, max 0.00131s
(16, 256, 256, 3) | SUM 0.80779s | PER ITER avg 0.00808s, min 0.00732s, max 0.01008s
[Augmenter: Fliplr]
(16, 4, 4, 3) | SUM 0.00525s | PER ITER avg 0.00005s, min 0.00004s, max 0.00017s
(16, 32, 32, 3) | SUM 0.01025s | PER ITER avg 0.00010s, min 0.00007s, max 0.00015s
(16, 256, 256, 3) | SUM 0.36918s | PER ITER avg 0.00369s, min 0.00181s, max 0.00553s
[Augmenter: Flipud]
(16, 4, 4, 3) | SUM 0.00512s | PER ITER avg 0.00005s, min 0.00004s, max 0.00009s
(16, 32, 32, 3) | SUM 0.00665s | PER ITER avg 0.00007s, min 0.00006s, max 0.00011s
(16, 256, 256, 3) | SUM 0.12664s | PER ITER avg 0.00127s, min 0.00092s, max 0.00167s
[Augmenter: Grayscale]
(16, 4, 4, 3) | SUM 0.05943s | PER ITER avg 0.00059s, min 0.00050s, max 0.00125s
(16, 32, 32, 3) | SUM 0.12247s | PER ITER avg 0.00122s, min 0.00106s, max 0.00205s
(16, 256, 256, 3) | SUM 3.62785s | PER ITER avg 0.03628s, min 0.03508s, max 0.03963s
[Augmenter: GaussianBlur]
(16, 4, 4, 3) | SUM 0.15514s | PER ITER avg 0.00155s, min 0.00136s, max 0.00188s
(16, 32, 32, 3) | SUM 0.25121s | PER ITER avg 0.00251s, min 0.00221s, max 0.00298s
(16, 256, 256, 3) | SUM 5.51685s | PER ITER avg 0.05517s, min 0.04923s, max 0.06026s
[Augmenter: AdditiveGaussianNoise]
(16, 4, 4, 3) | SUM 0.09606s | PER ITER avg 0.00096s, min 0.00085s, max 0.00150s
(16, 32, 32, 3) | SUM 0.21302s | PER ITER avg 0.00213s, min 0.00196s, max 0.00254s
(16, 256, 256, 3) | SUM 7.22374s | PER ITER avg 0.07224s, min 0.07017s, max 0.07558s
[Augmenter: Dropout]
(16, 4, 4, 3) | SUM 0.09362s | PER ITER avg 0.00094s, min 0.00084s, max 0.00118s
(16, 32, 32, 3) | SUM 0.17472s | PER ITER avg 0.00175s, min 0.00161s, max 0.00230s
(16, 256, 256, 3) | SUM 5.04969s | PER ITER avg 0.05050s, min 0.04839s, max 0.05631s
[Augmenter: Multiply]
(16, 4, 4, 3) | SUM 0.05442s | PER ITER avg 0.00054s, min 0.00046s, max 0.00089s
(16, 32, 32, 3) | SUM 0.06895s | PER ITER avg 0.00069s, min 0.00060s, max 0.00109s
(16, 256, 256, 3) | SUM 0.87311s | PER ITER avg 0.00873s, min 0.00799s, max 0.00993s
[Augmenter: ContrastNormalization]
(16, 4, 4, 3) | SUM 0.05746s | PER ITER avg 0.00057s, min 0.00050s, max 0.00094s
(16, 32, 32, 3) | SUM 0.08083s | PER ITER avg 0.00081s, min 0.00071s, max 0.00133s
(16, 256, 256, 3) | SUM 1.57577s | PER ITER avg 0.01576s, min 0.01443s, max 0.01831s
[Augmenter: Grayscale]
(16, 4, 4, 3) | SUM 0.05464s | PER ITER avg 0.00055s, min 0.00049s, max 0.00069s
(16, 32, 32, 3) | SUM 0.12058s | PER ITER avg 0.00121s, min 0.00104s, max 0.00223s
(16, 256, 256, 3) | SUM 3.57037s | PER ITER avg 0.03570s, min 0.03461s, max 0.03780s
[Augmenter: ElasticTransformation]
(16, 4, 4, 3) | SUM 0.29551s | PER ITER avg 0.00296s, min 0.00272s, max 0.00336s
(16, 32, 32, 3) | SUM 0.68591s | PER ITER avg 0.00686s, min 0.00642s, max 0.00764s
(16, 256, 256, 3) | SUM 26.30515s | PER ITER avg 0.26305s, min 0.25754s, max 0.26912s
[Augmenter: AffineOrder0ModeConstant]
(16, 4, 4, 3) | SUM 0.35887s | PER ITER avg 0.00359s, min 0.00333s, max 0.00424s
(16, 32, 32, 3) | SUM 0.47889s | PER ITER avg 0.00479s, min 0.00451s, max 0.00535s
(16, 256, 256, 3) | SUM 9.83738s | PER ITER avg 0.09837s, min 0.09417s, max 0.10458s
[Augmenter: AffineOrder0]
(16, 4, 4, 3) | SUM 0.37980s | PER ITER avg 0.00380s, min 0.00340s, max 0.00517s
(16, 32, 32, 3) | SUM 0.53106s | PER ITER avg 0.00531s, min 0.00472s, max 0.00630s
(16, 256, 256, 3) | SUM 10.69961s | PER ITER avg 0.10700s, min 0.10223s, max 0.11325s
[Augmenter: AffineOrder1]
(16, 4, 4, 3) | SUM 0.39431s | PER ITER avg 0.00394s, min 0.00363s, max 0.00511s
(16, 32, 32, 3) | SUM 0.62730s | PER ITER avg 0.00627s, min 0.00576s, max 0.00711s
(16, 256, 256, 3) | SUM 14.50003s | PER ITER avg 0.14500s, min 0.13785s, max 0.15291s
[Augmenter: AffineAll]
(16, 4, 4, 3) | SUM 0.58742s | PER ITER avg 0.00587s, min 0.00429s, max 0.00724s
(16, 32, 32, 3) | SUM 3.69956s | PER ITER avg 0.03700s, min 0.01358s, max 0.06233s
(16, 256, 256, 3) | SUM 212.91776s | PER ITER avg 2.12918s, min 0.57114s, max 3.95389s
"""
def main():
augmenters = [
iaa.Noop(name="Noop"),
iaa.Crop(px=(0, 8), name="Crop-px"),
iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
iaa.Fliplr(0.5, name="Fliplr"),
iaa.Flipud(0.5, name="Flipud"),
iaa.Grayscale((0.0, 1.0), name="Grayscale"),
iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1), name="AdditiveGaussianNoise"),
iaa.Dropout((0.0, 0.1), name="Dropout"),
iaa.Multiply((0.5, 1.5), name="Multiply"),
iaa.ContrastNormalization(alpha=(0.5, 2.0), name="ContrastNormalization"),
iaa.Grayscale(alpha=(0.0, 1.0), name="Grayscale"),
iaa.ElasticTransformation(alpha=(0.5, 8.0), sigma=1.0, name="ElasticTransformation"),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=0,
cval=(0, 1.0),
mode="constant",
name="AffineOrder0ModeConstant"
)
]
for order in [0, 1]:
augmenters.append(
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=order,
cval=(0, 1.0),
mode=ia.ALL,
name="AffineOrder%d" % (order,)
)
)
augmenters.append(
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=ia.ALL,
cval=(0, 1.0),
mode=ia.ALL,
name="AffineAll"
)
)
kps = []
for _ in sm.xrange(20):
x = random.randint(0, 31)
y = random.randint(0, 31)
kps.append(ia.Keypoint(x=x, y=y))
kps = ia.KeypointsOnImage(kps, shape=(32, 32, 3))
#small_keypoints_one = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=3, y=3)], shape=(4, 4, 3))
#medium_keypoints_one = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=16, y=16), ia.Keypoint(x=31, y=31)], shape=(32, 32, 3))
#large_keypoints_one = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=128, y=128), ia.Keypoint(x=255, y=255)], shape=(256, 256, 3))
small_keypoints_one = kps.on((4, 4, 3))
medium_keypoints_one = kps.on((32, 32, 3))
large_keypoints_one = kps.on((256, 256, 3))
small_keypoints = [small_keypoints_one.deepcopy() for _ in sm.xrange(16)]
medium_keypoints = [medium_keypoints_one.deepcopy() for _ in sm.xrange(16)]
large_keypoints = [large_keypoints_one.deepcopy() for _ in sm.xrange(16)]
small_images = np.random.randint(0, 255, (16, 4, 4, 3)).astype(np.uint8)
medium_images = np.random.randint(0, 255, (16, 32, 32, 3)).astype(np.uint8)
large_images = np.random.randint(0, 255, (16, 256, 256, 3)).astype(np.uint8)
print("---------------------------")
print("Keypoints")
print("---------------------------")
for augmenter in augmenters:
print("[Augmenter: %s]" % (augmenter.name,))
for keypoints in [small_keypoints, medium_keypoints, large_keypoints]:
times = []
for i in sm.xrange(100):
time_start = time.time()
img_aug = augmenter.augment_keypoints(keypoints)
time_end = time.time()
times.append(time_end - time_start)
times = np.array(times)
img_str = "{:20s}".format(keypoints[0].shape)
print("%s | SUM %.5fs | PER ITER avg %.5fs, min %.5fs, max %.5fs" % (img_str, np.sum(times), np.average(times), np.min(times), np.max(times)))
print("---------------------------")
print("Images")
print("---------------------------")
for augmenter in augmenters:
print("[Augmenter: %s]" % (augmenter.name,))
for images in [small_images, medium_images, large_images]:
times = []
for i in sm.xrange(100):
time_start = time.time()
img_aug = augmenter.augment_images(images)
time_end = time.time()
times.append(time_end - time_start)
times = np.array(times)
img_str = "{:20s}".format(images.shape)
print("%s | SUM %.5fs | PER ITER avg %.5fs, min %.5fs, max %.5fs" % (img_str, np.sum(times), np.average(times), np.min(times), np.max(times)))
if __name__ == "__main__":
main()
|
imgaug-master
|
tests/check_performance.py
|
"""
Tests to visually inspect the results of the library's functionality.
Run these checks from the project directory (i.e. parent directory) via
python check_visually.py
"""
from __future__ import print_function, division
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
import numpy as np
from scipy import ndimage, misc
from skimage import data
def main():
images = [
misc.imresize(ndimage.imread("../quokka.jpg")[0:643, 0:643], (128, 128)),
misc.imresize(data.astronaut(), (128, 128))
]
augmenters = [
iaa.Noop(name="Noop"),
iaa.Crop(px=(0, 8), name="Crop-px"),
iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
iaa.Fliplr(0.5, name="Fliplr"),
iaa.Flipud(0.5, name="Flipud"),
iaa.Grayscale(0.5, name="Grayscale0.5"),
iaa.Grayscale(1.0, name="Grayscale1.0"),
iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1*255), name="AdditiveGaussianNoise"),
iaa.Dropout((0.0, 0.1), name="Dropout"),
iaa.Multiply((0.5, 1.5), name="Multiply"),
iaa.ContrastNormalization(alpha=(0.5, 2.0), name="ContrastNormalization"),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=ia.ALL,
cval=(0, 1.0),
mode=ia.ALL,
name="Affine"
),
iaa.ElasticTransformation(alpha=(0.5, 8.0), sigma=1.0, name="ElasticTransformation")
]
#for i, aug in enumerate(augmenters):
#print(i)
#aug.deepcopy()
#import copy
#copy.deepcopy(aug)
seq = iaa.Sequential([aug.copy() for aug in augmenters], name="Sequential")
st = iaa.Sometimes(0.5, seq.copy(), name="Sometimes")
augmenters.append(seq)
augmenters.append(st)
for augmenter in augmenters:
print("Augmenter: %s" % (augmenter.name,))
grid = augmenter.draw_grid(images, rows=1, cols=16)
misc.imshow(grid)
if __name__ == "__main__":
main()
|
imgaug-master
|
tests/check_visually.py
|
"""
Script to verify all examples in the readme.
Run from the project directory (i.e. parent) with
python test_readme_examples.py
"""
from __future__ import print_function, division
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from scipy import misc
def main():
example_standard_situation()
example_heavy_augmentations()
example_show()
example_grayscale()
example_determinism()
example_keypoints()
example_single_augmenters()
example_unusual_distributions()
example_hooks()
def example_standard_situation():
print("Example: Standard Situation")
# -------
# dummy functions to make the example runnable here
def load_batch(batch_idx):
return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)
def train_on_images(images):
pass
# -------
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
iaa.Fliplr(0.5), # horizontally flip 50% of the images
iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
])
for batch_idx in range(1000):
# 'images' should be either a 4D numpy array of shape (N, height, width, channels)
# or a list of 3D numpy arrays, each having shape (height, width, channels).
# Grayscale images must have shape (height, width, 1) each.
# All images must have numpy's dtype uint8. Values are expected to be in
# range 0-255.
images = load_batch(batch_idx)
images_aug = seq.augment_images(images)
train_on_images(images_aug)
# -----
# Make sure that the example really does something
if batch_idx == 0:
assert not np.array_equal(images, images_aug)
def example_heavy_augmentations():
print("Example: Heavy Augmentations")
import imgaug as ia
from imgaug import augmenters as iaa
# random example images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
st = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.5), # vertically flip 50% of all images
st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)), # add gaussian noise to images
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
st(iaa.Grayscale((0.0, 1.0))), # blend with grayscale image
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
order=[0, 1], # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
cval=(0, 1.0), # if mode is constant, use a cval between 0 and 1.0
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
],
random_order=True # do all of the above in random order
)
images_aug = seq.augment_images(images)
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_show():
print("Example: Show")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# show an image with 8*8 augmented versions of image 0
seq.show_grid(images[0], cols=8, rows=8)
# Show an image with 8*8 augmented versions of image 0 and 8*8 augmented
# versions of image 1. The identical augmentations will be applied to
# image 0 and 1.
seq.show_grid([images[0], images[1]], cols=8, rows=8)
def example_grayscale():
print("Example: Grayscale")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# The library expects a list of images (3D inputs) or a single array (4D inputs).
# So we add an axis to our grayscale array to convert it to shape (16, 128, 128, 1).
images_aug = seq.augment_images(images[:, :, :, np.newaxis])
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_determinism():
print("Example: Determinism")
from imgaug import augmenters as iaa
# Standard scenario: You have N RGB-images and additionally 21 heatmaps per image.
# You want to augment each image and its heatmaps identically.
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
heatmaps = np.random.randint(0, 255, (16, 128, 128, 21), dtype=np.uint8)
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)})])
# Convert the stochastic sequence of augmenters to a deterministic one.
    # The deterministic sequence will always apply exactly the same effects to the images.
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps)
# -----
# Make sure that the example really does something
import imgaug as ia
assert not np.array_equal(images, images_aug)
assert not np.array_equal(heatmaps, heatmaps_aug)
images_show = []
for img_idx in range(len(images)):
images_show.extend([images[img_idx], images_aug[img_idx], heatmaps[img_idx][..., 0:3], heatmaps_aug[img_idx][..., 0:3]])
ia.show_grid(images_show, cols=4)
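# --- Added illustrative sketch (not part of the original README examples, not invoked from main()) ---
# A minimal sketch of the per-batch determinism pattern inside a loop: a fresh
# deterministic copy is created for every batch, so images and their masks get
# identical transformations while different batches still vary. The batch arrays
# are placeholders; only the API used above is assumed.
def example_determinism_loop_sketch():
    print("Example: Determinism per batch (sketch)")
    import numpy as np
    from imgaug import augmenters as iaa
    seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.Affine(translate_px={"x": (-10, 10)})])
    for _ in range(3):  # three hypothetical batches
        images = np.random.randint(0, 255, (4, 64, 64, 3), dtype=np.uint8)
        masks = np.random.randint(0, 255, (4, 64, 64, 1), dtype=np.uint8)
        seq_det = seq.to_deterministic()  # new deterministic copy per batch
        images_aug = seq_det.augment_images(images)
        masks_aug = seq_det.augment_images(masks)  # same flips/translations as the images
        assert images_aug.shape == images.shape
        assert masks_aug.shape == masks.shape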
def example_keypoints():
print("Example: Keypoints")
import imgaug as ia
from imgaug import augmenters as iaa
from scipy import misc
import random
images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)
# Generate random keypoints.
# The augmenters expect a list of imgaug.KeypointsOnImage.
keypoints_on_images = []
for image in images:
height, width = image.shape[0:2]
keypoints = []
for _ in range(4):
x = random.randint(0, width-1)
y = random.randint(0, height-1)
keypoints.append(ia.Keypoint(x=x, y=y))
keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
# augment keypoints and images
images_aug = seq_det.augment_images(images)
keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)
    # Example code to show each image and print the new keypoint coordinates
for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
image_before = keypoints_before.draw_on_image(image_before)
image_after = keypoints_after.draw_on_image(image_after)
misc.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
x_old, y_old = keypoint_old.x, keypoint_old.y
x_new, y_new = keypoint.x, keypoint.y
print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new))
def example_single_augmenters():
print("Example: Single Augmenters")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
flipper = iaa.Fliplr(1.0) # always horizontally flip each input image
images[0] = flipper.augment_image(images[0]) # horizontally flip image 0
vflipper = iaa.Flipud(0.9) # vertically flip each input image with 90% probability
images[1] = vflipper.augment_image(images[1]) # probably vertically flip image 1
blurer = iaa.GaussianBlur(3.0)
images[2] = blurer.augment_image(images[2]) # blur image 2 by a sigma of 3.0
images[3] = blurer.augment_image(images[3]) # blur image 3 by a sigma of 3.0 too
translater = iaa.Affine(translate_px={"x": -16}) # move each input image by 16px to the left
images[4] = translater.augment_image(images[4]) # move image 4 to the left
scaler = iaa.Affine(scale={"y": (0.8, 1.2)}) # scale each input image to 80-120% on the y axis
images[5] = scaler.augment_image(images[5]) # scale image 5 by 80-120% on the y axis
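# --- Added illustrative sketch (not part of the original README examples, not invoked from main()) ---
# Single augmenters can also be applied to whole batches at once via
# augment_images(); a minimal sketch using only the calls shown above.
def example_single_augmenter_batch_sketch():
    print("Example: Single augmenter applied to a batch (sketch)")
    from imgaug import augmenters as iaa
    import numpy as np
    images = np.random.randint(0, 255, (8, 64, 64, 3), dtype=np.uint8)
    flipper = iaa.Fliplr(1.0)  # always flip
    images_aug = flipper.augment_images(images)  # flips every image in the batch
    assert not np.array_equal(images, images_aug)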
def example_unusual_distributions():
print("Example: Unusual Distributions")
from imgaug import augmenters as iaa
from imgaug import parameters as iap
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Blur by a value sigma which is sampled from a uniform distribution
# of range 0.1 <= x < 3.0.
# The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
images_aug = blurer.augment_images(images)
# Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
# i.e. sample a value that is usually around 1.0.
# Clip the resulting value so that it never gets below 0.1 or above 3.0.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Same again, but this time the mean of the normal distribution is not constant,
    # but is itself sampled from a uniform distribution between 0.5 and 1.5.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Use for sigma one of exactly three allowed values: 0.5, 1.0 or 1.5.
blurer = iaa.GaussianBlur(iap.Choice([0.5, 1.0, 1.5]))
images_aug = blurer.augment_images(images)
# Sample sigma from a discrete uniform distribution of range 1 <= sigma <= 5,
# i.e. sigma will have any of the following values: 1, 2, 3, 4, 5.
blurer = iaa.GaussianBlur(iap.DiscreteUniform(1, 5))
images_aug = blurer.augment_images(images)
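# --- Added illustrative sketch (not part of the original README examples, not invoked from main()) ---
# A minimal sketch for inspecting a stochastic parameter directly. It assumes
# the parameters expose a draw_samples() method; if that differs in your
# version, plug the parameter into an augmenter instead (as done above).
def example_parameter_inspection_sketch():
    print("Example: Parameter inspection (sketch)")
    from imgaug import parameters as iap
    import numpy as np
    sigma_param = iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0)
    samples = sigma_param.draw_samples((1000,))  # assumed API, see note above
    print("sigma samples: min=%.3f max=%.3f mean=%.3f" % (
        float(np.min(samples)), float(np.max(samples)), float(np.mean(samples))))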
def example_hooks():
print("Example: Hooks")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
# images and heatmaps, just arrays filled with value 30
images = np.ones((16, 128, 128, 3), dtype=np.uint8) * 30
heatmaps = np.ones((16, 128, 128, 21), dtype=np.uint8) * 30
# add vertical lines to see the effect of flip
images[:, 16:128-16, 120:124, :] = 120
heatmaps[:, 16:128-16, 120:124, :] = 120
seq = iaa.Sequential([
iaa.Fliplr(0.5, name="Flipper"),
iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
iaa.Dropout(0.02, name="Dropout"),
iaa.AdditiveGaussianNoise(scale=0.01*255, name="MyLittleNoise"),
iaa.AdditiveGaussianNoise(loc=32, scale=0.0001*255, name="SomeOtherNoise"),
iaa.Affine(translate_px={"x": (-40, 40)}, name="Affine")
])
    # deactivate some of the augmenters when augmenting the heatmaps
def activator_heatmaps(images, augmenter, parents, default):
if augmenter.name in ["GaussianBlur", "Dropout", "MyLittleNoise"]:
return False
else:
# default value for all other augmenters
return default
hooks_heatmaps = ia.HooksImages(activator=activator_heatmaps)
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps, hooks=hooks_heatmaps)
# -----------
ia.show_grid(images_aug)
ia.show_grid(heatmaps_aug[..., 0:3])
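# --- Added illustrative sketch (not part of the original README examples, not invoked from main()) ---
# A minimal sketch of a propagator hook, which prevents descending into a whole
# named sub-sequence instead of deactivating single augmenters. The propagator
# signature matches the one exercised in the tests of this repository.
def example_hooks_propagator_sketch():
    print("Example: Hooks / propagator (sketch)")
    import imgaug as ia
    from imgaug import augmenters as iaa
    import numpy as np
    images = np.random.randint(0, 255, (4, 64, 64, 3), dtype=np.uint8)
    seq = iaa.Sequential([
        iaa.Fliplr(0.5, name="Flipper"),
        iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Dropout(0.1)], name="Heavy")
    ])
    def propagator(images, augmenter, parents, default):
        # do not descend into the sub-sequence named "Heavy"
        return False if augmenter.name == "Heavy" else default
    hooks = ia.HooksImages(propagator=propagator)
    images_aug = seq.augment_images(images, hooks=hooks)
    assert images_aug.shape == images.shape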
if __name__ == "__main__":
main()
|
imgaug-master
|
tests/test_readme_examples.py
|
"""
Automated tests for this library.
Run these from the project directory (i.e. parent directory) via
python test.py
"""
from __future__ import print_function, division
#import sys
#import os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
import numpy as np
import six
import six.moves as sm
def main():
test_is_single_integer()
test_is_single_float()
test_find()
test_remove()
test_hooks()
test_Noop()
test_Lambda()
test_AssertLambda()
test_AssertShape()
test_Crop()
test_Fliplr()
test_Flipud()
test_GaussianBlur()
test_AdditiveGaussianNoise()
# MultiplicativeGaussianNoise
# ReplacingGaussianNoise
test_Dropout()
test_Multiply()
test_Affine()
test_ElasticTransformation()
test_Sequential()
test_Sometimes()
print("Finished without errors.")
def test_is_single_integer():
assert ia.is_single_integer("A") == False
assert ia.is_single_integer(None) == False
assert ia.is_single_integer(1.2) == False
assert ia.is_single_integer(1.0) == False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) == False
assert ia.is_single_integer(1) == True
assert ia.is_single_integer(1234) == True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) == True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) == True
def test_is_single_float():
assert ia.is_single_float("A") == False
assert ia.is_single_float(None) == False
assert ia.is_single_float(1.2) == True
assert ia.is_single_float(1.0) == True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) == True
assert ia.is_single_float(1) == False
assert ia.is_single_float(1234) == False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) == False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) == False
def test_find():
noop1 = iaa.Noop(name="Noop")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Noop(name="Noop2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
augs = seq1.find_augmenters_by_name("Seq")
assert len(augs) == 1
assert augs[0] == seq1
augs = seq1.find_augmenters_by_name("Seq2")
assert len(augs) == 1
assert augs[0] == seq2
augs = seq1.find_augmenters_by_names(["Seq", "Seq2"])
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"])
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"] and len(parents) > 0)
assert len(augs) == 1
assert augs[0] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"], flat=False)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == [seq2]
def test_remove():
def get_seq():
noop1 = iaa.Noop(name="Noop")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Noop(name="Noop2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
return seq1
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: aug.name == "Seq2")
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 1
assert seqs[0].name == "Seq"
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: aug.name == "Seq2" and len(parents) == 0)
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 2
assert seqs[0].name == "Seq"
assert seqs[1].name == "Seq2"
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: True)
assert augs is not None
assert isinstance(augs, iaa.Noop)
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: True, noop_if_topmost=False)
assert augs is None
def test_hooks():
image = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
image_lr = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8)
image_ud = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
image_lrud = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8)
image = image[:, :, np.newaxis]
image_lr = image_lr[:, :, np.newaxis]
image_ud = image_ud[:, :, np.newaxis]
image_lrud = image_lrud[:, :, np.newaxis]
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
# preprocessing
def preprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(preprocessor=preprocessor)
images_aug = seq.augment_images([image], hooks=hooks)
expected = np.copy(image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
# postprocessing
def postprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(postprocessor=postprocessor)
images_aug = seq.augment_images([image], hooks=hooks)
expected = np.copy(image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
# propagating
def propagator(images, augmenter, parents, default):
if "Seq" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
images_aug = seq.augment_images([image], hooks=hooks)
assert np.array_equal(images_aug[0], image)
# activation
def activator(images, augmenter, parents, default):
if "Flipud" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(activator=activator)
images_aug = seq.augment_images([image], hooks=hooks)
assert np.array_equal(images_aug[0], image_lr)
def test_Noop():
images = create_random_images((16, 70, 50, 3))
keypoints = create_random_keypoints((16, 70, 50, 3), 4)
aug = iaa.Noop()
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_Lambda():
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
images_aug = images + 1
images_aug_list = [image + 1 for image in images_list]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=1), ia.Keypoint(x=0, y=2)], shape=base_img.shape)]
def func_images(images, random_state, parents, hooks):
if isinstance(images, list):
images = [image + 1 for image in images]
else:
images = images + 1
return images
def func_keypoints(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for kp in keypoints_on_image.keypoints:
kp.x = (kp.x + 1) % 3
return keypoints_on_images
aug = iaa.Lambda(func_images, func_keypoints)
aug_det = aug.to_deterministic()
# check once that the augmenter can handle lists correctly
observed = aug.augment_images(images_list)
expected = images_aug_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_aug_list
assert array_equal_lists(observed, expected)
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_aug
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_aug
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_aug
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_aug
assert keypoints_equal(observed, expected)
def test_AssertLambda():
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
def func_images_succeeds(images, random_state, parents, hooks):
return images[0][0, 0] == 0 and images[0][2, 2] == 1
def func_images_fails(images, random_state, parents, hooks):
return images[0][0, 0] == 1
def func_keypoints_succeeds(keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images[0].keypoints[0].x == 0 and keypoints_on_images[0].keypoints[2].x == 2
def func_keypoints_fails(keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images[0].keypoints[0].x == 2
aug_succeeds = iaa.AssertLambda(func_images_succeeds, func_keypoints_succeeds)
aug_succeeds_det = aug_succeeds.to_deterministic()
aug_fails = iaa.AssertLambda(func_images_fails, func_keypoints_fails)
aug_fails_det = aug_fails.to_deterministic()
# images as numpy array
observed = aug_succeeds.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
try:
observed = aug_fails.augment_images(images)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
try:
observed = aug_fails.augment_images(images)
errored = False
except AssertionError as e:
errored = True
assert errored
# Lists of images
observed = aug_succeeds.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
try:
observed = aug_fails.augment_images(images_list)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
try:
observed = aug_fails.augment_images(images_list)
errored = False
except AssertionError as e:
errored = True
assert errored
# keypoints
observed = aug_succeeds.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug_fails.augment_keypoints(keypoints)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug_fails.augment_keypoints(keypoints)
errored = False
except AssertionError as e:
errored = True
assert errored
def test_AssertShape():
base_img = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
base_img_h4 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0]], dtype=np.uint8)
base_img_h4 = base_img_h4[:, :, np.newaxis]
images_h4 = np.array([base_img_h4])
keypoints_h4 = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img_h4.shape)]
# image must have exactly shape (1, 3, 4, 1)
aug = iaa.AssertShape((1, 3, 4, 1))
aug_det = aug.to_deterministic()
# check once that the augmenter can handle lists correctly
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# any value for number of images allowed (None)
aug = iaa.AssertShape((None, 3, 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# list of possible choices [1, 3, 5] for height
aug = iaa.AssertShape((1, [1, 3, 5], 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# range of 1-3 for height (tuple comparison is a <= x < b, so we use (1,4) here)
aug = iaa.AssertShape((1, (1, 4), 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
def test_Crop():
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # test cropping by 1 pixel, one side at a time
crops = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
base_img_cropped = base_img[top:height-bottom, left:width-right, :]
observed = aug.augment_images(images)
assert np.array_equal(observed, np.array([base_img_cropped]))
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, [base_img_cropped])
keypoints_moved = [keypoints[0].shift(x=-left, y=-top)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
# test crop by range of pixels
crops = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
images_cropped = []
keypoints_cropped = []
top_range = top if isinstance(top, tuple) else (top, top)
right_range = right if isinstance(right, tuple) else (right, right)
bottom_range = bottom if isinstance(bottom, tuple) else (bottom, bottom)
left_range = left if isinstance(left, tuple) else (left, left)
for top_val in sm.xrange(top_range[0], top_range[1]+1):
for right_val in sm.xrange(right_range[0], right_range[1]+1):
for bottom_val in sm.xrange(bottom_range[0], bottom_range[1]+1):
for left_val in sm.xrange(left_range[0], left_range[1]+1):
images_cropped.append(base_img[top_val:height-bottom_val, left_val:width-right_val, :])
keypoints_cropped.append(keypoints[0].shift(x=-left_val, y=-top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0 for base_img_cropped in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0 for base_img_cropped in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_cropped]) for base_img_cropped in images_cropped])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_cropped])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
# test crop by list of exact pixel values
crops = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
images_cropped = []
keypoints_cropped = []
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_cropped.append(base_img[top_val:height-bottom_val, left_val:width-right_val, :])
keypoints_cropped.append(keypoints[0].shift(x=-left_val, y=-top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0 for base_img_cropped in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0 for base_img_cropped in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_cropped]) for base_img_cropped in images_cropped])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_cropped])
assert len(set(movements)) == 2
assert len(set(movements_det)) == 1
# TODO
print("[Note] Crop by percentages is currently not tested.")
print("[Note] Landmark projection after crop with resize is currently not tested.")
def test_Fliplr():
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
base_img_flipped = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8)
base_img_flipped = base_img_flipped[:, :, np.newaxis]
images = np.array([base_img])
images_flipped = np.array([base_img_flipped])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_flipped = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=0, y=2)], shape=base_img.shape)]
# 0% chance of flip
aug = iaa.Fliplr(0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# 100% chance of flip
aug = iaa.Fliplr(1.0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
# 50% chance of flip
aug = iaa.Fliplr(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_images_flipped = 0
nb_images_flipped_det = 0
nb_keypoints_flipped = 0
nb_keypoints_flipped_det = 0
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped += 1
observed = aug_det.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped_det += 1
observed = aug.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped += 1
observed = aug_det.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped_det += 1
assert int(nb_iterations * 0.3) <= nb_images_flipped <= int(nb_iterations * 0.7)
assert int(nb_iterations * 0.3) <= nb_keypoints_flipped <= int(nb_iterations * 0.7)
assert nb_images_flipped_det in [0, nb_iterations]
assert nb_keypoints_flipped_det in [0, nb_iterations]
    # 50% chance of flip, multiple images, list as input
images_multi = [base_img, base_img]
aug = iaa.Fliplr(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_flipped_by_pos = [0] * len(images_multi)
nb_flipped_by_pos_det = [0] * len(images_multi)
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos[i] += 1
observed = aug_det.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos_det[i] += 1
for val in nb_flipped_by_pos:
assert int(nb_iterations * 0.3) <= val <= int(nb_iterations * 0.7)
for val in nb_flipped_by_pos_det:
assert val in [0, nb_iterations]
def test_Flipud():
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
base_img_flipped = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
base_img_flipped = base_img_flipped[:, :, np.newaxis]
images = np.array([base_img])
images_flipped = np.array([base_img_flipped])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_flipped = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=2), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=0)], shape=base_img.shape)]
# 0% chance of flip
aug = iaa.Flipud(0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# 100% chance of flip
aug = iaa.Flipud(1.0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
# 50% chance of flip
aug = iaa.Flipud(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_images_flipped = 0
nb_images_flipped_det = 0
nb_keypoints_flipped = 0
nb_keypoints_flipped_det = 0
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped += 1
observed = aug_det.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped_det += 1
observed = aug.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped += 1
observed = aug_det.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped_det += 1
assert int(nb_iterations * 0.3) <= nb_images_flipped <= int(nb_iterations * 0.7)
assert int(nb_iterations * 0.3) <= nb_keypoints_flipped <= int(nb_iterations * 0.7)
assert nb_images_flipped_det in [0, nb_iterations]
assert nb_keypoints_flipped_det in [0, nb_iterations]
    # 50% chance of flip, multiple images, list as input
images_multi = [base_img, base_img]
aug = iaa.Flipud(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_flipped_by_pos = [0] * len(images_multi)
nb_flipped_by_pos_det = [0] * len(images_multi)
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos[i] += 1
observed = aug_det.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos_det[i] += 1
for val in nb_flipped_by_pos:
assert int(nb_iterations * 0.3) <= val <= int(nb_iterations * 0.7)
for val in nb_flipped_by_pos_det:
assert val in [0, nb_iterations]
def test_GaussianBlur():
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no blur, shouldn't change anything
aug = iaa.GaussianBlur(sigma=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
#np.set_printoptions(formatter={'float_kind': lambda x: "%.6f" % x})
#from scipy import ndimage
#images2 = np.copy(images).astype(np.float32)
#images2[0, ...] = ndimage.gaussian_filter(images2[0, ...], 0.4)
#print(images2)
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
    # keypoints shouldn't be changed
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying blur sigmas
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
def test_AdditiveGaussianNoise():
#base_img = np.array([[128, 128, 128],
# [128, 128, 128],
# [128, 128, 128]], dtype=np.uint8)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
#base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no noise, shouldn't change anything
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# zero-centered noise
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
# std correct?
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 10000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
# non-zero loc
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 10000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
# varying locs
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# varying stds
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_MultiplicativeGaussianNoise():
pass
def test_ReplacingGaussianNoise():
pass
def test_Dropout():
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no dropout, shouldn't change anything
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
# 50% dropout
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
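# --- Added illustrative sketch (not part of the original test suite; not called from main()) ---
# A minimal sketch of a per_channel check for Dropout, assuming per_channel=True
# samples an independent dropout mask for every channel (per_channel is used with
# a float value in the README example of this repository).
def test_Dropout_per_channel_sketch():
    base_img = np.ones((32, 32, 3), dtype=np.uint8) * 255
    aug = iaa.Dropout(p=0.5, per_channel=True)
    observed = aug.augment_images(np.array([base_img]))[0]
    # with a shared mask all channels would be identical; with per-channel masks
    # on a 32x32 image that is practically impossible
    channels_identical = (np.array_equal(observed[..., 0], observed[..., 1])
                          and np.array_equal(observed[..., 1], observed[..., 2]))
    assert not channels_identical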
def test_Multiply():
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no multiply, shouldn't change anything
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# multiply >1.0
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
# multiply <1.0
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
    # keypoints shouldn't be changed
aug = iaa.Multiply(mul=1.2)
    aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying multiply factors
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_Affine():
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
    # no translation/scale/rotate/shear, shouldn't change anything
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
    # this one uses a 4x4 image of all 255, which is zoomed out so that only
    # the center 2x2 area remains 255
    # the zoom-in test should probably be adapted to this style
    # no separate tests here for the x/y axes; this should be fine if zooming in works per axis
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0), ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=1), ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
# ---------------------
# rotate
# ---------------------
    # rotate by 90 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
#assert len(observed_aug[0].nonzero()[0]) == 1
#assert len(observed_aug_det[0].nonzero()[0]) == 1
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
    # the values here had to be chosen quite tolerantly; the middle pixels at top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))).all()
assert (pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))).all()
# ---------------------
# shear
# ---------------------
# TODO
print("[Note] There is currently no test for shear in test_Affine().")
# ---------------------
# cval
# ---------------------
# cval of 0.5 (= 128)
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0.5)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=(0, 1.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
assert len(set(averages)) > 200
# ---------------------
# order
# ---------------------
# TODO
print("[Note] There is currently no test for (interpolation) order in test_Affine().")
def test_ElasticTransformation():
# TODO
print("[Note] Elastic Transformations are currently not tested.")
def test_Sequential():
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
image_lr = image_lr[:, :, np.newaxis]
images_lr = np.array([image_lr])
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
image_ud = image_ud[:, :, np.newaxis]
images_ud = np.array([image_ud])
image_lr_ud = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8) * 255
image_lr_ud = image_lr_ud[:, :, np.newaxis]
images_lr_ud_list = [image_lr_ud]
images_lr_ud = np.array([image_lr_ud])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0), ia.Keypoint(x=2, y=1)], shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2), ia.Keypoint(x=0, y=2), ia.Keypoint(x=0, y=1)], shape=image.shape)]
aug = iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_lr_ud)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_lr_ud)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_lr_ud_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_lr_ud_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 50% horizontal flip, 50% vertical flip
aug = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5)
])
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert np.array_equal(observed_aug, images) or np.array_equal(observed_aug, images_lr) or np.array_equal(observed_aug, images_ud) or np.array_equal(observed_aug, images_lr_ud)
assert np.array_equal(observed_aug_det, images) or np.array_equal(observed_aug_det, images_lr) or np.array_equal(observed_aug_det, images_ud) or np.array_equal(observed_aug_det, images_lr_ud)
assert (0.25 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.25 + 0.10) # should be the same in roughly 25% of all cases
assert nb_changed_aug_det == 0
# random order
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
image = image[:, :, np.newaxis]
images = np.array([image])
images_first_second = (images + 10) * 10
images_second_first = (images * 10) + 10
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=image.shape)]
keypoints_first_second = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=image.shape)]
keypoints_second_first = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=image.shape)]
def images_first(images, random_state, parents, hooks):
return images + 10
def images_second(images, random_state, parents, hooks):
return images * 10
def keypoints_first(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for keypoint in keypoints_on_image.keypoints:
keypoint.x = keypoint.x + 1
return keypoints_on_images
def keypoints_second(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for keypoint in keypoints_on_image.keypoints:
keypoint.y = keypoint.y + keypoint.x
return keypoints_on_images
aug_unrandom = iaa.Sequential([
iaa.Lambda(images_first, keypoints_first),
iaa.Lambda(images_second, keypoints_second)
], random_order=False)
    aug_unrandom_det = aug_unrandom.to_deterministic()
aug_random = iaa.Sequential([
iaa.Lambda(images_first, keypoints_first),
iaa.Lambda(images_second, keypoints_second)
], random_order=True)
    aug_random_det = aug_random.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
nb_images_first_second_unrandom = 0
nb_images_second_first_unrandom = 0
nb_images_first_second_random = 0
nb_images_second_first_random = 0
nb_keypoints_first_second_unrandom = 0
nb_keypoints_second_first_unrandom = 0
nb_keypoints_first_second_random = 0
nb_keypoints_second_first_random = 0
for i in sm.xrange(nb_iterations):
observed_aug_unrandom = aug_unrandom.augment_images(images)
observed_aug_unrandom_det = aug_unrandom_det.augment_images(images)
observed_aug_random = aug_random.augment_images(images)
observed_aug_random_det = aug_random_det.augment_images(images)
keypoints_aug_unrandom = aug_unrandom.augment_keypoints(keypoints)
keypoints_aug_unrandom_det = aug_unrandom_det.augment_keypoints(keypoints)
keypoints_aug_random = aug_random.augment_keypoints(keypoints)
keypoints_aug_random_det = aug_random_det.augment_keypoints(keypoints)
        if i == 0:
            last_aug = observed_aug_unrandom
            last_aug_det = observed_aug_unrandom_det
        else:
            if not np.array_equal(observed_aug_unrandom, last_aug):
                nb_changed_aug += 1
            if not np.array_equal(observed_aug_unrandom_det, last_aug_det):
                nb_changed_aug_det += 1
            last_aug = observed_aug_unrandom
            last_aug_det = observed_aug_unrandom_det
if np.array_equal(observed_aug_unrandom, images_first_second):
nb_images_first_second_unrandom += 1
elif np.array_equal(observed_aug_unrandom, images_second_first):
nb_images_second_first_unrandom += 1
else:
            raise Exception("Received output doesn't match any expected output.")
if np.array_equal(observed_aug_random, images_first_second):
nb_images_first_second_random += 1
elif np.array_equal(observed_aug_random, images_second_first):
nb_images_second_first_random += 1
else:
            raise Exception("Received output doesn't match any expected output.")
if keypoints_equal(keypoints_aug_unrandom, keypoints_first_second):
nb_keypoints_first_second_unrandom += 1
elif keypoints_equal(keypoints_aug_unrandom, keypoints_second_first):
nb_keypoints_second_first_unrandom += 1
else:
            raise Exception("Received output doesn't match any expected output.")
if keypoints_equal(keypoints_aug_random, keypoints_first_second):
nb_keypoints_first_second_random += 1
elif keypoints_equal(keypoints_aug_random, keypoints_second_first):
nb_keypoints_second_first_random += 1
else:
            raise Exception("Received output doesn't match any expected output.")
assert nb_changed_aug == 0
assert nb_changed_aug_det == 0
assert nb_images_first_second_unrandom == nb_iterations
assert nb_images_second_first_unrandom == 0
assert nb_keypoints_first_second_unrandom == nb_iterations
assert nb_keypoints_second_first_unrandom == 0
assert (0.50 - 0.1) <= nb_images_first_second_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_images_second_first_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_keypoints_first_second_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_keypoints_second_first_random / nb_iterations <= (0.50 + 0.1)
def test_Sometimes():
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
image_lr = image_lr[:, :, np.newaxis]
images_lr_list = [image_lr]
images_lr = np.array([image_lr])
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
image_ud = image_ud[:, :, np.newaxis]
images_ud_list = [image_ud]
images_ud = np.array([image_ud])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0), ia.Keypoint(x=2, y=1)], shape=image.shape)]
keypoints_lr = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=0, y=0), ia.Keypoint(x=0, y=1)], shape=image.shape)]
keypoints_ud = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=2), ia.Keypoint(x=2, y=1)], shape=image.shape)]
# 100% chance of if-branch
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_lr)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_lr)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_lr_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_lr_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_lr)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_lr)
# 100% chance of else-branch
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_ud)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_ud)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_ud_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_ud_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_ud)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_ud)
# 50% if branch, 50% else branch
aug = iaa.Sometimes(0.5, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
nb_images_if_branch = 0
nb_images_else_branch = 0
nb_keypoints_if_branch = 0
nb_keypoints_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
keypoints_aug = aug.augment_keypoints(keypoints)
        keypoints_aug_det = aug_det.augment_keypoints(keypoints)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
if np.array_equal(observed_aug, images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, images_ud):
nb_images_else_branch += 1
else:
            raise Exception("Received output doesn't match any expected output.")
if keypoints_equal(keypoints_aug, keypoints_lr):
nb_keypoints_if_branch += 1
elif keypoints_equal(keypoints_aug, keypoints_ud):
nb_keypoints_else_branch += 1
else:
            raise Exception("Received output doesn't match any expected output.")
assert (0.50 - 0.10) <= nb_images_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_images_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.50 + 0.10) # should be the same in roughly 50% of all cases
assert nb_changed_aug_det == 0
# 50% if branch, otherwise no change
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
nb_images_if_branch = 0
nb_images_else_branch = 0
nb_keypoints_if_branch = 0
nb_keypoints_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
keypoints_aug = aug.augment_keypoints(keypoints)
        keypoints_aug_det = aug_det.augment_keypoints(keypoints)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
if np.array_equal(observed_aug, images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, images):
nb_images_else_branch += 1
else:
            raise Exception("Received output doesn't match any expected output.")
if keypoints_equal(keypoints_aug, keypoints_lr):
nb_keypoints_if_branch += 1
elif keypoints_equal(keypoints_aug, keypoints):
nb_keypoints_else_branch += 1
else:
            raise Exception("Received output doesn't match any expected output.")
assert (0.50 - 0.10) <= nb_images_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_images_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.50 + 0.10) # should be the same in roughly 50% of all cases
assert nb_changed_aug_det == 0
def create_random_images(size):
return np.random.uniform(0, 255, size).astype(np.uint8)
def create_random_keypoints(size_images, nb_keypoints_per_img):
result = []
for i in sm.xrange(size_images[0]):
kps = []
height, width = size_images[1], size_images[2]
for i in sm.xrange(nb_keypoints_per_img):
x = np.random.randint(0, width-1)
y = np.random.randint(0, height-1)
kps.append(ia.Keypoint(x=x, y=y))
result.append(ia.KeypointsOnImage(kps, shape=size_images[1:]))
return result
def array_equal_lists(list1, list2):
assert isinstance(list1, list)
assert isinstance(list2, list)
if len(list1) != len(list2):
return False
for a, b in zip(list1, list2):
if not np.array_equal(a, b):
return False
return True
def keypoints_equal(kps1, kps2):
if len(kps1) != len(kps2):
return False
for i in sm.xrange(len(kps1)):
a = kps1[i].keypoints
b = kps2[i].keypoints
if len(a) != len(b):
return False
for j in sm.xrange(len(a)):
if a[j].x != b[j].x or a[j].y != b[j].y:
return False
return True
if __name__ == "__main__":
main()
|
imgaug-master
|
tests/test.py
|
# -*- coding: utf-8 -*-
"""Wrapper functions and classes around scikit-image's AffineTransform.
Simplifies augmentation of images in machine learning.
Example usage:
img_width = 32 # width of the images
img_height = 32 # height of the images
    images = ... # e.g. load via scipy.misc.imread(filename)
# For each image: randomly flip it horizontally (50% chance),
# randomly rotate it between -20 and +20 degrees, randomly translate
# it on the x-axis between -5 and +5 pixel.
    ia = ImageAugmenter(img_width, img_height, hflip=True, rotation_deg=20,
                        translation_x_px=5)
augmented_images = ia.augment_batch(images)
"""
from __future__ import division
from skimage import transform as tf
import numpy as np
import random
def is_minmax_tuple(param):
"""Returns whether the parameter is a tuple containing two values.
Used in create_aug_matrices() and probably useless everywhere else.
Args:
param: The parameter to check (whether it is a tuple of length 2).
Returns:
Boolean
"""
return type(param) is tuple and len(param) == 2
def create_aug_matrices(nb_matrices, img_width_px, img_height_px,
scale_to_percent=1.0, scale_axis_equally=False,
rotation_deg=0, shear_deg=0,
translation_x_px=0, translation_y_px=0,
seed=None):
"""Creates the augmentation matrices that may later be used to transform
images.
This is a wrapper around scikit-image's transform.AffineTransform class.
You can apply those matrices to images using the apply_aug_matrices()
function.
Args:
nb_matrices: How many matrices to return, e.g. 100 returns 100 different
random-generated matrices (= 100 different transformations).
img_width_px: Width of the images that will be transformed later
on (same as the width of each of the matrices).
img_height_px: Height of the images that will be transformed later
on (same as the height of each of the matrices).
scale_to_percent: Same as in ImageAugmenter.__init__().
Up to which percentage the images may be
scaled/zoomed. The negative scaling is automatically derived
from this value. A value of 1.1 allows scaling by any value
between -10% and +10%. You may set min and max values yourself
by using a tuple instead, like (1.1, 1.2) to scale between
+10% and +20%. Default is 1.0 (no scaling).
scale_axis_equally: Same as in ImageAugmenter.__init__().
Whether to always scale both axis (x and y)
in the same way. If set to False, then e.g. the Augmenter
might scale the x-axis by 20% and the y-axis by -5%.
Default is False.
rotation_deg: Same as in ImageAugmenter.__init__().
By how much the image may be rotated around its
center (in degrees). The negative rotation will automatically
be derived from this value. E.g. a value of 20 allows any
rotation between -20 degrees and +20 degrees. You may set min
and max values yourself by using a tuple instead, e.g. (5, 20)
            to rotate between +5 and +20 degrees. Default is 0 (no
rotation).
shear_deg: Same as in ImageAugmenter.__init__().
By how much the image may be sheared (in degrees). The
negative value will automatically be derived from this value.
E.g. a value of 20 allows any shear between -20 degrees and
+20 degrees. You may set min and max values yourself by using a
            tuple instead, e.g. (5, 20) to shear between +5 and +20
degrees. Default is 0 (no shear).
translation_x_px: Same as in ImageAugmenter.__init__().
By up to how many pixels the image may be
translated (moved) on the x-axis. The negative value will
automatically be derived from this value. E.g. a value of +7
allows any translation between -7 and +7 pixels on the x-axis.
You may set min and max values yourself by using a tuple
            instead, e.g. (5, 20) to translate between +5 and +20 pixels.
Default is 0 (no translation on the x-axis).
translation_y_px: Same as in ImageAugmenter.__init__().
See translation_x_px, just for the y-axis.
seed: Seed to use for python's and numpy's random functions.
Returns:
List of augmentation matrices.
"""
assert nb_matrices > 0
assert img_width_px > 0
assert img_height_px > 0
assert is_minmax_tuple(scale_to_percent) or scale_to_percent >= 1.0
assert is_minmax_tuple(rotation_deg) or rotation_deg >= 0
assert is_minmax_tuple(shear_deg) or shear_deg >= 0
assert is_minmax_tuple(translation_x_px) or translation_x_px >= 0
assert is_minmax_tuple(translation_y_px) or translation_y_px >= 0
if seed is not None:
random.seed(seed)
np.random.seed(seed)
result = []
shift_x = int(img_width_px / 2.0)
shift_y = int(img_height_px / 2.0)
# prepare min and max values for
# scaling/zooming (min/max values)
if is_minmax_tuple(scale_to_percent):
scale_x_min = scale_to_percent[0]
scale_x_max = scale_to_percent[1]
else:
        # e.g. scale_to_percent=1.1 means scaling randomly between 0.9 and 1.1
        scale_x_min = 1.0 - (scale_to_percent - 1.0)
        scale_x_max = scale_to_percent
assert scale_x_min > 0.0
#if scale_x_max >= 2.0:
# warnings.warn("Scaling by more than 100 percent (%.2f)." % (scale_x_max,))
scale_y_min = scale_x_min # scale_axis_equally affects the random value generation
scale_y_max = scale_x_max
# rotation (min/max values)
if is_minmax_tuple(rotation_deg):
rotation_deg_min = rotation_deg[0]
rotation_deg_max = rotation_deg[1]
else:
rotation_deg_min = (-1) * int(rotation_deg)
rotation_deg_max = int(rotation_deg)
# shear (min/max values)
if is_minmax_tuple(shear_deg):
shear_deg_min = shear_deg[0]
shear_deg_max = shear_deg[1]
else:
shear_deg_min = (-1) * int(shear_deg)
shear_deg_max = int(shear_deg)
# translation x-axis (min/max values)
if is_minmax_tuple(translation_x_px):
translation_x_px_min = translation_x_px[0]
translation_x_px_max = translation_x_px[1]
else:
translation_x_px_min = (-1) * translation_x_px
translation_x_px_max = translation_x_px
# translation y-axis (min/max values)
if is_minmax_tuple(translation_y_px):
translation_y_px_min = translation_y_px[0]
translation_y_px_max = translation_y_px[1]
else:
translation_y_px_min = (-1) * translation_y_px
translation_y_px_max = translation_y_px
# create nb_matrices randomized affine transformation matrices
for _ in range(nb_matrices):
# generate random values for scaling, rotation, shear, translation
scale_x = random.uniform(scale_x_min, scale_x_max)
scale_y = random.uniform(scale_y_min, scale_y_max)
if not scale_axis_equally:
scale_y = random.uniform(scale_y_min, scale_y_max)
else:
scale_y = scale_x
rotation = np.deg2rad(random.randint(rotation_deg_min, rotation_deg_max))
shear = np.deg2rad(random.randint(shear_deg_min, shear_deg_max))
translation_x = random.randint(translation_x_px_min, translation_x_px_max)
translation_y = random.randint(translation_y_px_min, translation_y_px_max)
# create three affine transformation matrices
# 1st one moves the image to the top left, 2nd one transforms it, 3rd one
# moves it back to the center.
        # The movement is necessary, because rotation is applied to the top left
# and not to the image's center (same for scaling and shear).
matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(scale=(scale_x, scale_y),
rotation=rotation, shear=shear,
translation=(translation_x,
translation_y))
matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
# Combine the three matrices to one affine transformation (one matrix)
matrix = matrix_to_topleft + matrix_transforms + matrix_to_center
# one matrix is ready, add it to the result
result.append(matrix.inverse)
return result
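# NOTE (illustrative addition, not part of the original module): a minimal,
# hedged sketch of how create_aug_matrices() can be called. The function name
# "_demo_create_aug_matrices" and all parameter values below are assumptions
# chosen purely for illustration.
def _demo_create_aug_matrices():
    """Generate a few random affine matrices for 32x32 images."""
    matrices = create_aug_matrices(
        nb_matrices=10,
        img_width_px=32, img_height_px=32,
        scale_to_percent=1.1,   # zoom between roughly -10% and +10%
        rotation_deg=20,        # rotate between -20 and +20 degrees
        translation_x_px=5,     # translate between -5 and +5 pixels on x
        seed=42)
    # each entry is an inverse transform suitable for skimage.transform.warp()
    # (see apply_aug_matrices() below)
    return matrices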
def apply_aug_matrices(images, matrices, transform_channels_equally=True,
channel_is_first_axis=False, random_order=True,
mode="constant", cval=0.0, interpolation_order=1,
seed=None):
"""Augment the given images using the given augmentation matrices.
This function is a wrapper around scikit-image's transform.warp().
It is expected to be called by ImageAugmenter.augment_batch().
The matrices may be generated by create_aug_matrices().
Args:
images: Same as in ImageAugmenter.augment_batch().
Numpy array (dtype: uint8, i.e. values 0-255) with the images.
Expected shape is either (image-index, height, width) for
grayscale images or (image-index, channel, height, width) for
images with channels (e.g. RGB) where the channel has the first
index or (image-index, height, width, channel) for images with
channels, where the channel is the last index.
If your shape is (image-index, channel, width, height) then
you must also set channel_is_first_axis=True in the constructor.
matrices: A list of augmentation matrices as produced by
create_aug_matrices().
transform_channels_equally: Same as in ImageAugmenter.__init__().
Whether to apply the exactly same
transformations to each channel of an image (True). Setting
it to False allows different transformations per channel,
e.g. the red-channel might be rotated by +20 degrees, while
the blue channel (of the same image) might be rotated
by -5 degrees. If you don't have any channels (2D grayscale),
you can simply ignore this setting.
Default is True (transform all equally).
channel_is_first_axis: Same as in ImageAugmenter.__init__().
Whether the channel (e.g. RGB) is the first
axis of each image (True) or the last axis (False).
False matches the scipy and PIL implementation and is the
default. If your images are 2D-grayscale then you can ignore
this setting (as the augmenter will ignore it too).
random_order: Whether to apply the augmentation matrices in a random
order (True, e.g. the 2nd matrix might be applied to the
5th image) or in the given order (False, e.g. the 2nd matrix might
be applied to the 2nd image).
Notice that for multi-channel images (e.g. RGB) this function
will use a different matrix for each channel, unless
transform_channels_equally is set to True.
mode: Parameter used for the transform.warp-function of scikit-image.
Can usually be ignored.
cval: Parameter used for the transform.warp-function of scikit-image.
Defines the fill color for "new" pixels, e.g. for empty areas
after rotations. (0.0 is black, 1.0 is white.)
interpolation_order: Parameter used for the transform.warp-function of
scikit-image. Defines the order of all interpolations used to
generate the new/augmented image. See their documentation for
further details.
seed: Seed to use for python's and numpy's random functions.
"""
# images must be numpy array
assert type(images).__module__ == np.__name__, "Expected numpy array for " \
"parameter 'images'."
# images must have uint8 as dtype (0-255)
assert images.dtype.name == "uint8", "Expected numpy.uint8 as image dtype."
# 3 axis total (2 per image) for grayscale,
# 4 axis total (3 per image) for RGB (usually)
assert len(images.shape) in [3, 4], """Expected 'images' parameter to have
either shape (image index, y, x) for greyscale
or (image index, channel, y, x) / (image index, y, x, channel)
for multi-channel (usually color) images."""
if seed:
np.random.seed(seed)
nb_images = images.shape[0]
# estimate number of channels, set to 1 if there is no axis channel,
# otherwise it will usually be 3
has_channels = False
nb_channels = 1
if len(images.shape) == 4:
has_channels = True
if channel_is_first_axis:
nb_channels = images.shape[1] # first axis within each image
else:
nb_channels = images.shape[3] # last axis within each image
# whether to apply the transformations directly to the whole image
# array (True) or for each channel individually (False)
apply_directly = not has_channels or (transform_channels_equally
and not channel_is_first_axis)
# We generate here the order in which the matrices may be applied.
# At the end, order_indices will contain the index of the matrix to use
# for each image, e.g. [15, 2] would mean, that the 15th matrix will be
# applied to the 0th image, the 2nd matrix to the 1st image.
    # If the images have multiple channels (e.g. RGB) and
# transform_channels_equally has been set to False, we will need one
# matrix per channel instead of per image.
# 0 to nb_images, but restart at 0 if index is beyond number of matrices
len_indices = nb_images if apply_directly else nb_images * nb_channels
if random_order:
# Notice: This way to choose random matrices is concise, but can create
# problems if there is a low amount of images and matrices.
        # E.g. suppose that 2 images ought to be transformed by either
# 0px translation on the x-axis or 1px translation. So 50% of all
# matrices translate by 0px and 50% by 1px. The following method
# will randomly choose a combination of the two matrices for the
# two images (matrix 0 for image 0 and matrix 0 for image 1,
# matrix 0 for image 0 and matrix 1 for image 1, ...).
# In 50% of these cases, a different matrix will be chosen for image 0
# and image 1 (matrices 0, 1 or matrices 1, 0). But 50% of these
# "different" matrices (different index) will be the same, as 50%
# translate by 1px and 50% by 0px. As a result, 75% of all augmentations
# will transform both images in the same way.
# The effect decreases if more matrices or images are chosen.
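        # (added note, a worked version of the 75% claim above, assuming the
        # pool holds exactly two independently generated matrices): with
        # probability 0.5 both matrices encode the same translation, in which
        # case the two images are always transformed identically; otherwise
        # they are transformed identically only if the same index is drawn
        # twice (probability 0.5). Overall: 0.5 * 1.0 + 0.5 * 0.5 = 0.75.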
        # randint's upper bound is exclusive, so this draws indices in [0, len(matrices))
        order_indices = np.random.randint(0, len(matrices), len_indices)
else:
        # monotonically increasing indices (each by +1), but none of them may be
        # higher than or equal to the number of matrices
order_indices = np.arange(0, len_indices) % len(matrices)
result = np.zeros(images.shape, dtype=np.float32)
matrix_number = 0
# iterate over every image, find out which matrix to apply and then use
# that matrix to augment the image
for img_idx, image in enumerate(images):
if apply_directly:
# we can apply the matrix to the whole numpy array of the image
            # at the same time, so we do that to save time (instead of e.g. three
# steps for three channels as in the else-part)
matrix = matrices[order_indices[matrix_number]]
result[img_idx, ...] = tf.warp(image, matrix, mode=mode, cval=cval,
order=interpolation_order)
matrix_number += 1
else:
            # we can't apply the matrix to the whole image in one step, instead
# we have to apply it to each channel individually. that happens
# if the channel is the first axis of each image (incompatible with
# tf.warp()) or if it was explicitly requested via
# transform_channels_equally=False.
for channel_idx in range(nb_channels):
matrix = matrices[order_indices[matrix_number]]
if channel_is_first_axis:
warped = tf.warp(image[channel_idx], matrix, mode=mode,
cval=cval, order=interpolation_order)
result[img_idx, channel_idx, ...] = warped
else:
warped = tf.warp(image[..., channel_idx], matrix, mode=mode,
cval=cval, order=interpolation_order)
result[img_idx, ..., channel_idx] = warped
if not transform_channels_equally:
matrix_number += 1
if transform_channels_equally:
matrix_number += 1
return result
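# NOTE (illustrative addition, not part of the original module): a minimal,
# hedged sketch combining create_aug_matrices() and apply_aug_matrices(). The
# function name "_demo_apply_aug_matrices" and the random grayscale batch are
# assumptions made purely for illustration.
def _demo_apply_aug_matrices():
    """Augment a small batch of random 32x32 grayscale images."""
    images = np.random.randint(0, 256, (4, 32, 32)).astype(np.uint8)
    matrices = create_aug_matrices(100, 32, 32, rotation_deg=20,
                                   translation_x_px=5, seed=1)
    # result is float32 with values in [0.0, 1.0] (skimage's warp() converts
    # the uint8 input to floats)
    return apply_aug_matrices(images, matrices, seed=1)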
class ImageAugmenter(object):
"""Helper class to randomly augment images, usually for neural networks.
Example usage:
img_width = 32 # width of the images
img_height = 32 # height of the images
        images = ... # e.g. load via scipy.misc.imread(filename)
# For each image: randomly flip it horizontally (50% chance),
# randomly rotate it between -20 and +20 degrees, randomly translate
# it on the x-axis between -5 and +5 pixel.
        ia = ImageAugmenter(img_width, img_height, hflip=True, rotation_deg=20,
                            translation_x_px=5)
augmented_images = ia.augment_batch(images)
"""
def __init__(self, img_width_px, img_height_px, channel_is_first_axis=False,
hflip=False, vflip=False,
scale_to_percent=1.0, scale_axis_equally=False,
rotation_deg=0, shear_deg=0,
translation_x_px=0, translation_y_px=0,
transform_channels_equally=True):
"""
Args:
img_width_px: The intended width of each image in pixels.
img_height_px: The intended height of each image in pixels.
channel_is_first_axis: Whether the channel (e.g. RGB) is the first
axis of each image (True) or the last axis (False).
False matches the scipy and PIL implementation and is the
default. If your images are 2D-grayscale then you can ignore
this setting (as the augmenter will ignore it too).
hflip: Whether to randomly flip images horizontally (on the y-axis).
You may choose either False (no horizontal flipping),
True (flip with probability 0.5) or use a float
value (probability) between 0.0 and 1.0. Default is False.
vflip: Whether to randomly flip images vertically (on the x-axis).
You may choose either False (no vertical flipping),
True (flip with probability 0.5) or use a float
value (probability) between 0.0 and 1.0. Default is False.
scale_to_percent: Up to which percentage the images may be
scaled/zoomed. The negative scaling is automatically derived
from this value. A value of 1.1 allows scaling by any value
between -10% and +10%. You may set min and max values yourself
by using a tuple instead, like (1.1, 1.2) to scale between
+10% and +20%. Default is 1.0 (no scaling).
scale_axis_equally: Whether to always scale both axis (x and y)
in the same way. If set to False, then e.g. the Augmenter
might scale the x-axis by 20% and the y-axis by -5%.
Default is False.
rotation_deg: By how much the image may be rotated around its
center (in degrees). The negative rotation will automatically
be derived from this value. E.g. a value of 20 allows any
rotation between -20 degrees and +20 degrees. You may set min
and max values yourself by using a tuple instead, e.g. (5, 20)
                to rotate between +5 and +20 degrees. Default is 0 (no
rotation).
shear_deg: By how much the image may be sheared (in degrees). The
negative value will automatically be derived from this value.
E.g. a value of 20 allows any shear between -20 degrees and
+20 degrees. You may set min and max values yourself by using a
                tuple instead, e.g. (5, 20) to shear between +5 and +20
degrees. Default is 0 (no shear).
translation_x_px: By up to how many pixels the image may be
translated (moved) on the x-axis. The negative value will
automatically be derived from this value. E.g. a value of +7
allows any translation between -7 and +7 pixels on the x-axis.
You may set min and max values yourself by using a tuple
                instead, e.g. (5, 20) to translate between +5 and +20 pixels.
Default is 0 (no translation on the x-axis).
translation_y_px: See translation_x_px, just for the y-axis.
transform_channels_equally: Whether to apply the exactly same
transformations to each channel of an image (True). Setting
it to False allows different transformations per channel,
e.g. the red-channel might be rotated by +20 degrees, while
the blue channel (of the same image) might be rotated
by -5 degrees. If you don't have any channels (2D grayscale),
you can simply ignore this setting.
Default is True (transform all equally).
"""
self.img_width_px = img_width_px
self.img_height_px = img_height_px
self.channel_is_first_axis = channel_is_first_axis
self.hflip_prob = 0.0
        # note: we have to check first for floats, otherwise "hflip == True"
        # will evaluate to true if hflip is 1.0. So choosing 1.0 (100%) would
        # result in hflip_prob being set to 0.5 (50%).
if isinstance(hflip, float):
assert hflip >= 0.0 and hflip <= 1.0
self.hflip_prob = hflip
elif hflip == True:
self.hflip_prob = 0.5
elif hflip == False:
self.hflip_prob = 0.0
else:
raise Exception("Unexpected value for parameter 'hflip'.")
self.vflip_prob = 0.0
if isinstance(vflip, float):
assert vflip >= 0.0 and vflip <= 1.0
self.vflip_prob = vflip
elif vflip == True:
self.vflip_prob = 0.5
elif vflip == False:
self.vflip_prob = 0.0
else:
raise Exception("Unexpected value for parameter 'vflip'.")
self.scale_to_percent = scale_to_percent
self.scale_axis_equally = scale_axis_equally
self.rotation_deg = rotation_deg
self.shear_deg = shear_deg
self.translation_x_px = translation_x_px
self.translation_y_px = translation_y_px
self.transform_channels_equally = transform_channels_equally
self.cval = 0.0
self.interpolation_order = 1
self.pregenerated_matrices = None
def pregenerate_matrices(self, nb_matrices, seed=None):
"""Pregenerate/cache augmentation matrices.
If matrices are pregenerated, augment_batch() will reuse them on
each call. The augmentations will not always be the same,
as the order of the matrices will be randomized (when
they are applied to the images). The requirement for that is though
that you pregenerate enough of them (e.g. a couple thousand).
Note that generating the augmentation matrices is usually fast
and only starts to make sense if you process millions of small images
or many tens of thousands of big images.
Each call to this method results in pregenerating a new set of matrices,
e.g. to replace a list of matrices that has been used often enough.
Calling this method with nb_matrices set to 0 will remove the
pregenerated matrices and augment_batch() returns to its default
behaviour of generating new matrices on each call.
Args:
nb_matrices: The number of matrices to pregenerate. E.g. a few
thousand. If set to 0, the matrices will be generated again on
each call of augment_batch().
seed: A random seed to use.
"""
assert nb_matrices >= 0
if nb_matrices == 0:
self.pregenerated_matrices = None
else:
matrices = create_aug_matrices(nb_matrices,
self.img_width_px,
self.img_height_px,
scale_to_percent=self.scale_to_percent,
scale_axis_equally=self.scale_axis_equally,
rotation_deg=self.rotation_deg,
shear_deg=self.shear_deg,
translation_x_px=self.translation_x_px,
translation_y_px=self.translation_y_px,
seed=seed)
self.pregenerated_matrices = matrices
def augment_batch(self, images, seed=None):
"""Augments a batch of images.
Applies all settings (rotation, shear, translation, ...) that
have been chosen in the constructor.
Args:
images: Numpy array (dtype: uint8, i.e. values 0-255) with the images.
Expected shape is either (image-index, height, width) for
grayscale images or (image-index, channel, height, width) for
images with channels (e.g. RGB) where the channel has the first
index or (image-index, height, width, channel) for images with
channels, where the channel is the last index.
If your shape is (image-index, channel, width, height) then
you must also set channel_is_first_axis=True in the constructor.
seed: Seed to use for python's and numpy's random functions.
Default is None (dont use a seed).
Returns:
Augmented images as numpy array of dtype float32 (i.e. values
are between 0.0 and 1.0).
"""
shape = images.shape
nb_channels = 0
if len(shape) == 3:
# shape like (image_index, y-axis, x-axis)
assert shape[1] == self.img_height_px
assert shape[2] == self.img_width_px
nb_channels = 1
elif len(shape) == 4:
if not self.channel_is_first_axis:
# shape like (image-index, y-axis, x-axis, channel-index)
assert shape[1] == self.img_height_px
assert shape[2] == self.img_width_px
nb_channels = shape[3]
else:
# shape like (image-index, channel-index, y-axis, x-axis)
assert shape[2] == self.img_height_px
assert shape[3] == self.img_width_px
nb_channels = shape[1]
else:
msg = "Mismatch between images shape %s and " \
"predefined image width/height (%d/%d)."
raise Exception(msg % (str(shape), self.img_width_px, self.img_height_px))
if seed:
random.seed(seed)
np.random.seed(seed)
# --------------------------------
# horizontal and vertical flipping/mirroring
# --------------------------------
# This should be done before applying the affine matrices, as otherwise
# contents of image might already be rotated/translated out of the image.
# It is done with numpy instead of the affine matrices, because
# scikit-image doesn't offer a nice interface to add mirroring/flipping
# to affine transformations. The numpy operations are O(1), so they
# shouldn't have a noticeable effect on runtimes. They also won't suffer
# from interpolation problems.
if self.hflip_prob > 0 or self.vflip_prob > 0:
# TODO this currently ignores the setting in
# transform_channels_equally and will instead always flip all
# channels equally
# if this is simply a view, then the input array gets flipped too
# for some reason
images_flipped = np.copy(images)
#images_flipped = images.view()
if len(shape) == 4 and self.channel_is_first_axis:
# roll channel to the last axis
                # swapaxes doesn't work here, because
# (image index, channel, y, x)
# would be turned into
# (image index, x, y, channel)
# and y needs to come before x
images_flipped = np.rollaxis(images_flipped, 1, 4)
y_p = self.hflip_prob
x_p = self.vflip_prob
for i in range(images.shape[0]):
if y_p > 0 and random.random() < y_p:
images_flipped[i] = np.fliplr(images_flipped[i])
if x_p > 0 and random.random() < x_p:
images_flipped[i] = np.flipud(images_flipped[i])
if len(shape) == 4 and self.channel_is_first_axis:
# roll channel back to the second axis (index 1)
images_flipped = np.rollaxis(images_flipped, 3, 1)
images = images_flipped
# --------------------------------
# if no augmentation has been chosen, stop early
# for improved performance (evade applying matrices)
# --------------------------------
if self.pregenerated_matrices is None \
and self.scale_to_percent == 1.0 and self.rotation_deg == 0 \
and self.shear_deg == 0 \
and self.translation_x_px == 0 and self.translation_y_px == 0:
return np.array(images, dtype=np.float32) / 255
# --------------------------------
# generate transformation matrices
# --------------------------------
if self.pregenerated_matrices is not None:
matrices = self.pregenerated_matrices
else:
# estimate the number of matrices required
if self.transform_channels_equally:
nb_matrices = shape[0]
else:
nb_matrices = shape[0] * nb_channels
# generate matrices
matrices = create_aug_matrices(nb_matrices,
self.img_width_px,
self.img_height_px,
scale_to_percent=self.scale_to_percent,
scale_axis_equally=self.scale_axis_equally,
rotation_deg=self.rotation_deg,
shear_deg=self.shear_deg,
translation_x_px=self.translation_x_px,
translation_y_px=self.translation_y_px,
seed=seed)
# --------------------------------
# apply transformation matrices (i.e. augment images)
# --------------------------------
return apply_aug_matrices(images, matrices,
transform_channels_equally=self.transform_channels_equally,
channel_is_first_axis=self.channel_is_first_axis,
cval=self.cval, interpolation_order=self.interpolation_order,
seed=seed)
def plot_image(self, image, nb_repeat=40, show_plot=True):
"""Plot augmented variations of an image.
This method takes an image and plots it by default in 40 differently
augmented versions.
This method is intended to visualize the strength of your chosen
augmentations (so for debugging).
Args:
image: The image to plot.
nb_repeat: How often to plot the image. Each time it is plotted,
the chosen augmentation will be different. (Default: 40).
show_plot: Whether to show the plot. False makes sense if you
don't have a graphical user interface on the machine.
(Default: True)
Returns:
The figure of the plot.
Use figure.savefig() to save the image.
"""
if len(image.shape) == 2:
images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1]))
else:
images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1],
image.shape[2]))
return self.plot_images(images, True, show_plot=show_plot)
def plot_images(self, images, augment, show_plot=True, figure=None):
"""Plot augmented variations of images.
The images will all be shown in the same window.
It is recommended to not plot too many of them (i.e. stay below 100).
This method is intended to visualize the strength of your chosen
augmentations (so for debugging).
Args:
images: A numpy array of images. See augment_batch().
augment: Whether to augment the images (True) or just display
them in the way they are (False).
show_plot: Whether to show the plot. False makes sense if you
don't have a graphical user interface on the machine.
(Default: True)
figure: The figure of the plot in which to draw the images.
Provide the return value of this function (from a prior call)
                to draw in the same plot window again. Choosing 'None' will
create a new figure. (Default is None.)
Returns:
The figure of the plot.
Use figure.savefig() to save the image.
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
if augment:
images = self.augment_batch(images)
# (Lists of) Grayscale images have the shape (image index, y, x)
# Multi-Channel images therefore must have 4 or more axes here
if len(images.shape) >= 4:
# The color-channel is expected to be the last axis by matplotlib
            # therefore exchange the axes, if it's the first one here
if self.channel_is_first_axis:
images = np.rollaxis(images, 1, 4)
nb_cols = 10
nb_rows = 1 + int(images.shape[0] / nb_cols)
if figure is not None:
fig = figure
plt.figure(fig.number)
fig.clear()
else:
fig = plt.figure(figsize=(10, 10))
for i, image in enumerate(images):
image = images[i]
plot_number = i + 1
ax = fig.add_subplot(nb_rows, nb_cols, plot_number, xticklabels=[],
yticklabels=[])
ax.set_axis_off()
# "cmap" should restrict the color map to grayscale, but strangely
# also works well with color images
imgplot = plt.imshow(image, cmap=cm.Greys_r, aspect="equal")
# not showing the plot might be useful e.g. on clusters
if show_plot:
plt.show()
return fig
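# NOTE (illustrative addition, not part of the original module): a minimal,
# hedged sketch of the ImageAugmenter workflow described in the docstrings
# above (constructor, optional matrix pregeneration, augment_batch()). The
# function name "_demo_image_augmenter" and the random image batch are
# assumptions made purely for illustration.
def _demo_image_augmenter():
    """Augment a batch of random 32x32 grayscale images."""
    images = np.random.randint(0, 256, (16, 32, 32)).astype(np.uint8)
    augmenter = ImageAugmenter(32, 32, hflip=True, rotation_deg=20,
                               translation_x_px=5)
    # optional: cache matrices so that subsequent batches reuse them
    augmenter.pregenerate_matrices(1000, seed=7)
    # returns float32 images with values in [0.0, 1.0]
    return augmenter.augment_batch(images, seed=7)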
|
imgaug-master
|
old_version/ImageAugmenter.py
|
"""Tests functionality of the ImageAugmenter class."""
from __future__ import print_function
# make sure that ImageAugmenter can be imported from parent directory
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import unittest
import numpy as np
from ImageAugmenter import ImageAugmenter
import random
from skimage import data
random.seed(123456789)
np.random.seed(123456789)
class TestImageAugmenter(unittest.TestCase):
"""Tests functionality of the ImageAugmenter class."""
def test_rotation(self):
"""Test rotation of 90 degrees on an image that should change
upon rotation."""
image_before = [[0, 255, 0],
[0, 255, 0],
[0, 255, 0]]
image_target = [[ 0, 0, 0],
[1.0, 1.0, 1.0],
[ 0, 0, 0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(3, 3, rotation_deg=(90, 90))
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
    def test_rotation_invariant(self):
        """Test rotation of -180 to +180 degrees on a rotation-invariant image."""
image_before = [[0, 0, 0],
[0, 255, 0],
[0, 0, 0]]
image_target = [[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]]
images = np.array([image_before]).astype(np.uint8)
        # random rotation of up to 180 degrees
augmenter = ImageAugmenter(3, 3, rotation_deg=180)
# all must be similar to target
nb_similar = 0
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
# some tolerance here - interpolation problems can let the image
# change a bit, even though it should be invariant to rotations
if np.allclose(image_target, image_after, atol=0.1):
nb_similar += 1
        self.assertEqual(nb_similar, 100)
def test_scaling(self):
"""Rough test for zooming/scaling (only zoom in / scaling >1.0).
The test is rough, because interpolation problems make the result
of scaling on synthetic images rather hard to predict (and unintuitive).
"""
size_x = 4
size_y = 4
# a 4x4 image of which the center 3x3 pixels are bright white,
# everything else black
image_before = np.zeros((size_y, size_x))
image_before[1:size_y-1, 1:size_x-1] = 255
images = np.array([image_before]).astype(np.uint8)
# about 200% zoom in
augmenter = ImageAugmenter(size_x, size_y, scale_to_percent=(1.99, 1.99),
scale_axis_equally=True)
image_after = augmenter.augment_batch(images)[0]
        # we scale positively (zoom in), therefore we expect the center bright
# spot to grow, resulting in a higher total brightness
self.assertTrue(np.sum(image_after) > np.sum(image_before)/255)
    def test_shear(self):
        """Very rough test of shear: It simply measures whether images tend
to be significantly different after shear (any change)."""
image_before = [[0, 255, 0],
[0, 255, 0],
[0, 255, 0]]
image_target = [[0, 1.0, 0],
[0, 1.0, 0],
[0, 1.0, 0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(3, 3, shear_deg=50)
# the majority should be different from the source image
nb_different = 0
nb_augment = 1000
for _ in range(nb_augment):
image_after = augmenter.augment_batch(images)[0]
if not np.allclose(image_target, image_after):
nb_different += 1
self.assertTrue(nb_different > nb_augment*0.9)
def test_translation_x(self):
"""Testing translation on the x-axis."""
#image_before = np.zeros((2, 2), dtype=np.uint8)
image_before = [[255, 0],
[255, 0]]
#image_after = np.zeros((2, 2), dtype=np.float32)
image_target = [[0, 1.0],
[0, 1.0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_translation_y(self):
"""Testing translation on the y-axis."""
image_before = [[ 0, 0],
[255, 255]]
image_target = [[1.0, 1.0],
[ 0, 0]]
images = np.array([image_before]).astype(np.uint8)
# translate always by -1px on y-axis
augmenter = ImageAugmenter(2, 2, translation_y_px=(-1,-1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_single_channel(self):
"""Tests images with channels (e.g. RGB channels)."""
# One single channel
# channel is last axis
# test by translating an image with one channel on the x-axis (1 px)
image_before = np.zeros((2, 2, 1), dtype=np.uint8)
image_before[0, 0, 0] = 255
image_before[1, 0, 0] = 255
image_target = np.zeros((2, 2, 1), dtype=np.float32)
image_target[0, 1, 0] = 1.0
image_target[1, 1, 0] = 1.0
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1))
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
# One single channel
# channel is first axis
# test by translating an image with one channel on the x-axis (1 px)
image_before = np.zeros((1, 2, 2), dtype=np.uint8)
image_before[0] = [[255, 0],
[255, 0]]
image_target = np.zeros((1, 2, 2), dtype=np.float32)
image_target[0] = [[0, 1.0],
[0, 1.0]]
images = np.array([image_before]).astype(np.uint8)
augmenter = ImageAugmenter(2, 2, translation_x_px=(1,1),
channel_is_first_axis=True)
# all must be similar
for _ in range(100):
image_after = augmenter.augment_batch(images)[0]
self.assertTrue(np.allclose(image_target, image_after))
def test_two_channels(self):
"""Tests augmentation of images with two channels (either first or last
axis of each image). Tested using x-translation."""
# -----------------------------------------------
# two channels,
# channel is the FIRST axis of each image
# -----------------------------------------------
augmenter = ImageAugmenter(2, 2, translation_y_px=(0,1),
channel_is_first_axis=True)
image_before = np.zeros((2, 2, 2)).astype(np.uint8)
# 1st channel: top row white, bottom row black
image_before[0][0][0] = 255
image_before[0][0][1] = 255
image_before[0][1][0] = 0
image_before[0][1][1] = 0
# 2nd channel: top right corner white, everything else black
image_before[1][0][0] = 0
image_before[1][0][1] = 255
image_before[1][1][0] = 0
image_before[1][1][1] = 0
# ^ channel
# ^ y (row)
# ^ x (column)
image_target = np.zeros((2, 2, 2)).astype(np.float32)
        # 1st channel: bottom row white, top row black
image_target[0][0][0] = 0
image_target[0][0][1] = 0
image_target[0][1][0] = 1.0
image_target[0][1][1] = 1.0
# 2nd channel: bottom right corner white, everything else black
image_target[1][0][0] = 0
image_target[1][0][1] = 0
image_target[1][1][0] = 0
image_target[1][1][1] = 1.0
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 2, 2))
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_target, image_after):
nb_similar += 1
self.assertTrue(nb_similar > (nb_augment*0.4) and nb_similar < (nb_augment*0.6))
# -----------------------------------------------
# two channels,
# channel is the LAST axis of each image
# -----------------------------------------------
augmenter = ImageAugmenter(2, 2, translation_y_px=(0,1),
channel_is_first_axis=False)
image_before = np.zeros((2, 2, 2)).astype(np.uint8)
# 1st channel: top row white, bottom row black
image_before[0][0][0] = 255
image_before[0][1][0] = 255
image_before[1][0][0] = 0
image_before[1][1][0] = 0
# 2nd channel: top right corner white, everything else black
image_before[0][0][1] = 0
image_before[0][1][1] = 255
image_before[1][0][1] = 0
image_before[1][1][1] = 0
# ^ y
# ^ x
# ^ channel
image_target = np.zeros((2, 2, 2)).astype(np.float32)
# 1st channel: bottom row white, top row black
image_target[0][0][0] = 0
image_target[0][1][0] = 0
image_target[1][0][0] = 1.0
image_target[1][1][0] = 1.0
# 2nd channel: bottom right corner white, everything else black
image_target[0][0][1] = 0
image_target[0][1][1] = 0
image_target[1][0][1] = 0
image_target[1][1][1] = 1.0
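# As above: the random 0/1 px y-translation should produce the target image
# in roughly half of the augmentations.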
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 2, 2))
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_target, image_after):
nb_similar += 1
self.assertTrue(nb_similar > (nb_augment*0.4) and nb_similar < (nb_augment*0.6))
def test_transform_channels_unequally(self):
"""Tests whether 2 or more channels can be augmented non-identically
at the same time.
E.g. channel 0 is rotated by 20 degrees, channel 1 (of the same image)
is rotated by 5 degrees.
"""
# two channels, channel is first axis of each image
augmenter = ImageAugmenter(3, 3, translation_x_px=(0,1),
transform_channels_equally=False,
channel_is_first_axis=True)
image_before = np.zeros((2, 3, 3)).astype(np.uint8)
image_before[0] = [[255, 0, 0],
[ 0, 0, 0],
[ 0, 0, 0]]
image_before[1] = [[ 0, 0, 0],
[ 0, 0, 0],
[ 0, 255, 0]]
# ^ channel
image_target = np.zeros((2, 3, 3)).astype(np.float32)
image_target[0] = [[ 0, 1.0, 0],
[ 0, 0, 0],
[ 0, 0, 0]]
image_target[1] = [[ 0, 0, 0],
[ 0, 0, 0],
[ 0, 0, 1.0]]
nb_similar_channel_0 = 0
nb_similar_channel_1 = 0
nb_equally_transformed = 0
#nb_unequally_transformed = 0
nb_augment = 1000
image = np.array([image_before]).astype(np.uint8)
images = np.resize(image, (nb_augment, 2, 3, 3))
images_augmented = augmenter.augment_batch(images)
# augment 1000 times and count how often the channels were transformed
# in equal or unequal ways.
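# Since the channels are transformed independently (translation of 0 or 1 px
# each), every channel should match its target about 50% of the time and both
# channels should receive the same transformation in only about 50% of cases.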
for image_after in images_augmented:
similar_channel_0 = np.allclose(image_target[0], image_after[0])
similar_channel_1 = np.allclose(image_target[1], image_after[1])
if similar_channel_0:
nb_similar_channel_0 += 1
if similar_channel_1:
nb_similar_channel_1 += 1
if similar_channel_0 == similar_channel_1:
nb_equally_transformed += 1
#else:
# nb_unequally_transformed += 1
# each one should be around 50%
self.assertTrue(nb_similar_channel_0 > 0.40*nb_augment
and nb_similar_channel_0 < 0.60*nb_augment)
self.assertTrue(nb_similar_channel_1 > 0.40*nb_augment
and nb_similar_channel_1 < 0.60*nb_augment)
self.assertTrue(nb_equally_transformed > 0.40*nb_augment
and nb_equally_transformed < 0.60*nb_augment)
def test_no_blacks(self):
"""Test whether random augmentations can cause an image to turn
completely black (cval=0.0), which should never happen."""
image_before = data.camera()
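# data.camera() is assumed here to be the grayscale example image from
# scikit-image's skimage.data module, used as a realistic test image.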
y_size, x_size = image_before.shape
augmenter = ImageAugmenter(x_size, y_size,
scale_to_percent=1.5,
scale_axis_equally=False,
rotation_deg=90,
shear_deg=20,
translation_x_px=10,
translation_y_px=10)
image_black = np.zeros(image_before.shape, dtype=np.float32)
nb_augment = 100
images = np.resize([image_before], (nb_augment, y_size, x_size))
images_augmented = augmenter.augment_batch(images)
nb_black = 0
for image_after in images_augmented:
if np.allclose(image_after, image_black):
nb_black += 1
self.assertEqual(nb_black, 0)
def test_non_square_images(self):
"""Test whether transformation of images with unequal x and y axis sizes
works as expected."""
y_size = 11
x_size = 4
image_before = np.zeros((y_size, x_size), dtype=np.uint8)
image_target = np.zeros((y_size, x_size), dtype=np.float32)
# place a bright white line in the center (of the y-axis, so left to right)
# Augmenter will move it up by 2 (translation on y by -2)
y_line_pos = int(y_size/2) + 1
for x_pos in range(x_size):
image_before[y_line_pos][x_pos] = 255
image_target[y_line_pos - 2][x_pos] = 1.0
augmenter = ImageAugmenter(x_size, y_size, translation_y_px=(-2,-2))
nb_augment = 100
images = np.resize([image_before], (nb_augment, y_size, x_size))
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertEqual(nb_augment, nb_similar)
def test_no_information_leaking(self):
"""Tests whether the image provided to augment_batch() is changed
instead of only simply returned in the changed form (leaking
information / hidden sideffects)."""
image_before = [[255, 0, 255, 0, 255],
[ 0, 255, 0, 255, 0],
[255, 255, 255, 255, 255],
[ 0, 255, 0, 255, 0],
[255, 0, 255, 0, 255]]
image_before = np.array(image_before, dtype=np.uint8)
image_before_copy = np.copy(image_before)
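# keep an unmodified copy so we can verify afterwards that augment_batch()
# did not change the original array in place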
nb_augment = 100
images = np.resize([image_before], (nb_augment, 5, 5))
augmenter = ImageAugmenter(5, 5,
hflip=True, vflip=True,
scale_to_percent=1.5,
rotation_deg=25, shear_deg=10,
translation_x_px=5, translation_y_px=5)
images_after = augmenter.augment_batch(images)
self.assertTrue(np.array_equal(image_before, image_before_copy))
def test_horizontal_flipping(self):
"""Tests horizontal flipping of images (mirror on y-axis)."""
image_before = [[255, 0, 0],
[ 0, 255, 255],
[ 0, 0, 255]]
image_before = np.array(image_before, dtype=np.uint8)
image_target = [[ 0, 0, 1.0],
[1.0, 1.0, 0],
[1.0, 0, 0]]
image_target = np.array(image_target, dtype=np.float32)
nb_augment = 1000
images = np.resize([image_before], (nb_augment, 3, 3))
# Test using just "False" for hflip (should be exactly 0%)
augmenter = ImageAugmenter(3, 3, hflip=False)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertEqual(nb_similar, 0)
# Test using just "True" for hflip (should be ~50%)
augmenter = ImageAugmenter(3, 3, hflip=True)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.4 and nb_similar < nb_augment*0.6)
# Test using a probability (float value) for hflip (hflip=0.9,
# should be ~90%)
augmenter = ImageAugmenter(3, 3, hflip=0.9)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.8 and nb_similar <= nb_augment*1.0)
# Test with multiple channels
image_before = np.zeros((2, 3, 3), dtype=np.uint8)
image_before[0] = [[255, 0, 0],
[255, 0, 0],
[ 0, 0, 0]]
image_before[1] = [[ 0, 0, 0],
[255, 255, 0],
[ 0, 0, 0]]
image_target = np.zeros((2, 3, 3), dtype=np.float32)
image_target[0] = [[ 0, 0, 1.0],
[ 0, 0, 1.0],
[ 0, 0, 0]]
image_target[1] = [[ 0, 0, 0],
[ 0, 1.0, 1.0],
[ 0, 0, 0]]
images = np.resize([image_before], (nb_augment, 2, 3, 3))
augmenter = ImageAugmenter(3, 3, hflip=1.0, channel_is_first_axis=True)
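# hflip=1.0 sets the flip probability to 100%, so essentially every augmented
# image should equal the horizontally flipped target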
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.9 and nb_similar <= nb_augment*1.0)
def test_vertical_flipping(self):
"""Tests vertical flipping of images (mirror on x-axis)."""
image_before = [[255, 0, 0],
[ 0, 255, 255],
[ 0, 0, 255]]
image_before = np.array(image_before, dtype=np.uint8)
image_target = [[ 0, 0, 1.0],
[ 0, 1.0, 1.0],
[1.0, 0, 0]]
image_target = np.array(image_target, dtype=np.float32)
nb_augment = 1000
images = np.resize([image_before], (nb_augment, 3, 3))
# Test using just "False" for vflip (should be exactly 0%)
augmenter = ImageAugmenter(3, 3, vflip=False)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertEqual(nb_similar, 0)
# Test using just "True" for vflip (should be ~50%)
augmenter = ImageAugmenter(3, 3, vflip=True)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.4 and nb_similar < nb_augment*0.6)
# Test using a probability (float value) for vflip (vflip=0.9,
# should be ~90%)
augmenter = ImageAugmenter(3, 3, vflip=0.9)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.8 and nb_similar <= nb_augment*1.0)
# Test with multiple channels
image_before = np.zeros((2, 3, 3), dtype=np.uint8)
image_before[0] = [[255, 255, 0],
[255, 0, 0],
[ 0, 0, 0]]
image_before[1] = [[ 0, 255, 0],
[ 0, 255, 0],
[ 0, 0, 255]]
image_target = np.zeros((2, 3, 3), dtype=np.float32)
image_target[0] = [[ 0, 0, 0],
[1.0, 0, 0],
[1.0, 1.0, 0]]
image_target[1] = [[ 0, 0, 1.0],
[ 0, 1.0, 0],
[ 0, 1.0, 0]]
images = np.resize([image_before], (nb_augment, 2, 3, 3))
augmenter = ImageAugmenter(3, 3, vflip=1.0, channel_is_first_axis=True)
images_augmented = augmenter.augment_batch(images)
nb_similar = 0
for image_after in images_augmented:
if np.allclose(image_after, image_target):
nb_similar += 1
self.assertTrue(nb_similar > nb_augment*0.9 and nb_similar <= nb_augment*1.0)
if __name__ == '__main__':
unittest.main()
|
imgaug-master
|
old_version/tests/TestImageAugmenter.py
|